Example #1
int main( int argc, char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    /* print a welcome message, and the OpenCV version */
    printf ("Welcome to lkdemo, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - auto-initialize tracking\n"
            "\tc - delete all the points\n"
            "\tn - switch the \"night\" mode on/off\n"
            "To add/remove a feature point click it\n" );

    cvNamedWindow( "LkDemo", 0 );
    cvSetMouseCallback( "LkDemo", on_mouse, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, k, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );

        if( night_mode )
            cvZero( image );

        if( need_to_init )
        {
            /* automatic initialization */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;

            count = MAX_COUNT;
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
                                   quality, min_distance, 0, 3, 0, 0.04 );
            cvFindCornerSubPix( grey, points[1], count,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );

            add_remove_pt = 0;
        }
        else if( count > 0 )
        {
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            for( i = k = 0; i < count; i++ )
            {
                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                if( !status[i] )
                    continue;

                points[1][k++] = points[1][i];
                cvCircle( image, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
            }
            count = k;
        }

        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c )
        {
        case 'r':
            need_to_init = 1;
            break;
        case 'c':
            count = 0;
            break;
        case 'n':
            night_mode ^= 1;
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("LkDemo");

    return 0;
}
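This demo relies on a handful of globals (image, pt, add_remove_pt, need_to_init, night_mode, count, win_size and the point buffers) and an on_mouse callback that are declared outside this excerpt. In the original OpenCV lkdemo sample the callback simply records the clicked point and raises the add/remove flag; a minimal sketch along those lines:

void on_mouse( int event, int x, int y, int flags, void* param )
{
    if( !image )
        return;

    /* account for a bottom-left image origin */
    if( image->origin )
        y = image->height - y;

    if( event == CV_EVENT_LBUTTONDOWN )
    {
        /* remember the click; the main loop adds or removes the point */
        pt = cvPoint2D32f(x,y);
        add_remove_pt = 1;
    }
}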
void VelocityDetector::LKFlow(Image* output)
{
    // Convert the current image to grey scale
    cvCvtColor(m_currentFrame->asIplImage(), m_currentGreyScale, CV_BGR2GRAY);
    
    
    // Track from the previous grey-scale frame into the current one
    IplImage* last = m_lastGreyScale;
    IplImage* current = m_currentGreyScale;
        
    CvPoint2D32f frame1_features[m_lkMaxNumberFeatures];
    
    int number_of_features = m_lkMaxNumberFeatures;
    
    // Choosing the features to track (Shi-Tomasi)
    
    cvGoodFeaturesToTrack(last, m_eig_image, m_temp_image, frame1_features, 
                          &number_of_features, 
                          m_lkMinQualityFeatures, m_lkMinEucDistance, NULL);
    CvPoint2D32f frame2_features[m_lkMaxNumberFeatures];
    
    char optical_flow_found_feature[m_lkMaxNumberFeatures];
    float optical_flow_feature_error[m_lkMaxNumberFeatures];
    
    // To avoid the "aperture problem"
    CvSize optical_flow_window = cvSize(3,3);
    
    // Terminate after m_lkIterations iterations, or once the residual drops below m_lkEpsilon
    CvTermCriteria optical_flow_termination_criteria
        = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, m_lkIterations, m_lkEpsilon);
    
    // Run the pyramidal L-K optical flow algorithm on the chosen features
    cvCalcOpticalFlowPyrLK(last, current, m_pyramid1, 
                           m_pyramid2, 
                           frame1_features, frame2_features, 
                           number_of_features, optical_flow_window, 5, 
                           optical_flow_found_feature, 
                           optical_flow_feature_error, 
                           optical_flow_termination_criteria, 0);

    // We are done; copy the current frame over to the last one
    cvCopyImage(m_currentGreyScale, m_lastGreyScale);
    
    // needs to return m_velocity
    
    CvPoint totalP, totalQ;
    totalP.x = 0;
    totalP.y = 0;
    totalQ.x = 0;
    totalQ.y = 0;
    int goodCount = 0;  // number of flow vectors that pass the sanity test
    
    for(int i=0; i < number_of_features; i++)
    {
        // skip feature if not found
        if(optical_flow_found_feature[i] == 0)  continue;
        
        // plots each feature frame to frame
        CvPoint p, q;
        p.x = (int) frame1_features[i].x;
        p.y = (int) frame1_features[i].y;
        q.x = (int) frame2_features[i].x;
        q.y = (int) frame2_features[i].y;
        
        math::Vector2 flowVector(-(q.x - p.x), q.y - p.y);
        
        // Sanity test: accept the vector if its length is within
        // m_lkLengthMaxError (relative) of the current velocity estimate,
        // or if the estimate is still essentially zero (this also avoids
        // dividing by a near-zero length)
        bool good = false;
        if (m_velocity.length() < 0.0001)
            good = true;
        else
        {
            double lengthDifference =
                fabs(flowVector.length() - m_velocity.length());
            if ((lengthDifference / m_velocity.length()) < m_lkLengthMaxError)
                good = true;
        }

        if (good)
        {
            totalP.x += p.x;
            totalP.y += p.y;
            totalQ.x += q.x;
            totalQ.y += q.y;
            goodCount++;
        }
        
        // we could draw the whole flow field here, but for now we just average
        
        // Draw velocity vector
        //if (output)
        //{
        //    CvPoint start;
        //    start.x = output->getWidth() / 2;
        //    start.y = output->getHeight() / 2;
        //    CvPoint end;
        //    end.x = start.x + ((int)(m_velocity.x*m_phaseLineScale));
        //    end.y = start.y - ((int)(m_velocity.y*m_phaseLineScale));
        //    cvLine(output->asIplImage(), start, end, CV_RGB(255,0,0), 1, CV_AA, 0);
        
        if (output)
        {
            int line_thickness = 1;
            CvScalar line_color = CV_RGB(0,0,255);
            if (!good)
                line_color = CV_RGB(0,255,0);
            double angle = atan2((double) p.y - q.y, (double) p.x - q.x);
            double hypotenuse = sqrt(square(p.y - q.y) + square(p.x - q.x));
            // Lengthen the arrow by m_lkFlowFieldScale for visibility.
            q.x = (int) (p.x - m_lkFlowFieldScale * hypotenuse * cos(angle));
            q.y = (int) (p.y - m_lkFlowFieldScale * hypotenuse * sin(angle));
            
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0);
            
            p.x = (int) (q.x + 5 * cos(angle + M_PI / 4));
            p.y = (int) (q.y + 5 * sin(angle + M_PI / 4));
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 5 * cos(angle - M_PI / 4));
            p.y = (int) (q.y + 5 * sin(angle - M_PI / 4));
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0);
        }
    }
    
    CvPoint avgP, avgQ;
    avgP.x = 0;
    avgP.y = 0;
    avgQ.x = 0;
    avgQ.y = 0;
    double outImageX = 0;
    double outImageY = 0;
    
    if (goodCount != 0)
    {
        avgP.x = totalP.x/goodCount;
        avgP.y = totalP.y/goodCount;
        avgQ.x = totalQ.x/goodCount;
        avgQ.y = totalQ.y/goodCount;
        
        outImageX = avgQ.x - avgP.x;
        outImageY = avgQ.y - avgP.y;
    }
    
    // need to convert coordinates to place origin in center
    
    //double outX = 0;
    //double outY = 0;
    //Detector::imageToAICoordinates(m_lastFrame, outImageX, outImageY, outX, 
      //                             outY);
    
    // assign velocity
    m_velocity = math::Vector2(-outImageX, outImageY);
    
}
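The arrow-drawing code above calls a square() helper that is not part of this excerpt. The idiom comes from David Stavens' optical-flow tutorial, where the helper is a one-liner; a matching sketch (the actual class may define it differently):

/* square an integer; used when computing the arrow hypotenuse */
static double square(int a)
{
    return a * a;
}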
void FeatureTracker::track_features(geometry_msgs::PoseStamped mapPose){
  //set the initial number of features to the max number we want to find
  int feature_count=num_features;
  printf("pose %f %f %f\n",mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  int edge_pixels=5;
  
  //check if there were features from the last image to keep tracking
  if(last_feature_count>0){
    //if there were call cvCalcOpticalFlowPyrLK();
    //find matches between last good features and current image features
    //    store matches in featuresB
    cvCalcOpticalFlowPyrLK(last_image, image_rect, pyrA, pyrB,
                           features, featuresB, last_feature_count,
                           cvSize(win_size,win_size), 4,
                           last_features_status, track_error,
                           cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,.3), 0);
  }
  
  printf("got image flow\n");
  //    assign last_feature_id values for matched features and set the non matched spots to -1
  
  //find new features and subpixel them
  
  //I SHOULD ADD THE IMAGE FLOW VALUES AS FEATURES NOW BEFORE FINDING NEW FEATURES
  
  //find all good features
  cvGoodFeaturesToTrack(image_rect, eigImage, tempImage, features, &feature_count, quality_level, min_distance, NULL, block_size);
  
  //subpixel good features
  cvFindCornerSubPix(image_rect,features,feature_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
  
  printf("subpixeled image\n");
  
  //for all the features in features B, find their matches in the newly found features
  //add all the matches to their correct featuremanager, for the non matching, make a new
  //feature manager and add them to it
  
  //for all features by now we need their ray and the robot pose at that location
  //draw dots on image where features are
  
  
  
  //set the feature ids to a control value
  
  for(int i=0;i<num_features;i++){
    current_feature_id[i]=-1;
  }
  
  for(int i=0;i<last_feature_count;i++){
    //for the previously found features in list b
    if(last_features_status[i]>0){
      for(int j=0;j<feature_count;j++){
	//for every feature found in this image
	
	//determine if the two overlap in a meaningful way
	int xdiff=featuresB[i].x-features[j].x;
	int ydiff=featuresB[i].y-features[j].y;
	//if the pixels are within some margin of each other
	if(sqrt(xdiff*xdiff + ydiff*ydiff)<pixel_tracking_margin){
	  //if they do set the current id for j to the id of i
	  current_feature_id[j]=last_feature_id[i];
	  printf("feature found %d %d",last_feature_id[i],i);
	}
      }
    }
  }
  
  printf("assigned IDs image\n");
  
  
  for(int i=0;i<feature_count;i++){
    
    printf("looping\n");
    if(current_feature_id[i]>=0){
      printf("prev feature match\n");
      //if we matched a previous feature
      //add our new feature to the previous features list
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	featureList[current_feature_id[i]].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
      }else{
	current_feature_id[i]=-1;
      }
      
    }else{
      printf("new feature\n");
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	printf("new good feature \n");
	//if we didn't
	//create a new feature group in the list
	current_feature_id[i]=feature_number;
	//add the new feature to the feature list
	featureList.push_back(FeatureManager());

	featureList[feature_number].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
	++feature_number;
      }
    }
  }
   
//  printf("features: ");
  for(int i=0;i<num_features;i++){
    if(i<feature_count){
     last_feature_id[i]=current_feature_id[i];
    }
    else{
      last_feature_id[i]=-1;
    }
 //   printf(" %d ",current_feature_id[i]);
  }
  printf("\n");
  
  
  last_feature_count=feature_count;
  
}
int do_example_for_optical_flow(void)
{
	/* Create an object that decodes the input video stream. */
	CvCapture *input_video = cvCaptureFromFile(
		//"C:\\Documents and Settings\\David Stavens\\Desktop\\223B-Demo\\optical_flow_input.avi"
		//"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\video-examples\\AnimalsMovingZSL17_07_14.mp4"
		//"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\\\AnimalsMovingZSL17_07_14.mp4"
		"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\video-examples\\optical_flow_input.avi"

		);
	if (input_video == NULL)
	{
		/* Either the video didn't exist OR it uses a codec OpenCV
		 * doesn't support.
		 */
		fprintf(stderr, "Error: Can't open video.\n");
		return -1;
	}

	/* Read the video's frame size out of the AVI. */
	CvSize frame_size;
	frame_size.height =		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
	frame_size.width  =		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

	/* Determine the number of frames in the AVI. */
	long number_of_frames;
	/* Go to the end of the AVI (ie: the fraction is "1") */
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
	/* Now that we're at the end, read the AVI position in frames */
	number_of_frames = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
	/* Return to the beginning */
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

	/* Create a window called "Optical Flow" for visualizing the output.
	 * Have the window automatically change its size to match the output.
	 */
	cvNamedWindow("Optical Flow", CV_WINDOW_NORMAL); /// ran change:  CV_WINDOW_AUTOSIZE);
	cv::resizeWindow("Optical Flow", frame_size.width/ windowShrinkFactor, frame_size.height/ windowShrinkFactor);

	long current_frame = 0;
	while(true)
	{
		static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
		/* Go to the frame we want.  This matters because we query multiple
		 * frames per loop iteration for optical flow.  The very first call is
		 * redundant, since the correct position was already set outside the loop.
		 */
		cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

		/* Get the next frame of the video.
		 * IMPORTANT!  cvQueryFrame() always returns a pointer to the _same_
		 * memory location.  So successive calls:
		 * frame1 = cvQueryFrame();
		 * frame2 = cvQueryFrame();
		 * frame3 = cvQueryFrame();
		 * will result in (frame1 == frame2 && frame2 == frame3) being true.
		 * The solution is to make a copy of the cvQueryFrame() output.
		 */
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			/* Why did we get a NULL frame?  We shouldn't be at the end. */
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			return -1;
		}
		/* Allocate another image if not already allocated.
		 * Image has ONE channel of color (ie: monochrome) with 8-bit "color" depth.
		 * This is the image format OpenCV algorithms actually operate on (mostly).
		 */
		allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		/* Convert whatever the AVI image format is into OpenCV's preferred format.
		 * The original tutorial also flipped the image vertically (OpenCV used to
		 * read AVIs upside-down by default); the flip flag is disabled here.
		 */
		cvConvertImage(frame, frame1_1C, 0); /* was CV_CVTIMG_FLIP */

		/* We'll make a full color backup of this frame so that we can draw on it.
		 * (It's not the best idea to draw on the static memory space of cvQueryFrame().)
		 */
		allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1, 0);//  CV_CVTIMG_FLIP);

		/* Get the second frame of video.  Same principles as the first. */
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			return -1;
		}
		allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C, 0);//  CV_CVTIMG_FLIP);

		/* Shi and Tomasi Feature Tracking! */

		/* Preparation: Allocate the necessary storage. */
		allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
		allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

		/* Preparation: This array will contain the features found in frame 1. */
		CvPoint2D32f frame1_features[NUM_OF_FEATURES];

		/* Preparation: BEFORE the function call this variable is the array size
		 * (or the maximum number of features to find).  AFTER the function call
		 * this variable is the number of features actually found.
		 */
		int number_of_features;
		
		/* The maximum is NUM_OF_FEATURES.  Keeping it a #define makes it easy to
		 * change the number of features for an accuracy/speed tradeoff analysis.
		 */
		number_of_features = NUM_OF_FEATURES;

		/* Actually run the Shi and Tomasi algorithm!!
		 * "frame1_1C" is the input image.
		 * "eig_image" and "temp_image" are just workspace for the algorithm.
		 * The first ".01" specifies the minimum quality of the features (based on the eigenvalues).
		 * The second ".01" specifies the minimum Euclidean distance between features.
		 * "NULL" means use the entire input image.  You could point to a part of the image.
		 * WHEN THE ALGORITHM RETURNS:
		 * "frame1_features" will contain the feature points.
		 * "number_of_features" will be set to a value <= 400 indicating the number of feature points found.
		 */
		cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL);

		/* Pyramidal Lucas Kanade Optical Flow! */

		/* This array will contain the locations of the points from frame 1 in frame 2. */
		CvPoint2D32f frame2_features[NUM_OF_FEATURES];

		/* The i-th element of this array will be non-zero if and only if the i-th feature of
		 * frame 1 was found in frame 2.
		 */
		char optical_flow_found_feature[NUM_OF_FEATURES];

		/* The i-th element of this array is the error in the optical flow for the i-th feature
		 * of frame1 as found in frame 2.  If the i-th feature was not found (see the array above)
		 * I think the i-th entry in this array is undefined.
		 */
		float optical_flow_feature_error[NUM_OF_FEATURES];

		/* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
		CvSize optical_flow_window = cvSize(3,3);
		
		/* This termination criteria tells the algorithm to stop when it has either done 20 iterations or when
		 * epsilon is better than .3.  You can play with these parameters for speed vs. accuracy but these values
		 * work pretty well in many situations.
		 */
		CvTermCriteria optical_flow_termination_criteria
			= cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

		/* This is some workspace for the algorithm.
		 * (The algorithm actually carves the image into pyramids of different resolutions.)
		 */
		allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
		allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

		/* Actually run Pyramidal Lucas Kanade Optical Flow!!
		 * "frame1_1C" is the first frame with the known features.
		 * "frame2_1C" is the second frame where we want to find the first frame's features.
		 * "pyramid1" and "pyramid2" are workspace for the algorithm.
		 * "frame1_features" are the features from the first frame.
		 * "frame2_features" is the (outputted) locations of those features in the second frame.
		 * "number_of_features" is the number of features in the frame1_features array.
		 * "optical_flow_window" is the size of the window to use to avoid the aperture problem.
		 * "5" is the maximum number of pyramids to use.  0 would be just one level.
		 * "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
		 * "optical_flow_feature_error" is as described above (error in the flow for this feature).
		 * "optical_flow_termination_criteria" is as described above (how long the algorithm should look).
		 * "0" means disable enhancements.  (For example, the second array isn't pre-initialized with guesses.)
		 */
		cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
		
		/* For fun (and debugging :)), let's draw the flow field. */
		for(int i = 0; i < number_of_features; i++)
		{
			/* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
			if ( optical_flow_found_feature[i] == 0 )	continue;

			int line_thickness;				line_thickness = 1;
			/* CV_RGB(red, green, blue) is the red, green, and blue components
			 * of the color you want, each out of 255.
			 */	
			CvScalar line_color;			line_color = CV_RGB(255,0,250);
	
			/* Let's make the flow field look nice with arrows. */

			/* The arrows will be a bit too short for a nice visualization because of the high framerate
			 * (ie: there's not much motion between the frames).  So let's lengthen them by a factor of 3.
			 */
			CvPoint p,q;
			p.x = (int) frame1_features[i].x;
			p.y = (int) frame1_features[i].y;
			q.x = (int) frame2_features[i].x;
			q.y = (int) frame2_features[i].y;

			double angle;		angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
			double hypotenuse;	hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

			/* Here we lengthen the arrow by a factor of three. */
			q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
			q.y = (int) (p.y - 3 * hypotenuse * sin(angle));

			/* Now we draw the main line of the arrow. */
			/* "frame1" is the frame to draw on.
			 * "p" is the point where the line begins.
			 * "q" is the point where the line stops.
			 * "CV_AA" means antialiased drawing.
			 * "0" means no fractional bits in the center cooridinate or radius.
			 */
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			/* Now draw the tips of the arrow.  I do some scaling so that the
			 * tips look proportional to the main line of the arrow.
			 */			
			p.x = (int) (q.x + 9 * cos(angle + pi / 4));
			p.y = (int) (q.y + 9 * sin(angle + pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 9 * cos(angle - pi / 4));
			p.y = (int) (q.y + 9 * sin(angle - pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
		}
		/* Now display the image we drew on.  Recall that "Optical Flow" is the name of
		 * the window we created above.
		 */
		cvShowImage("Optical Flow", frame1);
		/* And wait for the user to press a key (so the user has time to look at the image).
		 * If the argument is 0 then it waits forever otherwise it waits that number of milliseconds.
		 * The return value is the key the user pressed.
		 */
		int key_pressed;
		key_pressed = cvWaitKey(1);  //0

		/* If the user presses "b" or "B", go back one frame.
		 * Otherwise go forward one frame.
		 */
		if (key_pressed == 'b' || key_pressed == 'B')	current_frame--;
		else											current_frame++;
		/* Don't run past the front/end of the AVI. */
		if (current_frame < 0)						current_frame = 0;
		if (current_frame >= number_of_frames - 1)	current_frame = number_of_frames - 2;
		if (key_pressed == 27) break;
	}

	return 0;
}
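The example above uses a pi constant and an allocateOnDemand() helper that are defined elsewhere in Stavens' tutorial. A sketch consistent with how they are used here (allocate a buffer on first use, reuse it on later iterations):

static const double pi = 3.14159265358979323846;

/* Allocate an image on the first call and leave it alone afterwards, so
 * the static pointers in the main loop are only ever allocated once. */
void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
	if ( *img != NULL )
		return;

	*img = cvCreateImage( size, depth, channels );
	if ( *img == NULL )
	{
		fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
		exit(-1);
	}
}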
int FindAndTrackAllPointsOnRegistersOpenCV(unsigned int reg_new , unsigned int reg_old , unsigned int timeout)
{

    if  ( ( video_register[reg_new].pixels == 0 ) || ( video_register[reg_old].pixels == 0 ) ) { return 0; }

    // Load two images and allocate other structures
    struct VideoRegister * MONOCHROME_TMP_REGISTER_OLD = GetTempRegister();
    if (MONOCHROME_TMP_REGISTER_OLD == 0 ) { fprintf(stderr," Error Getting the first temporary Video Register ( TrackAllPointsOnRegistersOpenCV ) \n"); return 0; }

    struct VideoRegister * MONOCHROME_TMP_REGISTER_NEW = GetTempRegister();
    if (MONOCHROME_TMP_REGISTER_NEW == 0 ) { StopUsingVideoRegister(MONOCHROME_TMP_REGISTER_OLD); fprintf(stderr," Error Getting the second temporary Video Register ( TrackAllPointsOnRegistersOpenCV ) \n"); return 0; }

    CopyRegister(&video_register[reg_new],MONOCHROME_TMP_REGISTER_NEW,0,0);
    ConvertRegisterFrom3ByteTo1Byte(MONOCHROME_TMP_REGISTER_NEW);

    CopyRegister(&video_register[reg_old],MONOCHROME_TMP_REGISTER_OLD,0,0);
    ConvertRegisterFrom3ByteTo1Byte(MONOCHROME_TMP_REGISTER_OLD);

    image_1->imageData=(char*) MONOCHROME_TMP_REGISTER_OLD->pixels; // UGLY HACK
    image_2->imageData=(char*) MONOCHROME_TMP_REGISTER_NEW->pixels; // UGLY HACK



	int win_size = 15;


	// Get the features for tracking

  StartTimer(FIND_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | START


	int corner_count = MAX_CORNERS;

	cvGoodFeaturesToTrack( image_1, eig_image, tmp_image, cornersA, &corner_count, 0.05, 5.0, 0, 3, 0, 0.04 );

	cvFindCornerSubPix( image_1, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );

  EndTimer(FIND_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | END



	// Call Lucas Kanade algorithm

 StartTimer(TRACK_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | START
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];

	CvSize pyr_sz = cvSize( image_1->width+8, image_2->height/3 );

	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );


	cvCalcOpticalFlowPyrLK( image_1, image_2, pyrA, pyrB, cornersA, cornersB, corner_count,
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );


 EndTimer(TRACK_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | END



   ClearFeatureList(video_register[reg_new].features);
   video_register[reg_new].features->last_track_time  = video_register[reg_new].time; // after this procedure the feature list is up to date

   int i=0 ;
   for ( i=0; i <corner_count; i++ )
    {

         AddToFeatureList(  video_register[reg_new].features  ,
                            cornersB[i].x , cornersB[i].y , 1 ,0,0,0);

         video_register[reg_new].features->list[i].last_x = cornersA[i].x;
         video_register[reg_new].features->list[i].last_y = cornersA[i].y;
    }


   unsigned int filtered_out = RemoveTrackPointsIfMovementMoreThan(video_register[reg_new].features,settings[FEATURE_TRACKING_MAX_MOVEMENT_THRESHOLD]);
   if ( filtered_out > 0 )
     {
       // fprintf(stderr,"Filtered %u points due to movement\n", filtered_out  );
     }
   unsigned int outside_zone = 8;
   filtered_out = Remove2DTrackPointsIfOutOfBounds(video_register[reg_new].features,outside_zone,outside_zone,metrics[RESOLUTION_X]-outside_zone,metrics[RESOLUTION_Y]-outside_zone);
   if ( filtered_out > 0 )
     {
       // fprintf(stderr,"Filtered %u points due as out of bounds \n", filtered_out  );
     }

   cvReleaseImage(&pyrA);
   cvReleaseImage(&pyrB);


   StopUsingVideoRegister(MONOCHROME_TMP_REGISTER_NEW);
   StopUsingVideoRegister(MONOCHROME_TMP_REGISTER_OLD);


  return corner_count;
}
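Note that this routine allocates and releases pyrA/pyrB on every call. The legacy API also lets you keep the pyramid buffers alive across frames and pass CV_LKFLOW_PYR_A_READY so the previous frame's pyramid is not recomputed, as Example #1 does; the core of that pattern looks like this:

int flags = 0;
for (;;)
{
    cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
        points[0], points[1], count, cvSize(win_size,win_size), 3,
        status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
        flags );
    flags |= CV_LKFLOW_PYR_A_READY; /* pyramid for the first image is now cached */

    /* swap buffers so the current frame becomes the previous one */
    CV_SWAP( prev_grey, grey, swap_temp );
    CV_SWAP( prev_pyramid, pyramid, swap_temp );
    CV_SWAP( points[0], points[1], swap_points );
}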
void KLT::lkOpticalFlow (IplImage *frame)
{
    printf("KLT is starting....%d\n", count);
    printf("frame is [%d,%d] and %d channels\n", frame->height, frame->width, frame->nChannels);

    // initialize our buffers
    if (lkInitialized == false) {
        lkImage = cvCreateImage(cvGetSize(frame), 8, 3);
        lkImage->origin = frame->origin;
        lkGrey = cvCreateImage(cvGetSize(frame), 8, 1);
        lkPrevGrey = cvCreateImage(cvGetSize(frame), 8, 1);
        lkPyramid = cvCreateImage(cvGetSize(frame), 8, 1);
        lkPrevPyramid = cvCreateImage(cvGetSize(frame), 8, 1);
        lkPoints[0] = (CvPoint2D32f *)cvAlloc(MAX_COUNT * sizeof(lkPoints[0][0]));
        lkPoints[1] = (CvPoint2D32f *)cvAlloc(MAX_COUNT * sizeof(lkPoints[0][0]));
        lkStatus = (char *)cvAlloc(MAX_COUNT);
        lkFeatureError = (float *)cvAlloc(MAX_COUNT);
    }

    cvCopy(frame, lkImage, 0);
    cvCvtColor(lkImage, lkGrey, CV_BGR2GRAY);

    if (DEBUG_KLT) {
        printf("end copy and convert color...\n");
    }

    if (lkInitialized == false) printf("init is FALSE\n");
    if (lkInitialized == true)  printf("init is TRUE\n");

    if (lkInitialized == false) {

        printf("klt :: here1\n");

        IplImage *eig  = cvCreateImage(cvGetSize(lkGrey), 32, 1);
        IplImage *temp = cvCreateImage(cvGetSize(lkGrey), 32, 1);

        lkCount = MAX_COUNT;

        cvGoodFeaturesToTrack(lkGrey, eig, temp, lkPoints[1], &lkCount, quality, minDistance, 0, 3, 0, 0.04);

        cvFindCornerSubPix(lkGrey, lkPoints[1], lkCount, cvSize(winSize, winSize), cvSize(-1,-1),
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));

        printf("klt :: here2\n");
        printf("lkCount = %d\n", lkCount);

        cvReleaseImage(&eig);
        cvReleaseImage(&temp);

        lkInitialized = true;

        printf("klt :: here3\n");
        printf("lkCount = %d\n", lkCount);

    } else if (lkCount > 0) {

        /* disabled for now:
        cvCalcOpticalFlowPyrLK(lkPrevGrey, lkGrey, lkPrevPyramid, lkPyramid, lkPoints[0],
            lkPoints[1], lkCount, cvSize(winSize, winSize), 3, lkStatus, 0,
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), lkFlags);
        lkFlags |= CV_LKFLOW_PYR_A_READY;
        */
        lkRanOnce = true;

    }

    CV_SWAP(lkPrevGrey, lkGrey, lkSwapTemp);
    CV_SWAP(lkPrevPyramid, lkPyramid, lkSwapTemp);
    CV_SWAP(lkPoints[0], lkPoints[1], lkSwapPoints);

    printf("Done swapping...\n");

    count++;

} // end lkOpticalFlow
static void
kms_crowd_detector_compute_optical_flow (KmsCrowdDetector * crowddetector,
    IplImage * binary_actual_motion, CvRect container, int curve)
{
  IplImage *eig_image;
  IplImage *temp_image;
  IplImage *frame1_1C;
  IplImage *frame2_1C;
  IplImage *pyramid1;
  IplImage *pyramid2;
  CvSize frame_size;
  CvPoint2D32f frame2_features[NUMBER_FEATURES_OPTICAL_FLOW];
  char optical_flow_found_feature[NUMBER_FEATURES_OPTICAL_FLOW];
  float optical_flow_feature_error[NUMBER_FEATURES_OPTICAL_FLOW];
  CvPoint2D32f frame1_features[NUMBER_FEATURES_OPTICAL_FLOW];
  int number_of_features = NUMBER_FEATURES_OPTICAL_FLOW;
  CvSize optical_flow_window =
      cvSize (WINDOW_SIZE_OPTICAL_FLOW, WINDOW_SIZE_OPTICAL_FLOW);

  frame_size.width = crowddetector->priv->actual_image->width;
  frame_size.height = crowddetector->priv->actual_image->height;

  eig_image = cvCreateImage (frame_size, IPL_DEPTH_32F, 1);
  frame1_1C = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);
  frame2_1C = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);

  cvConvertImage (crowddetector->priv->actual_image, frame1_1C, 0);
  cvConvertImage (crowddetector->priv->previous_image, frame2_1C, 0);
  temp_image = cvCreateImage (frame_size, IPL_DEPTH_32F, 1);

  cvGoodFeaturesToTrack (frame1_1C, eig_image, temp_image, frame1_features,
      &number_of_features, QUALITY_LEVEL, MIN_DISTANCE, NULL,
      BLOCK_SIZE, USE_HARRIS_DETECTOR, HARRIS_DETECTOR_K);

  CvTermCriteria optical_flow_termination_criteria =
      cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
      MAX_ITER_OPTICAL_FLOW, EPSILON_OPTICAL_FLOW);

  pyramid1 = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);
  pyramid2 = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);

  cvCalcOpticalFlowPyrLK (frame2_1C, frame1_1C, pyramid1, pyramid2,
      frame1_features, frame2_features, number_of_features,
      optical_flow_window, 3, optical_flow_found_feature,
      optical_flow_feature_error, optical_flow_termination_criteria, 0);

  cvCopy (crowddetector->priv->actual_image,
      crowddetector->priv->previous_image, 0);

  kms_crowd_detector_compute_roi_direction_vector (crowddetector,
      number_of_features, optical_flow_found_feature, frame1_features,
      frame2_features, CV_RGB (255, 0, 0), binary_actual_motion, container,
      curve);

  cvReleaseImage (&eig_image);
  cvReleaseImage (&temp_image);
  cvReleaseImage (&frame1_1C);
  cvReleaseImage (&frame2_1C);
  cvReleaseImage (&pyramid1);
  cvReleaseImage (&pyramid2);
}
Example #8
void MainWindow::optical_flow()
{
    cvReleaseCapture(&pCapture);
    pCapture = cvCaptureFromFile( "/home/kevin/optical_flow_input.avi" );
    if (pCapture == NULL)
    {
        fprintf(stderr, "Error: Can't open video.\n");
        return;
    }

    /* Read the video's frame size out of the AVI. */
    CvSize frame_size;
    frame_size.height = (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width =  (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_FRAME_WIDTH );

    /* Determine the number of frames in the AVI. */
    long number_of_frames;
    /* Go to the end of the AVI (ie: the fraction is "1") */
    cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_AVI_RATIO, 1. );
    /* Now that we're at the end, read the AVI position in frames */
    number_of_frames = (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES );
    /* Return to the beginning */
    cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES, 0. );

    while(true)
    {
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL;
        static IplImage *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL;
        static IplImage *pyramid1 = NULL, *pyramid2 = NULL;

        cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES, current_frame );

        frame = cvQueryFrame( pCapture );
        if (frame == NULL)
        {
            /* Why did we get a NULL frame?  We shouldn't be at the end. */
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
        }

        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );

        cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);

        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);

        /* Get the second frame of video.  Same principles as the first. */
        frame = cvQueryFrame( pCapture );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
        }
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);

        /* Preparation: Allocate the necessary storage. */
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        /* Preparation: This array will contain the features found in frame 1. */
        CvPoint2D32f frame1_features[400];

        int number_of_features;
        number_of_features = 400;

        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features,
                              &number_of_features, .01, .01, NULL);

        /* Pyramidal Lucas Kanade Optical Flow! */

        /* This array will contain the locations of the points from frame 1 in frame 2. */
        CvPoint2D32f frame2_features[400];

        char optical_flow_found_feature[400];
        float optical_flow_feature_error[400];

        /* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
        CvSize optical_flow_window = cvSize(3,3);
        CvTermCriteria optical_flow_termination_criteria
            = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        /* This is some workspace for the algorithm.
         * (The algorithm actually carves the image into pyramids of different resolutions.)
         */
        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2,
                               frame1_features, frame2_features, number_of_features,
                               optical_flow_window, 5, optical_flow_found_feature,
                               optical_flow_feature_error, optical_flow_termination_criteria, 0 );

        /* For fun (and debugging :)), let's draw the flow field. */
        for(int i = 0; i < number_of_features; i++)
        {
            /* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
            if ( optical_flow_found_feature[i] == 0 )	continue;

            int line_thickness;
            line_thickness = 1;
            /* CV_RGB(red, green, blue) is the red, green, and blue components
             * of the color you want, each out of 255.
             */
            CvScalar line_color;
            line_color = CV_RGB(255,0,0);

            CvPoint p,q;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;

            double angle;
            angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

            double hypotenuse;
            hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

            /* Here we lengthen the arrow by a factor of three. */
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));

            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            /* Now draw the tips of the arrow.  I do some scaling so that the
             * tips look proportional to the main line of the arrow.
             */
            p.x = (int) (q.x + 9 * cos(angle + pi / 4));
            p.y = (int) (q.y + 9 * sin(angle + pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 9 * cos(angle - pi / 4));
            p.y = (int) (q.y + 9 * sin(angle - pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        }

        cv::Mat frame1_mat=MainWindow::image_rotate(frame1);
        cv::flip(frame1_mat,frame1_mat,1);
        IplImage dst = frame1_mat;

        cv::Mat frame2_mat=MainWindow::image_rotate(frame2_1C);
        cv::flip(frame2_mat,frame2_mat,1);
        IplImage dst1 = frame2_mat;

        MainWindow::Display(frame,&dst,&dst1);

        int key_pressed;
        key_pressed = cvWaitKey(0);

        if (key_pressed == 'b' || key_pressed == 'B')
            current_frame--;
        else
            current_frame++;
        /* Don't run past the front/end of the AVI. */
        if (current_frame < 0)
            current_frame = 0;

        if (current_frame >= number_of_frames - 1)
            current_frame = number_of_frames - 2;
    }

}
Example #9
/*!
  Initialise the tracking by extracting KLT keypoints on the provided image.

  \param I : Grey level image used as input. This image should have only 1 channel.
  \param mask : Image mask used to restrict the keypoint detection area.
  If mask is NULL, the whole image will be considered.

  \exception vpTrackingException::initializationError : If the image I is not
  initialized, or if the image or the mask have bad coding format.
*/
void vpKltOpencv::initTracking(const IplImage *I, const IplImage *mask)
{
  if (!I) {
    throw(vpException(vpTrackingException::initializationError,  "Image Not initialized")) ;
  }

  if (I->depth != IPL_DEPTH_8U || I->nChannels != 1)  {
    throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
  }

  if (mask) {
    if (mask->depth != IPL_DEPTH_8U || mask->nChannels != 1) 	{
      throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
    }
  }

  // Create the buffers
  CvSize Sizeim, SizeI;
  SizeI = cvGetSize(I);
  bool b_imOK = true;
  if(image != NULL){
    Sizeim = cvGetSize(image);
    if(SizeI.width != Sizeim.width || SizeI.height != Sizeim.height) b_imOK = false;
  }
  if(image == NULL || prev_image == NULL || pyramid==NULL || prev_pyramid ==NULL || !b_imOK){
    reset();
    image = cvCreateImage(cvGetSize(I), 8, 1);
    image->origin = I->origin;
    prev_image = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    prev_pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
  }else{
    swap_temp = 0;
    countFeatures = 0;
    countPrevFeatures = 0;
    flags = 0;
    initialized = 0;
    globalcountFeatures = 0;   
  }

  initialized = 1;

  // Import the input image
  cvCopy(I, image, 0);

  // Search for interest points
  countFeatures = maxFeatures;
  countPrevFeatures = 0;
  IplImage* eig = cvCreateImage(cvGetSize(image), 32, 1);
  IplImage* temp = cvCreateImage(cvGetSize(image), 32, 1);
  cvGoodFeaturesToTrack(image, eig, temp, features,
			&countFeatures, quality, min_distance,
      mask, block_size, use_harris, harris_free_parameter);
  cvFindCornerSubPix(image, features, countFeatures, cvSize(win_size, win_size),
		     cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|
						  CV_TERMCRIT_EPS,20,0.03));
  cvReleaseImage(&eig);
  cvReleaseImage(&temp);

  if (OnInitialize)
    OnInitialize(_tid);

  //printf("Number of features at init: %d\n", countFeatures);
  for (int boucle=0; boucle<countFeatures;boucle++)  {
    featuresid[boucle] = globalcountFeatures;
    globalcountFeatures++;
    
    if (OnNewFeature){
      OnNewFeature(_tid, boucle, featuresid[boucle], features[boucle].x,
		   features[boucle].y);
    }
  }
}
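A typical call sequence for this tracker might look as follows (a sketch only; it assumes an 8-bit single-channel IplImage* named grey and the usual vpKltOpencv setters):

vpKltOpencv tracker;
tracker.setMaxFeatures(200);      // size of the internal feature buffers
tracker.setWindowSize(10);        // win_size used by cvFindCornerSubPix
tracker.initTracking(grey, NULL); // NULL mask: detect over the whole image
tracker.track(grey);              // then call track() on each new frame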
Example #10
void MainWindow::OpticalFlowDetect()
{
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);

    int corner_count = 1000;

     CvTermCriteria criteria;
     criteria = cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 64, 0.01);

     IplImage *src_img1;
     IplImage *src_img2;

     IplImage *dst_img;
     IplImage *pre_img;
     IplImage *result;


     IplImage *eig_img;
     IplImage *temp_img;

     IplImage *prev_pyramid;
     IplImage *curr_pyramid;

     CvPoint2D32f *corners1;
     CvPoint2D32f *corners2;
     corners1 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));
     corners2 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));

     char *status;
     status = (char *) cvAlloc (corner_count);

     while (1)
     {

      pre_img = cvQueryFrame(pCapture);

      CvSize img_sz = cvGetSize(pre_img);
      src_img1 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(pre_img, src_img1, CV_RGB2GRAY);

      dst_img = cvQueryFrame(pCapture);
      src_img2 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(dst_img, src_img2, CV_RGB2GRAY);

      result=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
      cvZero(result);

      eig_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);
      temp_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);


      prev_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);
      curr_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);



      /* reset each frame: cvGoodFeaturesToTrack overwrites corner_count
         with the number of corners actually found */
      corner_count = 1000;
      cvGoodFeaturesToTrack (src_img1, eig_img, temp_img, corners1, &corner_count, 0.001, 5, NULL);

      cvCalcOpticalFlowPyrLK (src_img1, src_img2, prev_pyramid, curr_pyramid,
       corners1, corners2, corner_count, cvSize (10, 10), 4, status, NULL, criteria, 0);

      for (int i = 0; i < corner_count; i++)
      {

          if (status[i])
              cvLine (dst_img, cvPointFrom32f (corners1[i]), cvPointFrom32f (corners2[i]), CV_RGB (255, 0, 0), 1, CV_AA, 0);
      }

      if(27==cvWaitKey(33))
          break;

    //  cvCvtScale(dst_img,result,1.0/255,0);
      MainWindow::Display(pre_img,src_img2,dst_img);

      /* release the per-frame buffers (pre_img and dst_img belong to the
         capture and must not be released) */
      cvReleaseImage(&src_img1);
      cvReleaseImage(&src_img2);
      cvReleaseImage(&result);
      cvReleaseImage(&eig_img);
      cvReleaseImage(&temp_img);
      cvReleaseImage(&prev_pyramid);
      cvReleaseImage(&curr_pyramid);

     }
}
Example #11
void MainWindow::OpencvOpticalFlow()
{
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);

    IplImage* pre;
    IplImage* next;

    int corner_count=MAX_CORNERS;

    while(1)
    {
        // Grab a frame and convert it before grabbing the next one:
        // cvQueryFrame() returns a pointer to an internal buffer that is
        // overwritten by the following call.
        pre=cvQueryFrame(pCapture);

        CvSize img_sz=cvGetSize(pre);
        IplImage* imgC=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        cvZero(imgC);

        IplImage* imgA=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        IplImage* imgB=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        cvCvtColor(pre,imgA,CV_BGR2GRAY);

        next=cvQueryFrame(pCapture);
        cvCvtColor(next,imgB,CV_BGR2GRAY);

        // Scratch images for the corner detector
        IplImage* eig_image=cvCreateImage(img_sz,IPL_DEPTH_32F,1);
        IplImage* tmp_image=cvCreateImage(img_sz,IPL_DEPTH_32F,1);

        CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];

        // Find features to track; reset corner_count each frame because
        // cvGoodFeaturesToTrack overwrites it with the number found
        corner_count = MAX_CORNERS;
        cvGoodFeaturesToTrack(
             imgA,
             eig_image,
             tmp_image,
             cornersA,
             &corner_count,
             0.01,
             5.0,
             0,
             3,
             0,
             0.04
         );

        cvFindCornerSubPix(
            imgA,
            cornersA,
            corner_count,
            cvSize(win_size,win_size),
            cvSize(-1,-1),
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)
        );


        char features_found[ MAX_CORNERS ];
        float feature_errors[ MAX_CORNERS ];

        // Legacy pyramid buffer size convention: width+8, height/3
        CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );

        IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        CvPoint2D32f* cornersB  = new CvPoint2D32f[ MAX_CORNERS ];

        cvCalcOpticalFlowPyrLK(
             imgA,
             imgB,
             pyrA,
             pyrB,
             cornersA,
             cornersB,
             corner_count,
             cvSize( win_size,win_size ),
             5,
             features_found,
             feature_errors,
             cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
             0
          );

        for( int i=0; i<corner_count; i++ ) {
           if( features_found[i]==0|| feature_errors[i]>550 ) {
       //       printf("Error is %f/n",feature_errors[i]);
              continue;
           }
       //    printf("Got it/n");
           CvPoint p0 = cvPoint(
              cvRound( cornersA[i].x ),
              cvRound( cornersA[i].y )
           );

           CvPoint p1 = cvPoint(
              cvRound( cornersB[i].x ),
              cvRound( cornersB[i].y )
           );

           cvLine( imgC, p0, p1, CV_RGB(255,0,0),2 );
        }

        if(27==cvWaitKey(33))
            break;
        MainWindow::Display(imgA,imgB,imgC);

        // release the per-frame buffers (pre/next belong to the capture)
        cvReleaseImage(&imgA);
        cvReleaseImage(&imgB);
        cvReleaseImage(&imgC);
        cvReleaseImage(&eig_image);
        cvReleaseImage(&tmp_image);
        cvReleaseImage(&pyrA);
        cvReleaseImage(&pyrB);
        delete [] cornersA;
        delete [] cornersB;

    }

}
Example #12
void MainWindow::OpticalFlow()
{
    cvReleaseCapture(&pCapture);
 //   pCapture=cvCaptureFromCAM(0);

    //use webcam
    CvCapture* cam = cvCaptureFromCAM( 0 ) ;
    while(1)
    {
        //get a color image
        IplImage* frame = cvQueryFrame(cam) ;

        CvSize img_sz = cvGetSize(frame);
        const int win_size = 10 ;

        //convert the image to grey image
        IplImage* frame_prev = cvQueryFrame(cam) ;
        IplImage* img_prev = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        cvCvtColor( frame_prev,img_prev ,CV_BGR2GRAY);

        //convert the image to grey image
        IplImage* frame_cur = cvQueryFrame(cam) ;
        IplImage* img_curr = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        cvCvtColor( frame_cur,img_curr ,CV_BGR2GRAY);

        //create a imge to display result
        IplImage* img_res = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        for ( int y = 0 ; y < img_sz.height ; ++y )
        {
            uchar* ptr = (uchar*)( img_res->imageData + y * img_res->widthStep ) ;
            for ( int x = 0 ; x <img_res->width; ++x )
            {
                ptr[x] = 255 ;
            }
        }

        //get good features
        IplImage* img_eig = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
        IplImage* img_temp = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
        int corner_count = MAX_CORNERS ;
        CvPoint2D32f*  features_prev = new CvPoint2D32f[MAX_CORNERS] ;

        cvGoodFeaturesToTrack(
                    img_prev,
                    img_eig,
                    img_temp,
                    features_prev,
                    &corner_count,
                    0.01,
                    5.0,
                    0,
                    3,
                    0,
                    0.4
                    );

        cvFindCornerSubPix(
                    img_prev,
                    features_prev,
                    corner_count,
                    cvSize(win_size,win_size),
                    cvSize(-1,-1),
                    cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.03)
                    );

        // L-K
        char feature_found[ MAX_CORNERS ] ;
        float feature_errors[ MAX_CORNERS ] ;

        CvSize pyr_sz = cvSize( frame->width + 8 ,frame->height / 3 ) ;

        IplImage* pyr_prev = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
        IplImage* pyr_cur = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
        CvPoint2D32f*  features_cur = new CvPoint2D32f[ MAX_CORNERS ] ;

        cvCalcOpticalFlowPyrLK(
            img_prev,
            img_curr,
            pyr_prev,
            pyr_cur,
            features_prev,
            features_cur,
            corner_count,
            cvSize(win_size,win_size),
            5,
            feature_found,
            feature_errors,
            cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.3),
            0
            );

        for ( int i = 0 ; i < corner_count ; i++)
        {
            if ( 0 == feature_found[i] || feature_errors[i] > 550 )
            {
         //       printf("error is %f \n" , feature_errors[i] ) ;
                continue ;
            }

         //   printf("find it !\n") ;

            CvPoint pt_prev = cvPoint( features_prev[i].x , features_prev[i].y ) ;
            CvPoint pt_cur = cvPoint( features_cur[i].x , features_cur[i].y ) ;

            cvLine( img_res,pt_prev,pt_cur,CV_RGB( 255,0,0),2 );
        }

        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(frame_cur,img_curr,img_res);

        cvReleaseImage(&img_curr);
        cvReleaseImage(&img_eig);
        cvReleaseImage(&img_prev);
        cvReleaseImage(&img_res);
        cvReleaseImage(&img_temp);
        cvReleaseImage(&pyr_prev);
        cvReleaseImage(&pyr_cur);
        delete [] features_prev;
        delete [] features_cur;

    }
}
void blobAnalysis(IplImage * imgA, IplImage * imgB){
	const int MAX_CORNERS = 1000;
	CvSize img_sz = cvGetSize( imgA );
	int win_size = 15;
	LOG4CXX_TRACE(loggerBlobAnalysis , "Blob analisis started");
	try{
		IplImage *imgC = cvCreateImage(cvGetSize(imgA), IPL_DEPTH_32F, 3);
		cvZero(imgC);	// cvCreateImage does not clear the buffer

		// Get the features for tracking
		IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
		IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );

		int corner_count = MAX_CORNERS;
		CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];

		cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
			0.05, 5.0, 0, 3, 0, 0.04 );

		cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ),
			cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );

		// Call Lucas Kanade algorithm
		char features_found[ MAX_CORNERS ];
		float feature_errors[ MAX_CORNERS ];

		CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );

		IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
		IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );

		CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];

		cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count,
			cvSize( win_size, win_size ), 5, features_found, feature_errors,
			cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
		LOG4CXX_DEBUG(loggerBlobAnalysis, "Lucas Kanade algorithm applied");
		// Make an image of the results, skipping features the flow did not find
		for( int i=0; i<corner_count; i++ ){
			if( features_found[i]==0 || feature_errors[i]>550 ){
				printf("Error is %f\n", feature_errors[i]);
				continue;
			}
			printf("Got it\n");
			CvPoint p0 = cvPoint( cvRound( cornersA[i].x ), cvRound( cornersA[i].y ) );
			CvPoint p1 = cvPoint( cvRound( cornersB[i].x ), cvRound( cornersB[i].y ) );
			cvLine( imgC, p0, p1, CV_RGB(255,0,0), 2 );
		}
		LOG4CXX_DEBUG(loggerBlobAnalysis, "Made an image of the results");
	
	cvNamedWindow( "ImageA", 0 );
	cvNamedWindow( "ImageB", 0 );
	cvNamedWindow( "LKpyr_OpticalFlow", 0 );

	cvShowImage( "ImageA", imgA );
	cvShowImage( "ImageB", imgB );
	cvShowImage( "LKpyr_OpticalFlow", imgC );
	LOG4CXX_TRACE(loggerBlobAnalysis, "Blob analisis completed");
	cvWaitKey(0);

	// Clean up
	cvReleaseImage( &imgC );
	cvReleaseImage( &eig_image );
	cvReleaseImage( &tmp_image );
	cvReleaseImage( &pyrA );
	cvReleaseImage( &pyrB );
	delete [] cornersA;
	delete [] cornersB;
	}
	catch(exception& e)
	{
		LOG4CXX_ERROR(loggerBlobAnalysis, "Error in Blob Analysis: " << e.what());
	}

}
int* findhand(CvCapture *webcam) {
	
	//---Initialise Variables for Optical Flow---//
	CvSize OF_window = cvSize(3,3);						//Setup the size of the window of each pyramid level
	int no_of_points = 15000;
	CvPoint2D32f Frame_t_points[15000];
	CvPoint2D32f Frame_t_dt_points[15000];
	char optical_flow_found_feature[15000];
	float optical_flow_feature_error[15000];
	CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

	frame = cvQueryFrame(webcam);						//Grab a frame from the webcam at time t
	WorkingFrame=cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);		//Build a working image

	dots=cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);			//Build a working image

	//----Setup parameters for cvCalcOpticalFlowPyrLK------//
	Frame_at_t = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);		//Build image for Frame at t
	cvConvertImage(frame, Frame_at_t, 0);					//Populate Frame_at_t with filtered data from WorkingFrame

	Sleep(40);								//A ~40ms delay (about one frame) to let dt pass

	frame = cvQueryFrame(webcam);						//Grab a frame from the webcam at time t+dt
	
	Frame_at_t_dt = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);	//Build image for Frame at t
	cvConvertImage(frame, Frame_at_t_dt, 0);				//Populate Frame_at_t_dt with filtered data from WorkingFrame

	eig_image = cvCreateImage(cvGetSize(frame), IPL_DEPTH_32F, 1);		//Set up temporary floating-point 32-bit image
	temp_image = cvCreateImage(cvGetSize(frame), IPL_DEPTH_32F, 1);		//Another temporary image of the same size and same format as eig_image

	cvGoodFeaturesToTrack(Frame_at_t, eig_image, temp_image, Frame_t_points, &no_of_points, .01, .01, NULL);
		
	pyramid1 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
	pyramid2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
		
	cvCalcOpticalFlowPyrLK(Frame_at_t, Frame_at_t_dt, pyramid1, pyramid2, Frame_t_points, Frame_t_dt_points, no_of_points, OF_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
		
	pixel_step = frame->widthStep;						//Assign pixel step from grabbed frame
	channels = frame->nChannels;						//Assign no. channels from grabbed frame
	pixel_step_out = frame->widthStep;					//Assign pixel step from grabbed frame
	channels_out = frame->nChannels;					//Assign no. channels from grabbed frame
	data = (uchar *)frame->imageData;					//Assign pointer for source frame
	data_out = (uchar *)WorkingFrame->imageData;				//Assign pointer for Working frame

	//---Scan through grabbed frame, check pixel colour. If mostly red, saturate corresponding pixel in ---//
	//---output working image. This is designed to filter out noise and detect skin-----------------------//
	for(y=0; y<(frame->height); y++) {
		for(x=0 ; x<(frame->width); x++) {
			if(((data[y*pixel_step+x*channels+2]) > (20+data[y*pixel_step+x*channels])) && ((data[y*pixel_step+x*channels+2]) > (20+data[y*pixel_step+x*channels+1]))) {
				data_out[y*pixel_step_out+x*channels_out]=255;
			}
			else {
				data_out[y*pixel_step_out+x*channels_out]=0;
			}
		}
	}
	
	for (i = 0; i < no_of_points; i++) {
			
		if (optical_flow_found_feature[i] != 0) {
			
		// NOTE: '^' is bitwise XOR in C/C++, not exponentiation; square via multiplication
		double dx = (double)((int)Frame_t_points[i].x - (int)Frame_t_dt_points[i].x);
		double dy = (double)((int)Frame_t_points[i].y - (int)Frame_t_dt_points[i].y);
		double calc3 = dx*dx + dy*dy;

		int calc4 = (int)sqrt(calc3);
		int thresh = 24;					//Apply Optical Flow Threshold

		if (calc4>thresh) {
			if ((data_out[(int)Frame_t_points[i].y*pixel_step+(int)Frame_t_points[i].x*channels])>0) {
				cvCircle(dots, cvPoint((int)Frame_t_points[i].x, (int)Frame_t_points[i].y), 1, CV_RGB(100, 255, 100), 2);
				xt=xt+(int)Frame_t_points[i].x;
				yt=yt+(int)Frame_t_points[i].y;
				count=count+1;
			}
		}
		}
	}
	cvShowImage("Sat", dots);

	if (count > 12 ) {						//If enough moving skin-coloured points were found
			array[0]=(int)abs((double)(xt/count)/(double)640*100);			
			array[1]=(int)abs((double)(yt/count)/(double)480*100);
			xt=0;
			yt=0;
			count=0;
	}
		
	if (array[0]>frame->width) {
		array[0]=frame->width;
	}
		
	if (array[0]<0) {
		array[0]=0;						//'Hold' Function
	}
		
	if (array[1]>frame->height) {
		array[1]=0;
	}
		
	if (array[1]<0) {
		array[1]=0;
	}

	return(0);							//Results are left in the global 'array'; a null pointer is returned
}
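As fixed above, `^` in C/C++ is bitwise XOR rather than exponentiation, a classic pitfall when thresholding optical-flow displacement. A minimal sketch of the intended test, with the sqrt avoided by comparing squared distances:

#include <cv.h>

/* Returns nonzero when a feature moved more than thresh pixels between
   frames; comparing squared distances avoids sqrt() altogether. */
static int moved_far_enough( CvPoint2D32f a, CvPoint2D32f b, double thresh )
{
    double dx = (double)a.x - (double)b.x;
    double dy = (double)a.y - (double)b.y;
    return (dx*dx + dy*dy) > (thresh * thresh);
}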
Exemple #15
0
int main(int argc, char * argv[])
{
	if(argc < 3) {
		fprintf(stderr, "Usage: %s image1 image2\n", argv[0]);
		return 1;
	}

	char * im1fname = argv[1];
	char * im2fname = argv[2];

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);

	int count = MAX_COUNT;
	double quality = 0.5;
	// double min_distance = 2;
	double min_distance = 50;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	CvPoint2D32f * source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	char * status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 30;
	int offset = 200;
	
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	cvReleaseImage(&image1);
	cvReleaseImage(&image2);
	
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					delaunay_points[i] = (cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;

	cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );

	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total; i++ ) {
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		bool is_corner = false;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			/*	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height)) ||(curtri.points[j] == cvPoint(image1->width,0)) ||(curtri.points[j] == cvPoint(image1->width,image1->height))) {
				is_corner = true;
				break;
			}
			*/

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(!is_corner) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];
			

					if( (result->data.fl[0] > 0) && (result->data.fl[1] > 0) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ ) {
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						/*
						if((i == 143) && (y == 3577) && (x > 2055) && (x < 2087)) {
							printf("%d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
									sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
						}
						*/
	
						cvReleaseMat( &sourcepoint );
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}
	/*
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
			CvMat * result = cvCreateMat(3, 1, CV_32FC1);
			curp->data.fl[0] = x;
			curp->data.fl[1] = y;
			curp->data.fl[2] = 1;
			for(unsigned int i = 0; i < baryinvvec.size(); i++) {
				cvMatMul( baryinvvec[i], curp, result );
				double u = result->data.fl[0]/result->data.fl[2];
				double v = result->data.fl[1]/result->data.fl[2];
				if((u > 0) && (v > 0) && (u + v < 1)) {
					// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
					//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					break;
				}
			}
			cvReleaseMat( &result );
			cvReleaseMat( &curp );
		}
	}
	*/

	cvReleaseImage( &clean_nonthresh );

#ifdef OLD_BUSTED
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvSubdiv2DPointLocation locate_result;
			CvSubdiv2DEdge on_edge;
			CvSubdiv2DPoint * on_vertex;
			CvPoint curpoint = cvPoint( x, y );
			locate_result = cvSubdiv2DLocate( delaunay, cvPointTo32f( curpoint ),
				&on_edge, &on_vertex );
			if( (locate_result != CV_PTLOC_OUTSIDE_RECT) && (locate_result != CV_PTLOC_ERROR) ) {
				if( locate_result == CV_PTLOC_VERTEX ) { // this point is on a vertex
					for(int i = 0; i < count; i++) {
						if(((on_vertex->pt).x == delaunay_points[i].x) && ((on_vertex->pt).y == delaunay_points[i].y)) {
							cvSet2D( image1, y, x, cvGet2D( image2, cvPointFrom32f(dest_points[i]).y, cvPointFrom32f(dest_points[i]).x ) );
							break;
						}
					}
				}
				else if( locate_result == CV_PTLOC_ON_EDGE ) { // this point is on an edge
					CvSubdiv2DPoint* org_pt;
					CvSubdiv2DPoint* dst_pt;
					CvPoint org_pt_warp;
					CvPoint dst_pt_warp;
						
					org_pt = cvSubdiv2DEdgeOrg(on_edge);
					dst_pt = cvSubdiv2DEdgeDst(on_edge);

					for(int i = 0; i < count; i++) {
						if(((org_pt->pt).x == delaunay_points[i].x) && ((org_pt->pt).y == delaunay_points[i].y)) {
							org_pt_warp = cvPointFrom32f(dest_points[i]);
						}
						if(((dst_pt->pt).x == delaunay_points[i].x) && ((dst_pt->pt).y == delaunay_points[i].y)) {
							dst_pt_warp = cvPointFrom32f(dest_points[i]);
						}
					}

					// compute vector length of original edge and current point
					double original_length;
					double cur_length; 
					if( (int)((org_pt->pt).x) == curpoint.x ) { // vertical line
						original_length = fabs((org_pt->pt).y - (dst_pt->pt).y);
						cur_length = fabs((org_pt->pt).y - curpoint.y);
					}
					else if( (int)((org_pt->pt).y) == curpoint.y ) { // horizontal line
						original_length = fabs((org_pt->pt).x - (dst_pt->pt).x);
						cur_length = fabs((org_pt->pt).x - curpoint.x);
					}
					else { // sloped line
				 		original_length = sqrt(pow((org_pt->pt).x - (dst_pt->pt).x, 2.0) + pow((org_pt->pt).y - (dst_pt->pt).y, 2.0));
						cur_length = sqrt(pow((org_pt->pt).x - curpoint.x, 2.0) + pow((org_pt->pt).y - curpoint.y, 2.0));
					}
					// compute ratio of this point on the edge
					double ratio = cur_length / original_length;
					// copy this point from the destination edge
					CvPoint point_in_original;
					int warped_x = (int)(org_pt_warp.x - dst_pt_warp.x);
					int warped_y = (int)(org_pt_warp.y - dst_pt_warp.y);
					if( org_pt_warp.x == curpoint.x ) { // vertical line
						point_in_original.y = (int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y)));
						point_in_original.x = org_pt_warp.x;
					}
					else if(org_pt_warp.y == curpoint.y) { // horizontal line
						point_in_original.x = (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x)));
						point_in_original.y = org_pt_warp.y;
					}
					else { // sloped line
						double destination_length = sqrt(pow((org_pt_warp).x - (dst_pt_warp).x, 2.0) + pow((org_pt_warp).y - (dst_pt_warp).y, 2.0));
						double scaled_length = ratio * destination_length;
						double dest_angle = atan(fabs( (double)warped_y / (double)warped_x ));
						double xdist = scaled_length * cos(dest_angle);
						double ydist = scaled_length * sin(dest_angle);
						xdist = warped_x > 0 ? xdist : xdist * -1;
						ydist = warped_y > 0 ? ydist : ydist * -1;
						point_in_original.x = (int)( org_pt_warp.x + xdist);
						point_in_original.y = (int)( org_pt_warp.y + ydist);
					}
					
					if((point_in_original.x >= 0) && (point_in_original.y >= 0) && (point_in_original.x < (image1->width)) && (point_in_original.y < (image1->height))) {
						cvSet2D( image1, y, x, cvGet2D( image2, point_in_original.y, point_in_original.x ) );
					}
					else {
						printf("Edge point outside image\n");
					}
					// cvSet2D( image1, y, x, cvGet2D( image2, (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x))), 
					//			(int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y))) ) );
				}
				else if( locate_result == CV_PTLOC_INSIDE ) { // this point is inside a facet (triangle)
					/*
					printf("Point inside facet: %d, %d\n",curpoint.x,curpoint.y);
					int count = 0;
					CvPoint * origins = (CvPoint*)malloc(sizeof(CvPoint)*3);
					CvSubdiv2DEdge t = on_edge;
					// count number of edges
					do {
						CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
						if(count < 3) {
							origins[count] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
							printf("%d,%d\t",origins[count].x,origins[count].y);
						}
						count++;
						t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
					} while(t != on_edge);
					printf("\n");

					free(origins);
					*/
				}
			}
		}
	}
#endif // OLD_BUSTED
	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
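The warp loop in this example decides point-in-triangle membership by multiplying each pixel's homogeneous coordinates with the inverted matrix of triangle corners, which yields the pixel's barycentric weights. A distilled sketch of that test (names are illustrative, not from the source):

#include <cv.h>
#include <math.h>

/* baryinv is the inverse of the 3x3 matrix whose columns are the triangle
   corners in homogeneous form; w receives the barycentric weights. */
static int inside_triangle( const CvMat* baryinv, float x, float y, float w[3] )
{
    float p[3] = { x, y, 1.f };
    CvMat P = cvMat( 3, 1, CV_32FC1, p );
    CvMat W = cvMat( 3, 1, CV_32FC1, w );
    cvMatMul( baryinv, &P, &W );
    /* mirrors the source's test: positive weights that sum to roughly 1 */
    return w[0] > 0 && w[1] > 0 && fabs( 1.0 - (w[0] + w[1] + w[2]) ) <= 0.01;
}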
Exemple #16
0
void OpenCVManager::update(int timeScale)
{
	int stepTime = lastFrame + (int)(timeScale * 16.f);

	///////////////////////////
	//  update vector field  //
	///////////////////////////

	//approach normal
	Particle* vect;
	for (int i = 0; i < m_fieldWidth; ++i)
	{
		for (int j = 0; j < m_fieldHeight; ++j)
		{
			vect = &m_vectorField[(i*m_fieldHeight) + j];

			ofVec3f target;
			if (vect->vel.lengthSquared() > 0.f)
				target = (vect->vel / vect->vel.length()) * m_vectorFieldNorm;
			else
				target = ofVec3f(m_vectorFieldNorm, 0.f);
			vect->vel += (target - vect->vel) * 0.01;
		}
	}

	//update from video
	if (m_vidGrabber.isInitialized())
	{
		bool isNewFrame = false;
		int id = 0;

		m_vidGrabber.update();
		isNewFrame = m_vidGrabber.isFrameNew();

		if (isNewFrame)
		{
			//grab new frame
			m_curImg.setFromPixels(m_vidGrabber.getPixels(), s_frameSize.width, s_frameSize.height);

			//populate image
			if (firstFrame)
			{
				cvCvtColor(m_curImg.getCvImage(), m_newFrame, CV_RGB2GRAY);
			}

			//cycle new to old
			IplImage* buff = m_pyramidOld;
			m_pyramidOld = m_pyramidNew;
			m_pyramidNew = buff;

			buff = m_oldFrame;
			m_oldFrame = m_newFrame;
			m_newFrame = buff;

			m_numFeatures = s_maxFeatures;

			//convert color
			cvCvtColor(m_curImg.getCvImage(), buff, CV_RGB2GRAY);

			//mirror image
			cvFlip(buff, m_newFrame, -1);

			cvGoodFeaturesToTrack(m_oldFrame, m_eig_image, m_tmp_image, m_oldImgFeatures, &m_numFeatures, 0.02, 1.0);

			//run flow
			int level = 2;// num levels in pyramid
			delete[] m_pointFound;	// assumes m_pointFound starts out null; avoids leaking last frame's buffer
			m_pointFound = new char[m_numFeatures];
			float* err = new float[s_maxFeatures];

			cvCalcOpticalFlowPyrLK(m_oldFrame, m_newFrame, 
				m_pyramidOld, m_pyramidNew, 
				m_oldImgFeatures, m_newImgFeatures, m_numFeatures, 
				s_frameSize, level, m_pointFound, err, m_termCriteria, 0);

			//set flags if necessary
			//NOTE: m_flowFlags is recorded here but never passed to
			//cvCalcOpticalFlowPyrLK above, which receives a literal 0
			if (firstFrame)
			{
				m_flowFlags = CV_LKFLOW_PYR_A_READY;
				firstFrame = false;
			}

			//affect vector field
			ofVec2f deltaVec;
			Particle* tmpVec;

			try
			{

				for (int i = 0; i < m_numFeatures; ++i)
				{
					if (!m_pointFound[i]
						|| m_newImgFeatures[i].x < 0
						|| m_newImgFeatures[i].y < 0
						|| m_newImgFeatures[i].x >= ofGetWidth()
						|| m_newImgFeatures[i].y >= ofGetHeight())
						continue;

					deltaVec = ofVec2f(m_newImgFeatures[i].x - m_oldImgFeatures[i].x, m_newImgFeatures[i].y - m_oldImgFeatures[i].y);

					if (deltaVec.lengthSquared() < m_vectorFieldNorm * m_vectorFieldNorm)
						continue;

					//closest field value
					int posX = (int)m_newImgFeatures[i].x * s_frameSizeInv.x * ofGetWidth() * s_vectorFieldDensityInv;
					int posY = (int)(s_frameSize.height - m_newImgFeatures[i].y) * s_frameSizeInv.y * ofGetHeight() * s_vectorFieldDensityInv;

					if (posX >= m_fieldWidth) continue;
					if (posY >= m_fieldHeight) continue;

					tmpVec = &m_vectorField[(posX * m_fieldHeight) + posY];

					//reverse for cv opposite y coord
					deltaVec.y *= -1;

					tmpVec->vel += deltaVec * timeScale * 0.5f;

					tmpVec->vel.limit(tmpVec->maxSpeed);
				}
			}
			catch (const exception& e)	// catch by reference to avoid slicing
			{
				cout << e.what() << endl;
			}

			delete[] err;	// release the per-frame error buffer
		}
	}
	else
	{
		//no video camera, use noise
		Particle* vect;
		for (int i = 0; i < m_fieldWidth; ++i)
		{
			for (int j = 0; j < m_fieldHeight; ++j)
			{
				vect = &m_vectorField[(i*m_fieldHeight) + j];

				float noiseNum = ((i*m_fieldHeight) + j) + ofGetFrameNum() * 0.001f;
				vect->vel = ofVec2f(-1.f + ofNoise(noiseNum)*2.f, -1.f + 2.f * ofNoise(noiseNum + 1000)) * vect->maxSpeed;
			}
		}
	}

	//////////////////////
	//  simulate crowd  //
	//////////////////////

	//generate a person every 2 seconds
	if (stepTime - crowdLastGenerated > 2000
		&& pPeople->size() < s_maxPeopleParticles)
	{
		float y = ofGetWindowHeight();
		y = ofRandom(y * 0.25f, y * 0.75f);

		float x = -s_generationBuffer;
		if (ofRandom(1) > 0.5)
			x = ofGetWidth() + s_generationBuffer;

		//debug
		x = ofRandom(ofGetWidth());
		y = ofRandom(ofGetHeight());

		Particle* p = ppCurrentScene[0]->addParticleOfProperType(
			ofVec2f(x, y)
			);
		
		p->maxSpeed = s_vectorFieldDensity * 0.1f;
		p->vel = ofVec2f( (x < 0) ? 0.5f : -0.5f, 0.f);
		crowdLastGenerated = stepTime;
	}

	//move people across screen or remove them
	for (vector<Particle*>::iterator p = pPeople->begin(); p != pPeople->end();)
	{
		ofVec3f targetVel = ofVec3f(0.f, 0.f);

		//calculate vector field that's close

		int fieldX = (*p)->pos.x / s_vectorFieldDensity;
		int fieldY = (*p)->pos.y / s_vectorFieldDensity;

		if (fieldX < 2) fieldX = 2;
		else if (fieldX > m_fieldWidth - 3) fieldX = m_fieldWidth - 3;
		if (fieldY < 2) fieldY = 2;
		else if (fieldY > m_fieldHeight - 3) fieldY = m_fieldHeight - 3;

		for (int i = -2; i < 3; ++i)
		{
			for (int j = -2; j < 3; ++j)
			{
				int pos = ((fieldX + i) * m_fieldHeight) + (fieldY + j);
				targetVel += (3.f - std::max(abs(i), abs(j))) * m_vectorField[pos].vel;
			}
		}
		targetVel *= 0.029f;
		(*p)->accel += (targetVel - (*p)->vel) * timeScale * 0.1f;

		//update person
		(*p)->update(timeScale);// stepTimeDelta;

		if ((*p)->pos.x > ofGetWindowWidth() + s_generationBuffer*1.5f
			|| (*p)->pos.x < -s_generationBuffer * 1.5f
			|| (*p)->pos.y > ofGetHeight() + s_generationBuffer * 1.5f
			|| (*p)->pos.y < -s_generationBuffer * 1.5f)
		{
			p = pPeople->erase(p);
		}
		else
		{
			++p;
		}
	}
	lastFrame = stepTime;

}
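As noted in update() above, m_flowFlags is computed but a literal 0 is always passed, so the pyramid for the previous frame is rebuilt every call. A hedged sketch (all names assumed) of how CV_LKFLOW_PYR_A_READY is normally threaded through repeated calls, as the KLT tracking example later in this collection does:

#include <cv.h>

/* Track one frame step, reusing the pyramid built for 'prev' on the
   next call; 'flags' must persist across calls (start it at 0). */
static void track_step( IplImage* prev, IplImage* curr,
                        IplImage* pyrPrev, IplImage* pyrCurr,
                        CvPoint2D32f* ptsA, CvPoint2D32f* ptsB,
                        int count, char* status, int* flags )
{
    cvCalcOpticalFlowPyrLK( prev, curr, pyrPrev, pyrCurr, ptsA, ptsB, count,
                            cvSize( 10, 10 ), 3, status, 0,
                            cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ),
                            *flags );
    *flags |= CV_LKFLOW_PYR_A_READY;  /* pyrPrev is valid for the next call */
}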
Exemple #17
0
void processImagePair(const char *file1, const char *file2, CvVideoWriter *out, struct CvMat *currentOrientation) {
  // Load two images and allocate other structures
	IplImage* imgA = cvLoadImage(file1, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(file2, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgBcolor = cvLoadImage(file2);
 
	CvSize img_sz = cvGetSize( imgA );
	int win_size = 15;
  
	// Get the features for tracking
	IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
	IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
 
	int corner_count = MAX_CORNERS;
	CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
 
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
		0.05, 3.0, 0, 3, 0, 0.04 );
 
  fprintf(stderr, "%s: Corner count = %d\n", file1, corner_count);
 
	cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 50, 0.03 ) );
 
	// Call Lucas Kanade algorithm
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];
 
	CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
 
	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
 
	CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
 
  calcNecessaryImageRotation(imgA);
 
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, 
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
 
   CvMat *transform = cvCreateMat(3,3, CV_32FC1);
   CvMat *invTransform = cvCreateMat(3,3, CV_32FC1);
	// Find a homography based on the gradient
   CvMat cornersAMat = cvMat(1, corner_count, CV_32FC2, cornersA);
   CvMat cornersBMat = cvMat(1, corner_count, CV_32FC2, cornersB);
   cvFindHomography(&cornersAMat, &cornersBMat, transform, CV_RANSAC, 15, NULL);

   cvInvert(transform, invTransform);
   cvMatMul(currentOrientation, invTransform, currentOrientation);
   // save the translated image
 	 IplImage* trans_image = cvCloneImage(imgBcolor);
   cvWarpPerspective(imgBcolor, trans_image, currentOrientation, CV_INTER_CUBIC+CV_WARP_FILL_OUTLIERS);

   printf("%s:\n", file1);
   PrintMat(currentOrientation);

  // cvSaveImage(out, trans_image);
  cvWriteFrame(out, trans_image);

  cvReleaseImage(&eig_image);
  cvReleaseImage(&tmp_image);  
  cvReleaseImage(&trans_image);
  cvReleaseImage(&imgA);
  cvReleaseImage(&imgB);
  cvReleaseImage(&imgBcolor);
  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  
  cvReleaseMat(&transform);
  cvReleaseMat(&invTransform);
  delete [] cornersA;
  delete [] cornersB;
  
  
}
// compute the optical flow
void OpticalFlowLK::make()
{
	if(!imgA || !imgB || !eig_image || !tmp_image)
	{
		return;
	}

	int i=0;

#if 1
	cornerCount = LK_MAX_CORNERS;
	//
	// find feature points suitable for tracking
	//
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, 
							cornersA,		// output: the detected corners
							&cornerCount,	// output: how many corners were found
							0.01,			// multiplier giving the minimum acceptable corner quality
							5.0,			// minimum allowed distance between corners
							0,				// mask defining a ROI (NULL searches the whole image)
							5,				// averaging block size
							0,				// if !=0 use cvCornerHarris(), otherwise cvCornerMinEigenVal()
							0.04 );			// parameter for cvCornerHarris()
#else
	//
	// Cover the image with a uniform grid of points
	//
	int step_x = imgA->width / 5;
	int step_y = imgA->height / 5;

	int points_count = (imgA->width / step_x + 1) * (imgA->height / step_y + 1);

	if(points_count>LK_MAX_CORNERS){
		delete []cornersA;
		cornersA=0;
		delete []cornersB;
		cornersB=0;

		cornersA= new CvPoint2D32f[ points_count ];
		cornersB= new CvPoint2D32f[ points_count ];
		featuresFound = new char[ points_count ];
		featureErrors = new float[ points_count ];
		assert(cornersA);
		assert(cornersB);
		assert(featuresFound);
		assert(featureErrors);
	}

	cornerCount = 0;
	for ( int j = 1; j < imgA->height; j += step_y){
		for ( i = 1; i < imgA->width; i += step_x){
			cornersA[cornerCount] = cvPoint2D32f((float)i, (float)j);
			cornerCount++;
		}
	}
#endif

	//
	// refine the corner locations to sub-pixel accuracy
	//
	cvFindCornerSubPix( imgA, cornersA, cornerCount,
						cvSize(LK_WINDOW_SIZE, LK_WINDOW_SIZE), // half size of the search window
						cvSize(-1,-1),
						cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, LK_ITER_COUNT, 0.03) );

	// determine the pyramid buffer size 
	CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );

	if(pyrA!=0)
	{
		cvReleaseImage(&pyrA);
		cvReleaseImage(&pyrB);
	}
	pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );

	//
	// compute the optical flow
	//
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB,
							cornersA,
							cornersB,
							cornerCount,
							cvSize( LK_WINDOW_SIZE, LK_WINDOW_SIZE ),// search window size at each pyramid level
							5,										 // maximum pyramid level
							featuresFound, // element is set to 1 if the corresponding feature was found
							featureErrors, // difference between the original and moved points (may be NULL)
							cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, LK_ITER_COUNT, .3 ),
							0 );

	center.x=0.0;
	center.y=0.0;
	cornerCountGood = 0;
	for( i=0; i<cornerCount; i++ )
	{
		// skip points that were not found or whose error is too large
		if( featuresFound[i]==0 || featureErrors[i]>LK_MAX_FEATURE_ERROR )
			continue;

		center.x += cornersB[i].x;
		center.y += cornersB[i].y;
		cornerCountGood++;
	}

	if(cornerCountGood)
	{
		center.x /= cornerCountGood;
		center.y /= cornerCountGood;
	}

}
Exemple #19
0
void calc_homography(const IplImage *src, IplImage *dst[], CvMat *hom[], int image_num)
{
	CvSize size = cvSize(src->width, src->height);
	IplImage *img_prev = cvCreateImage(size, src->depth, 1);//single-channel image
	IplImage *img_curr = cvCreateImage(size, src->depth, 1);
	cvCvtColor(src, img_prev, CV_BGR2GRAY);
	
	CvPoint2D32f features[MAX_CORNERS];
	CvPoint2D32f features_curr[MAX_CORNERS];
	int corner_count = MAX_CORNERS;
	
	int t1 = clock();
	cvGoodFeaturesToTrack(img_prev, NULL, NULL, features, &corner_count, 0.02, 0.5, NULL, 3, 0, 0.04);
	//the 'features' array filled by cvGoodFeaturesToTrack is effectively its output
	//quality level (0.02): minimum eigenvalue fraction for a point to count as a corner
	//min_distance (0.5): corners must be at least that many pixels apart
	st1 += clock()-t1;
	
	t1 = clock();
	cvFindCornerSubPix(img_prev, features, corner_count, cvSize(WIN_SIZE,WIN_SIZE), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03));
	//refine the corners to sub-pixel accuracy
	//the search window size is 21*21
	st2 += clock()-t1;
	
	char feature_found[MAX_CORNERS];
	float feature_error[MAX_CORNERS];
	CvPoint2D32f good_src[MAX_CORNERS];
	CvPoint2D32f good_dst[MAX_CORNERS];
	CvSize pyr_size = cvSize(img_prev->width + 8, img_prev->height/3);//scratch-buffer size convention for cvCalcOpticalFlowPyrLK
	IplImage *pyr_prev = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);//two pyramid image buffers
	IplImage *pyr_curr = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);
	
	for (int k = 0; k < image_num; ++k)
	{
		cvCvtColor(dst[k], img_curr, CV_BGR2GRAY);
		t1 = clock();
		cvCalcOpticalFlowPyrLK(img_prev, img_curr, pyr_prev, pyr_curr, features, features_curr, corner_count, cvSize(WIN_SIZE,WIN_SIZE), 5, feature_found, feature_error, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03), 0);
		//compute pyramidal optical flow with 5 levels
		//tracked positions in img_curr are written to features_curr
		//feature_found holds corner_count entries
		st3 += clock()-t1;
	
		int good_num = 0;
		for (int i = 0; i < corner_count; ++i)
		{
			if (feature_found[i] != 0 && feature_error[i] < 550)
			//keep only the good features
			{
				good_src[good_num] = features[i];
				good_dst[good_num] = features_curr[i];
				++good_num;
			}
		}
	
		if (good_num >= 4)
		{
			CvMat pt_src = cvMat(1, good_num, CV_32FC2, good_src);
			CvMat pt_dst = cvMat(1, good_num, CV_32FC2, good_dst);
			
			t1 = clock();
			cvFindHomography(&pt_src, &pt_dst, hom[k], CV_RANSAC, 5, NULL);
			st4 += clock()-t1;
		}
		else fprintf(stderr, "Unable to calc homography : %d\n", k);
	}
	cvReleaseImage(&pyr_prev);
	cvReleaseImage(&pyr_curr);
	cvReleaseImage(&img_prev);
	cvReleaseImage(&img_curr);
}
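calc_homography() leaves one 3x3 matrix per destination frame in hom[k]; applying it is a single cvWarpPerspective call. A minimal usage sketch (the function name is illustrative):

#include <cv.h>

/* Warp src into dst using a homography H as computed by calc_homography(). */
static void apply_homography( const IplImage* src, IplImage* dst, const CvMat* H )
{
    cvWarpPerspective( src, dst, H,
                       CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
}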
void defense::HormigasUpdate(){

    float ThDis =13.4;
//    KtarHormigas = 20.0;
//    KdamHormigas = 1.05;
    G_QualityHormigas=.001;
    G_minDistanceHormigas=4 + 4*(int)Slider1/127.0;
    
    float dt = 0.25;
    
    TheTargetsHormigas.clear();
    IplImage* eig_image = cvCreateImage( cvSize(Nx,Ny), IPL_DEPTH_32F, 1 );
    IplImage* tmp_image = cvCreateImage( cvSize(Nx,Ny), IPL_DEPTH_32F, 1 );       
    
    
    int corner_count = NobHormigas;
    CvPoint2D32f* cornersA = new CvPoint2D32f[ NobHormigas ];
    
    
    cvGoodFeaturesToTrack(TheInputGray,eig_image, tmp_image,cornersA,&corner_count,
                          G_QualityHormigas,G_minDistanceHormigas,0,3,0,0);
    
    for (int k=0;k<corner_count;k++){
        
        TheTargetsHormigas.push_back(ofVec2f(cornersA[k].x,cornersA[k].y));
    }
    
    
    // resetting
    
    if (!TheTargetsHormigas.empty()){     
        
        for(int i =0;i< NobHormigas;i++){
            TargIndHormigas[i]=-1;
        }
        
        for (int kt=0; kt<TheTargetsHormigas.size(); kt++) {
            float MinDis =10000000;
            int MinIndi =-5;
            for (int ko=0; ko<NobHormigas; ko++) {
                
                ofVec2f ErrorVec;
                ErrorVec = TheTargetsHormigas[kt]-TheObjectsHormigas[ko];
                float dis = ErrorVec.length();
                if ((dis < MinDis)&&(TargIndHormigas[ko]==-1)){
                    MinIndi = ko;  
                    MinDis = dis;
                }
            }
            if (MinIndi >= 0)				// guard: every object may already have a target assigned
                TargIndHormigas[MinIndi] = kt;
        }
        
        
        for (int ko=0; ko<NobHormigas; ko++) {
            
            ofVec2f UpdateVec;
            float MinDis =10000000;
            int MinIndi =-5;
            
            if (TargIndHormigas[ko]==-1) {
                MinDis =10000000;
                for (int kt=0; kt<TheTargetsHormigas.size(); kt++) {
                    ofVec2f ErrorVec;
                    ErrorVec = TheTargetsHormigas[kt]-TheObjectsHormigas[ko];
                    float dis = ErrorVec.length();
                    if (dis < MinDis){
                        MinDis = dis;  
                        MinIndi = kt;
                    }
                    
                }
                         
                TargIndHormigas[ko] = MinIndi;
            }
            
            UpdateVec = TheTargetsHormigas[TargIndHormigas[ko]]-TheObjectsHormigas[ko];
            float newDis = UpdateVec.length();
            UpdateVec.normalize();
            
            
            ofVec2f acc;
            if (newDis < ThDis){
                acc = (newDis/10.0)*(KtarHormigas*UpdateVec) - KdamHormigas*TheVelocitiesHormigas[ko];
            }
            else{
                acc = (KtarHormigas*UpdateVec) - KdamHormigas*TheVelocitiesHormigas[ko];
            }
            TheVelocitiesHormigas[ko] = TheVelocitiesHormigas[ko] - (-dt)*acc;
            TheObjectsHormigas[ko]  =  TheObjectsHormigas[ko] - (-dt)*TheVelocitiesHormigas[ko];
        }
        
    }// closing the if for a non-empty target list
    
    
    
    cvReleaseImage(&eig_image);
    cvReleaseImage(&tmp_image);
}
Exemple #21
0
int fbtrack(IplImage *imgI, IplImage *imgJ, float *bb, float *bbnew,
            float *scaleshift)
{
    char level = 5;
    int numAdd = 50;

    // find good points
    const int margin = 5;
    CvRect rect = cvRect(bb[0],bb[1],bb[2]-bb[0]+1,bb[3]-bb[1]+1);
    cvSetImageROI(imgI, rect);
    IplImage *eig_image = cvCreateImage(cvGetSize(imgI), 32, 1);
    IplImage *temp_image = cvCreateImage(cvGetSize(imgI), 32, 1);
    CvPoint2D32f corners [numAdd];
    cvGoodFeaturesToTrack(imgI, eig_image, temp_image, corners, &numAdd, 0.01, 0, NULL, 2, 0, 0.04);
    cvReleaseImage(&(eig_image));
    cvReleaseImage(&(temp_image));
    cvResetImageROI(imgI);
    //printf("%d - number of features\n", numAdd);
    if (numAdd > 50) {
      numAdd = 50;
    }

    int numM = (int)sqrt((double)(100 - numAdd));
    int numN = (int)sqrt((double)(100 - numAdd));
    const int nPoints = numM * numN + numAdd;
    const int sizePointsArray = nPoints * 2;


    float fb[nPoints];
    float ncc[nPoints];
    char status[nPoints];

    float pt[sizePointsArray];
    float ptTracked[sizePointsArray];
    int nlkPoints;
    CvPoint2D32f *startPoints;
    CvPoint2D32f *targetPoints;
    float *fbLkCleaned;
    float *nccLkCleaned;
    int i, M;
    int nRealPoints;
    float medFb;
    float medNcc;
    int nAfterFbUsage;
    getFilledBBPoints(bb, numM, numN, margin, pt);
    //getFilledBBPoints(bb, numM, numN, 5, &ptTracked);

    //show good points
    //IplImage *tmp_show = cvCreateImage(cvGetSize(imgI), imgI->depth, imgI->nChannels);
    //cvCopy(imgI, tmp_show, NULL);
    //for(i = numN+numM; i < numN+numM+numAdd; i++) {
    //    cvCircle(tmp_show, CvPoint{bb[0]+corners[i-(numN+numM)].x, bb[1]+corners[i-(numN+numM)].y}, 2, CvScalar{0,0,255}, 1, 8, 0);
    //}
    //cvRectangle(tmp_show, CvPoint{bb[0],bb[1]},CvPoint{bb[2],bb[3]},CvScalar{0,0,255},1,8,0);
    //cvShowImage("name",tmp_show);

    //copy good points
    for(i = numN*numM; i < numN*numM+numAdd; i++)
    {
            pt[2*i + 0] = (int)(corners[i-(numN*numM)].x+bb[0]);
            pt[2*i + 1] = (int)(corners[i-(numN*numM)].y+bb[1]);
    }

    memcpy(ptTracked, pt, sizeof(float) * sizePointsArray);

    initImgs();
    trackLK(imgI, imgJ, pt, nPoints, ptTracked, nPoints, level, fb, ncc, status);
    initImgs();
    //  char* status = *statusP;
    nlkPoints = 0;

    for(i = 0; i < nPoints; i++)
    {
        nlkPoints += status[i];
    }

    startPoints = (CvPoint2D32f *) malloc(nlkPoints * sizeof(CvPoint2D32f));
    targetPoints = (CvPoint2D32f *) malloc(nlkPoints * sizeof(CvPoint2D32f));
    fbLkCleaned = (float *) malloc(nlkPoints * sizeof(float));
    nccLkCleaned = (float *) malloc(nlkPoints * sizeof(float));

    M = 2;
    nRealPoints = 0;

    for(i = 0; i < nPoints; i++)
    {
        //TODO:handle Missing Points
        //or status[i]==0
        if(ptTracked[M * i] == -1)
        {
        }
        else
        {
            startPoints[nRealPoints].x = pt[2 * i];
            startPoints[nRealPoints].y = pt[2 * i + 1];
            targetPoints[nRealPoints].x = ptTracked[2 * i];
            targetPoints[nRealPoints].y = ptTracked[2 * i + 1];
            fbLkCleaned[nRealPoints] = fb[i];
            nccLkCleaned[nRealPoints] = ncc[i];
            nRealPoints++;
        }
    }

    //assert nRealPoints==nlkPoints
    medFb = getMedian(fbLkCleaned, nlkPoints);
    medNcc = getMedian(nccLkCleaned, nlkPoints);
    /*  printf("medianfb: %f\nmedianncc: %f\n", medFb, medNcc);
     printf("Number of points after lk: %d\n", nlkPoints);*/
    nAfterFbUsage = 0;

    for(i = 0; i < nlkPoints; i++)
    {
        if((fbLkCleaned[i] <= medFb) && (nccLkCleaned[i] >= medNcc))
        {
            startPoints[nAfterFbUsage] = startPoints[i];
            targetPoints[nAfterFbUsage] = targetPoints[i];
            nAfterFbUsage++;
        }
    }

    /*printf("Number of points after fb correction: %d\n", nAfterFbUsage);*/
    //  showIplImage(IMGS[1]);
    // show "OpticalFlow" fb filtered.
    //  drawLinesCvPoint2D32f(imgI, startPoints, nRealPoints, targetPoints,
    //      nRealPoints);
    //  showIplImage(imgI);

    predictbb(bb, startPoints, targetPoints, nAfterFbUsage, bbnew, scaleshift);
    /*printf("bbnew: %f,%f,%f,%f\n", bbnew[0], bbnew[1], bbnew[2], bbnew[3]);
     printf("relative scale: %f \n", scaleshift[0]);*/
    //show picture with tracked bb
    //  drawRectFromBB(imgJ, bbnew);
    //  showIplImage(imgJ);
    free(startPoints);
    free(targetPoints);
    free(fbLkCleaned);
    free(nccLkCleaned);

    if(medFb > 10) return 0;
    else return 1;

}
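fbtrack() gates points on the medians of the forward-backward and NCC scores through a project-local getMedian() that is not shown here. One plausible implementation, purely as a sketch (the project's own version may differ):

#include <stdlib.h>
#include <string.h>

static int cmp_float( const void* a, const void* b )
{
    float d = *(const float*)a - *(const float*)b;
    return (d > 0) - (d < 0);
}

/* Median of n floats; copies the input so the caller's order is preserved. */
static float getMedianSketch( const float* v, int n )
{
    float* tmp = (float*)malloc( n * sizeof(float) );
    float med;
    memcpy( tmp, v, n * sizeof(float) );
    qsort( tmp, n, sizeof(float), cmp_float );
    med = ( n % 2 ) ? tmp[n/2] : 0.5f * ( tmp[n/2 - 1] + tmp[n/2] );
    free( tmp );
    return med;
}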
int main(int argc, char **argv) {
  // Check parameters
  if (argc < 2) {
    fprintf(stderr, "%s: %s\n", APP_NAME, "No video name given");
    fprintf(stderr, "Usage: %s <video file name> [output file name]\n", APP_NAME);

    exit(EXIT_FAILURE);
  }

  char *output_file_name;
  if (argc == 3) {
    output_file_name = argv[2];
  }
  else {
    output_file_name = OUTPUT_FILE_NAME;
  }

  // Load video
  char *file_name = argv[1];
  CvCapture *video = cvCaptureFromFile(file_name);

  if (!video) {
    exit(EXIT_FAILURE);
  }

  // Extract video parameters
  CvSize video_frame_size;
  video_frame_size.width = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_WIDTH);
  video_frame_size.height = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_HEIGHT);
  double video_fps = cvGetCaptureProperty(video, CV_CAP_PROP_FPS);
  long video_frame_count = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_COUNT);

  // Initialize video writer
  CvVideoWriter *video_writer = cvCreateVideoWriter(output_file_name,
    FOURCC, video_fps, video_frame_size, true);

  // Initialize variables for optical flow calculation
  IplImage *current_frame = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 3);
  IplImage *eigen_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);
  IplImage *temp_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);

  int corner_count = MAX_CORNERS;
  CvPoint2D32f corners[2][MAX_CORNERS];
  char features_found[MAX_CORNERS];
  float feature_errors[MAX_CORNERS];

  IplImage *frame_buffer[2];
  IplImage *pyramid_images[2];
  CvSize pyramid_size = cvSize(video_frame_size.width + 8, video_frame_size.height / 3);

  int i;
  for (i = 0; i < 2; i++) {
    frame_buffer[i] = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 1);
    pyramid_images[i] = cvCreateImage(pyramid_size, IPL_DEPTH_32F, 1);
  }

  // Process video
  while (query_frame(video, frame_buffer, current_frame)) {
    // Corner finding with Shi and Tomasi
    cvGoodFeaturesToTrack(
      frame_buffer[0],
      eigen_image,
      temp_image,
      corners[0],
      &corner_count,
      0.01,
      5.0,
      0,
      3,
      0,
      0.4);

    cvFindCornerSubPix(
      frame_buffer[0],
      corners[0],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      cvSize(-1, -1),
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3));

    // Pyramid Lucas-Kanade
    cvCalcOpticalFlowPyrLK(
      frame_buffer[0],
      frame_buffer[1],
      pyramid_images[0],
      pyramid_images[1],
      corners[0],
      corners[1],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      5,
      features_found,
      feature_errors,
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3),
      0);

    // Draw optical flow vectors
    int i;
    double l_max = 0., l;
    for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }
      l = sqrt(corners[1][i].x*corners[1][i].x+corners[1][i].y*corners[1][i].y);
      if (l > l_max) l_max = l;
    }
	
    for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }

      l = sqrt(corners[1][i].x*corners[1][i].x+corners[1][i].y*corners[1][i].y);
      double spinSize = 5.0 * l / l_max;	// scale the arrowhead by the vector magnitude

      CvPoint points[2];
      points[0] = cvPoint(cvRound(corners[0][i].x), cvRound(corners[0][i].y));
      points[1] = cvPoint(cvRound(corners[1][i].x), cvRound(corners[1][i].y));

      cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
	  
	  double angle;                                                                          
	  angle = atan2( (double) points[0].y - points[1].y, (double) points[0].x - points[1].x );
	
	  points[0].x = (int) (points[1].x + spinSize * cos(angle + 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle + 3.1416 / 4));
	  cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);

	  points[0].x = (int) (points[1].x + spinSize * cos(angle - 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle - 3.1416 / 4));
	  cvLine( current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
    }

    cvWriteFrame(video_writer, current_frame);
  }

  // Clean up
  cvReleaseImage(&current_frame);
  cvReleaseImage(&eigen_image);
  cvReleaseImage(&temp_image);

  for (i = 0; i < 2; i++) {
    cvReleaseImage(&frame_buffer[0]);
    cvReleaseImage(&pyramid_images[0]);
  }
  cvReleaseCapture(&video);
  cvReleaseVideoWriter(&video_writer);

  return 0;
}
int main(int argc, const char * argv[]) {
    
    IplImage* imgA = cvLoadImage( "data/OpticalFlow0.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* imgB = cvLoadImage( "data/OpticalFlow1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    
    CvSize img_sz = cvGetSize( imgA );
    int win_size = 10;
    
    IplImage* imgC = cvLoadImage( "data/OpticalFlow1.jpg", CV_LOAD_IMAGE_UNCHANGED );
    
    IplImage* image_eig = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    IplImage* image_tmp = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    
    int corner_count = MAX_CORNERS;
    CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
    CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
    
    cvGoodFeaturesToTrack( imgA, image_eig, image_tmp, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04 );
    
    cvFindCornerSubPix(
                       imgA,
                       cornersA,
                       corner_count,
                       cvSize(win_size, win_size),
                       cvSize(-1, -1),
                       cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20.0, 0.03)
                       );
    
    char features_found[ MAX_CORNERS ];
    float feature_errors[ MAX_CORNERS ];
    
    CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
    
    IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
    IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
    
    cvCalcOpticalFlowPyrLK(imgA,
                           imgB,
                           pyrA,
                           pyrB,
                           cornersA,
                           cornersB,
                           corner_count,
                           cvSize(win_size, win_size),
                           5,
                           features_found,
                           feature_errors,
                           cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20.0, 0.3),
                           0
                           );
    
    for( int i=0; i<corner_count; i++ ) {
        if( features_found[i]==0|| feature_errors[i]>550 ) {
            printf("Error is %f\n",feature_errors[i]);
            continue;
        }
        CvPoint p0 = cvPoint(
                             cvRound( cornersA[i].x ),
                             cvRound( cornersA[i].y )
                             );
        CvPoint p1 = cvPoint(
                             cvRound( cornersB[i].x ),
                             cvRound( cornersB[i].y )
                             );
        
        cvLine( imgC, p0, p1, CV_RGB(255,0,0),2 );
    }
    cvNamedWindow("ImageA",0);
    cvNamedWindow("ImageB",0);
    cvNamedWindow("LKpyr_OpticalFlow",0);
    cvShowImage("ImageA",imgA);
    cvShowImage("ImageB",imgB);
    cvShowImage("LKpyr_OpticalFlow",imgC);
    cvWaitKey(0);
    
    return 0;

}
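For reference, the same two-image flow in the modern C++ API, which manages its pyramids internally so none of the scratch-buffer bookkeeping above is needed; a hedged sketch, assuming OpenCV 2.x or later:

#include <opencv2/opencv.hpp>
#include <vector>

// Equivalent of the example above with cv::calcOpticalFlowPyrLK;
// status/err take the place of the features_found/feature_errors arrays.
static void lk_modern( const char* file0, const char* file1 )
{
    cv::Mat a = cv::imread( file0, cv::IMREAD_GRAYSCALE );
    cv::Mat b = cv::imread( file1, cv::IMREAD_GRAYSCALE );
    std::vector<cv::Point2f> ptsA, ptsB;
    cv::goodFeaturesToTrack( a, ptsA, 500, 0.01, 5.0 );
    std::vector<unsigned char> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK( a, b, ptsA, ptsB, status, err,
                              cv::Size( 21, 21 ), 5 );
}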
int main( int argc, char** argv ) 
{ 
     
    FILE *ptr; 
    ptr=fopen("dataerr.dat","w+"); 
    CvCapture* capture = 0; 
 
    int counter1=0; 
 
    IplImage* image2 = 0; 
 
    float sumX=0; 
    float sumY=0; 
 
    float err_X; 
    float err_Y; 
 
    int XX=0; 
    int YY=0; 
 
    CvPoint ipt1; 
 
    int tempxx1=0; 
    int tempyy1=0; 
    int tempxx2=0; 
    int tempyy2=0; 
 
     
 
    char *imgFmt="pgm"; 
    char str1[100]; 
 
    /* Initialize the error array */ 
    for(int kk=0;kk<=400;kk++) 
    { 
        optical_flow_error[0][kk]=0; 
        optical_flow_errorP[0][kk]=0; 
        optical_flow_error[1][kk]=0; 
        optical_flow_errorP[1][kk]=0; 
    } 
 
    //capturing frame from video 
    capture = cvCaptureFromAVI("soccer_track.mpeg"); 
 
    cvNamedWindow( "KLT-Tracking Group_R", 0 ); 
    cvSetMouseCallback( "KLT-Tracking Group_R", on_mouse, 0 ); 
 
    if(add_remove_pt==1) 
    { 
        flagg=1; 
    } 
 
    for(;;) 
    { 
        IplImage* frame = 0; 
         
        int i, k, c; 
 
        //creating file name 
        sprintf(str1,"%d.%s",counter1,imgFmt); 
        err_X=0; 
        err_Y=0; 
        sumX=0; 
        sumY=0; 
 
        //grab and decode the next frame from the video 
 
        frame = cvQueryFrame( capture ); 
 
     
        if( !frame ) 
            break; 
 
        if( !image ) 
            //First frame: allocate buffers and do some initialization work 
        { 
            // allocate all the image buffers  
            image = cvCreateImage( cvGetSize(frame), 8, 3 ); 
            image->origin = frame->origin; 
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );//make it grey 
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );//the previous frame in grey mode 
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//pyramid frame 
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//previous pyramid frame 
            /* Define two pointers */ 
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            status = (char*)cvAlloc(MAX_COUNT); 
            flags = 0; 
        } 
 
        cvCopy( frame, image, 0 );//frame->image 
 
        //converting the image into gray scale for further computation 
        cvCvtColor( image, grey, CV_BGR2GRAY ); 
         
        if( need_to_init ) 
        { 
             
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            double quality = 0.01; 
            double min_distance = 10; 
         
 
            //using good features to track 
            count = MAX_COUNT; 
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count, 
                                   quality, min_distance, 0, 3, 0, 0.04 ); 
            cvFindCornerSubPix( grey, points[1], count, 
            cvSize(win_size,win_size), cvSize(-1,-1), 
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            cvReleaseImage( &eig ); 
            cvReleaseImage( &temp ); 
 
 
 
            add_remove_pt = 0; 
        } 
        else if( count > 0 ) 
        { 
            //using pyramidal optical flow method 
            cvCalcOpticalFlowPyrLK(  
                    prev_grey, grey,  
                    prev_pyramid, pyramid, 
                    points[0], points[1],  
                    count, cvSize(win_size,win_size),  
                    5, status,0, 
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags ); 
             
            flags |= CV_LKFLOW_PYR_A_READY|CV_LKFLOW_PYR_B_READY; 
 
            for( i = k = 0; i < count; i++ ) 
            { 
                /* When need to add or remove the point */ 
                if( add_remove_pt ) 
                { 
 
                    double dx = pt.x - points[1][i].x; 
                    double dy = pt.y - points[1][i].y; 
                    /* Calculate the distance between the selected point and the tracked point; 
                    if they are closer than 5 pixels, cancel the add/remove action 
                    */ 
                    if( dx*dx + dy*dy <= 25 ) 
                    { 
                        add_remove_pt = 0; 
                        continue; 
                    } 
                } 
                 
                if( !status[i] )//skip points that were not tracked correctly 
                    continue; 
                
                points[1][k++] = points[1][i]; 
 
                ipt1=cvPointFrom32f(points[1][i]);//get a point 
                 
            //record the tracked position for the frame-to-frame error computation below 
                optical_flow_error[0][i]=ipt1.x; 
                optical_flow_error[1][i]=ipt1.y; 
 
 
            } 
            //taking average error for moving the window 
 
            for(int zz=0; zz<count;zz++) 
                { 
                    errX[zz]=optical_flow_error[0][zz]- optical_flow_errorP[0][zz]; 
                    errY[zz]=optical_flow_error[1][zz]- optical_flow_errorP[1][zz]; 
 
                    sumX=sumX+errX[zz]; 
                    sumY=sumY+errY[zz]; 
 
                    optical_flow_errorP[0][zz]=optical_flow_error[0][zz]; 
                    optical_flow_errorP[1][zz]=optical_flow_error[1][zz]; 
 
                } 
 
                fprintf(ptr,"%d\n",count); 
                 
                err_X=sumX/count; 
                err_Y=sumY/count; 
 
            if( flagg == 1 )
            {
                static int startonce = 0;

                if( startonce == 0 )
                {
                    // center a 40x40 window on the clicked point, once
                    tempxx1 = pt.x - 20;
                    tempyy1 = pt.y - 20;
                    tempxx2 = pt.x + 20;
                    tempyy2 = pt.y + 20;

                    XX = pt.x;
                    YY = pt.y;

                    startonce = 1;
                }

                // shift the window only for small average displacements,
                // so one bad frame cannot throw it far off
                if( err_X < 3 )
                {
                    tempxx1 = tempxx1 + err_X;
                    tempyy1 = tempyy1 + err_Y;
                    tempxx2 = tempxx2 + err_X;
                    tempyy2 = tempyy2 + err_Y;

                    XX = XX + err_X;
                    YY = YY + err_Y;
                    fprintf(ptr, "%f %f\n", err_X, err_Y);
                }

                printf("\n%f", err_X);

                // draw the moving window and its center
                cvRectangle(image, cvPoint(tempxx1,tempyy1), cvPoint(tempxx2,tempyy2), cvScalar(255,0,0), 1);
                cvCircle(image, cvPoint(XX,YY), 3, cvScalar(0,0,255), 1);
            }
            count = k; 
        } 
 
 
        if( add_remove_pt && count < MAX_COUNT ) 
        { 
            points[1][count++] = cvPointTo32f(pt); 
            cvFindCornerSubPix( grey, points[1] + count - 1, 1, 
                cvSize(win_size,win_size), cvSize(-1,-1), 
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            add_remove_pt = 0; 
        } 
 
        CV_SWAP( prev_grey, grey, swap_temp ); 
        CV_SWAP( prev_pyramid, pyramid, swap_temp ); 
        CV_SWAP( points[0], points[1], swap_points ); 
        need_to_init = 0; 
 
       
        // optionally write the frame to an image file and/or append it to a video
        //if(!cvSaveImage(str1,image)) printf("Could not save: %s\n",str1);
  
         
        cvShowImage( "KLT-Tracking Group_R", image ); 
 
        c = cvWaitKey(100);
        if( (char)c == 27 )
            break;
        switch( (char) c )
        {
        case 's':
            need_to_init = 1;
            break;
        }
 
        counter1++; 
    } 
 
    cvReleaseCapture( &capture ); 
    cvDestroyWindow("KLT-Tracking Group_R"); 
 
    fcloseall(); 
     
    return 0; 
} 
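/*
 * A minimal sketch (not part of the example above) of the window-update idea
 * it implements: average the per-point displacement between two frames and
 * shift a bounding box by that amount. The helper name and signature are
 * assumptions for illustration; the sums are reset on every call.
 */
static void mean_shift_window( const CvPoint2D32f* prev, const CvPoint2D32f* curr,
                               int n, CvRect* window )
{
    double sum_x = 0, sum_y = 0;
    int i;
    if( n <= 0 )
        return;
    for( i = 0; i < n; i++ )
    {
        sum_x += curr[i].x - prev[i].x;
        sum_y += curr[i].y - prev[i].y;
    }
    window->x += cvRound( sum_x / n );  /* move the box by the mean flow */
    window->y += cvRound( sum_y / n );
}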
// to be revised later
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
	CvSize size = cvGetSize(srcImage);
	IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
	IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	int keyButtonCornerCount = 316;
	
	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
	cvSetImageROI(grayImage, rect);
	cvSetImageROI(mask, rect);
	cvSetImageROI(dstImage, rect);
	cvSetImageROI(mask2, rect);

	// extract only the pixel values in the 100-255 range into the mask
	cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
	cvCopy(mask, mask2);

	//cvShowImage("mask", mask);
	//cvShowImage("mask2", mask2);

	// flood-fill the background to zero, then XOR it with the extracted
	// values (mask2) so only the enclosed key-button regions remain
	cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
	cvXor(mask2, mask, dstImage);
	
	//cvShowImage("mask3", mask);
	//cvShowImage("mask4", mask2);
	//cvShowImage("dstImage", dstImage);

	// extract the corners of each key button from the processed image
	cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
	cvFindCornerSubPix (dstImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	
	cvResetImageROI(dstImage);
	for(int i =0 ; i < 316 ; i++){
		keyButtonCorner[i].x += rect.x;
		keyButtonCorner[i].y += rect.y;
	}
	
	initKeyButtonCorner();
	
	srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
	srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
	srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
	srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
	srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
	srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
	srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
	srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	
	cvWarpPerspective(dstImage, veriImage, warp_matrix);
	detectKeyButtonCorner(veriImage);
	cvInvert(warp_matrix, warp_matrix_invert);
	for(int i = 0 ; i < 316 ; i++){	
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
	}
	cvResetImageROI(srcImage);
	cvResetImageROI(mask);
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&veriImage);
	cvReleaseImage(&dstImage);
	cvReleaseImage(&mask);
	cvReleaseImage(&mask2);
	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);	
}
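/*
 * A minimal sketch of the back-projection step used above: map a point through
 * the inverse of a 3x3 perspective matrix and de-homogenize, as done for every
 * entry of keyButtonCorner. The helper name and signature are assumptions.
 */
static CvPoint2D32f warpPointInverse(const CvMat* warp_matrix_invert, CvPoint2D32f p){
	CvPoint2D32f out;
	CvMat* src = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1, CV_32FC1);
	cvmSet(src, 0, 0, p.x);
	cvmSet(src, 1, 0, p.y);
	cvmSet(src, 2, 0, 1);                 // homogeneous coordinates
	cvMatMul(warp_matrix_invert, src, dst);
	float t = (float)cvmGet(dst, 2, 0);   // projective scale
	out.x = (float)(cvmGet(dst, 0, 0) / t);
	out.y = (float)(cvmGet(dst, 1, 0) / t);
	cvReleaseMat(&src);
	cvReleaseMat(&dst);
	return out;
}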
Exemple #26
0
int main(int argc, char** argv) 
{
  // GLOBAL SETTINGS
  static int framecounter=0;
  const CvSize imsize = cvSize(320,240);
  int delay = 0;
  
  const int win_size = 10;
  CvSize pyr_sz = cvSize( imsize.width+8, imsize.height/3 );
  IplImage * pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * rawImage_resized = cvCreateImage( imsize, IPL_DEPTH_8U, 3);

  cvNamedWindow("Test");
  CvGenericTracker tracker;

  // LOAD INPUT FILE
  CvCapture * capture = NULL;
  if (argc==1) {
    capture = cvCreateCameraCapture(0);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
  }else{
    capture = cvCreateFileCapture(argv[1]);
  }
  if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");return 0;}
  cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);

  // START ENDLESS LOOP
  while(1)
  {
	// GET NEXT FRAME
    if (1){
      cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
    }else{
      framecounter++;
    }
    IplImage * rawImage = cvQueryFrame(capture);
    if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
    cvResize(rawImage,rawImage_resized); // resize only after the NULL check
    if (tracker.initialized()){
      tracker.update(rawImage_resized);
    }else{
      tracker.initialize(rawImage_resized);
      tracker.m_framecounter=framecounter;
    }

    // START PROCESSING HERE
    {
	  // Grab the current and next frames from the tracker and allocate
	  // the images and other structures we need for results.
	  CvMat * imgA = tracker.m_currImage;
	  IplImage * imgB = tracker.m_nextImage;
	  IplImage * imgC = cvCloneImage(rawImage_resized);
  
	  // The first thing we need to do is get the features
	  // we want to track.
	  IplImage * eig_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  IplImage * tmp_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  int corner_count = MAX_CORNERS;
	  CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
	  cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
	  cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),
						 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

	  // Call the Lucas Kanade algorithm
	  char features_found[ MAX_CORNERS ];
	  float feature_errors[ MAX_CORNERS ];
	  CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_CORNERS ];
	  cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,
							 cornersA,cornersB,corner_count,cvSize( win_size,win_size ),
							 5,features_found,feature_errors,
							 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
							 (framecounter<2)?0:CV_LKFLOW_PYR_B_READY);

	  // Now make some image of what we are looking at:
	  for( int i=0; i<corner_count; i++ ) {
		if( features_found[i]==0|| feature_errors[i]>550 ) {
		  fprintf(stderr,"error=%f\n",feature_errors[i]);continue;
		}
		CvPoint p0 = cvPoint(cvRound( cornersA[i].x ),cvRound( cornersA[i].y ));
		CvPoint p1 = cvPoint(cvRound( cornersB[i].x ),cvRound( cornersB[i].y ));
		cvLine( imgC, p0, p1, CV_RGB(255,0,0), 1 );
	  }

	  cvShowImage("Test",imgC);
	  cvReleaseImage(&imgC);
	  cvReleaseImage(&eig_image);
	  cvReleaseImage(&tmp_image);
	  delete [] cornersA;
	  delete [] cornersB;
	}
	
	// DISPLAY PROCESSING RESULT
	int key = cvWaitKey(delay)&0xff;
	if (key==27){
	  break;
	}else if (key==' '){
	  if (delay){ delay = 0; }else{ delay = 30; }
	}else if (key=='f'){ // single-frame step (currently a no-op)
	}else if (key=='S'){ // skip forward 10 frames
	  framecounter+=10;fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key=='Q'){ // step back 10 frames
	  framecounter=MAX(1,framecounter-10);fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key!=0xff){
	  fprintf(stderr, "Warning: Unknown key press : %c\n", key);
	} // end of key press processing
  } // end of video

  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  cvReleaseImage(&rawImage_resized);

  return 0;
}
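/*
 * A minimal sketch (not from the sample above) of the canonical pyramid-reuse
 * pattern for cvCalcOpticalFlowPyrLK: set CV_LKFLOW_PYR_A_READY after the
 * first call and swap the image/pyramid/point buffers each iteration, so the
 * previous frame's pyramid is never rebuilt. Buffer names and the helper
 * signature are assumptions; the loop has no exit condition for brevity.
 */
static void lk_loop_sketch( IplImage* prev_grey, IplImage* grey,
                            IplImage* prev_pyramid, IplImage* pyramid,
                            CvPoint2D32f* points[2], char* status,
                            int count, int win_size )
{
    int flags = 0;                       /* nothing is precomputed on the first pass */
    IplImage* swap_temp;
    CvPoint2D32f* swap_points;
    for(;;)
    {
        /* ... refill `grey` from the next captured frame here ... */
        cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
            points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
        flags = CV_LKFLOW_PYR_A_READY;   /* after the swaps, prev_pyramid is valid */
        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
    }
}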
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Image of AR.Drone's camera
    IplImage *image = ardrone.getImage();

    // Variables for optical flow
    int corner_count = 50;
    IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
    IplImage *prev = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
    cvCvtColor(image, prev, CV_BGR2GRAY);
    IplImage *eig_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage *tmp_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage *prev_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);
    IplImage *curr_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);
    CvPoint2D32f *corners1 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));
    CvPoint2D32f *corners2 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(33);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        image = ardrone.getImage();

        // Convert the camera image to grayscale
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // Detect features to track (reset the corner budget each frame)
        corner_count = 50;
        cvGoodFeaturesToTrack(prev, eig_img, tmp_img, corners1, &corner_count, 0.1, 5.0, NULL);

        // Corner detected
        if (corner_count > 0) {
            char *status = (char*)malloc(corner_count * sizeof(char));

            // Calculate the optical flow
            CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3);
            cvCalcOpticalFlowPyrLK(prev, gray, prev_pyramid, curr_pyramid, corners1, corners2, corner_count, cvSize(10, 10), 3, status, NULL, criteria, 0);

            // Draw the optical flow vectors
            for (int i = 0; i < corner_count; i++) {
                cvCircle(image, cvPointFrom32f(corners1[i]), 1, CV_RGB (255, 0, 0));
                if (status[i]) cvLine(image, cvPointFrom32f(corners1[i]), cvPointFrom32f(corners2[i]), CV_RGB (0, 0, 255), 1, CV_AA, 0);
            }

            // Release the memory
            free(status);
        }

        // Save the last frame
        cvCopy(gray, prev);

        // Display the image
        cvShowImage("camera", image);
    }

    // Release the images
    cvReleaseImage(&gray);
    cvReleaseImage(&prev);
    cvReleaseImage(&eig_img);
    cvReleaseImage(&tmp_img);
    cvReleaseImage(&prev_pyramid);
    cvReleaseImage(&curr_pyramid);
    free(corners1);
    free(corners2);

    // See you
    ardrone.close();

    return 0;
}
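/*
 * A minimal sketch (not part of the sample above) of continuous tracking:
 * instead of re-detecting corners on every frame, keep the successfully
 * tracked positions and feed them back in as the next frame's inputs,
 * re-detecting only when too few survive. The helper name is an assumption.
 */
static int compactTrackedPoints(CvPoint2D32f* corners1, const CvPoint2D32f* corners2,
                                const char* status, int corner_count)
{
    int kept = 0;
    for (int i = 0; i < corner_count; i++) {
        if (status[i])
            corners1[kept++] = corners2[i];  // tracked point becomes next frame's input
    }
    return kept;  // caller re-runs cvGoodFeaturesToTrack when this gets small
}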
// Parameters
// - imgA and imgB: frames in sequence (8U single channel)
// - imgC: original frame to mark (RGB is fine)
void calcOpticalFlowAndMark(IplImage *imgA, IplImage *imgB, IplImage *imgC) {
    // Create buffers if necessary
    CvSize img_sz = cvGetSize( imgA );
    if( !eig_image )
        eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    if( !tmp_image )
        tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );

    // Find features to track
    int corner_count = MAX_CORNERS;
    cvGoodFeaturesToTrack(
        imgA,
        eig_image,
        tmp_image,
        cornersA,
        &corner_count,
        0.03, // quality_level
        5.0, // min_distance
        NULL,
        3, // block_size (default)
        0, // use_harris (default)
        0.04 // k (default)
    );
    cvFindCornerSubPix(
        imgA,
        cornersA,
        corner_count,
        cvSize(win_size, win_size),
        cvSize(-1, -1),
        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03)
    );

    // Call the Lucas-Kanade algorithm, reusing pyrA from the previous call
    // unless the pyramid buffers still have to be allocated
    int flags = CV_LKFLOW_PYR_A_READY;
    CvSize pyr_sz = cvSize( imgA->width+8, imgA->height/3 );
    if( !pyrA || !pyrB ) {
        pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        flags = 0; // first call: no pyramid has been computed yet
    }

    cvCalcOpticalFlowPyrLK(
        imgA,
        imgB,
        pyrA,
        pyrB,
        cornersA,
        cornersB,
        corner_count,
        cvSize( win_size, win_size ),
        5,
        features_found,
        feature_errors,
        cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
        flags
    );

    // Draw resulting velocity vectors
    for( int i = 0; i < corner_count; i++ ) {
        if( features_found[i] == 0 || feature_errors[i] > 550 ) {
            // printf("Error is %f/n", feature_errors[i]);
            continue;
        }

        double x0 = cornersA[i].x;
        double y0 = cornersA[i].y;
        CvPoint p = cvPoint( cvRound(x0), cvRound(y0) );
        double x1 = cornersB[i].x;
        double y1 = cornersB[i].y;
        CvPoint q = cvPoint( cvRound(x1), cvRound(y1) );
        //if( sqrt( (double) (y1-y0)*(y1-y0) + (x1-x0)*(x1-x0) ) < 0.1 )
        //if(fabs(y1 - y0) < .5 || fabs(x1 - x0) < .5)
        //  continue;
        //printf("%.4lf %.4lf -> %.4lf %.4lf\n", x0, y0, x1, y1);

        CvScalar line_color = CV_RGB(255, 0, 0);
        int line_thickness = 1;
        // Main line (p -> q)
        //cvLine( imgC, p, q, CV_RGB(255,0,0), 2 );

        // Main line (p -> q) lengthened
        double angle = atan2( (double) y1 - y0, (double) x1 - x0 );
        double hypotenuse = sqrt( (double) (y1-y0)*(y1-y0) + (x1-x0)*(x1-x0) );
        if(hypotenuse < 1.01)
            hypotenuse = 1.01;
        if(hypotenuse > 1.99)
            hypotenuse = 1.99;
        q.x = cvRound(x0 + 6 * hypotenuse * cos(angle));
        q.y = cvRound(y0 + 6 * hypotenuse * sin(angle));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );

        // Arrows
        p.x = (int) (x0 + 5 * hypotenuse * cos(angle + pi / 4));
        p.y = (int) (y0 + 5 * hypotenuse * sin(angle + pi / 4));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );

        p.x = (int) (x0 + 5 * hypotenuse * cos(angle - pi / 4));
        p.y = (int) (y0 + 5 * hypotenuse * sin(angle - pi / 4));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );
    }
}
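/*
 * A usage sketch for calcOpticalFlowAndMark, assuming two consecutive BGR
 * frames of equal size; the function expects 8U single-channel inputs (per
 * its parameter comments), so we convert first. The wrapper name and its
 * buffer handling are assumptions, not part of the original code.
 */
void markFlowBetweenFrames( IplImage* prevBGR, IplImage* currBGR )
{
    CvSize sz = cvGetSize( prevBGR );
    IplImage* greyA = cvCreateImage( sz, IPL_DEPTH_8U, 1 );
    IplImage* greyB = cvCreateImage( sz, IPL_DEPTH_8U, 1 );
    cvCvtColor( prevBGR, greyA, CV_BGR2GRAY );   // frames in sequence, 8U single channel
    cvCvtColor( currBGR, greyB, CV_BGR2GRAY );
    calcOpticalFlowAndMark( greyA, greyB, currBGR );  // vectors drawn onto currBGR
    cvReleaseImage( &greyA );
    cvReleaseImage( &greyB );
}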
ofPoint GoodFeaturesToTrack::trackPoints(ofImage* _image) {

    colorImg.resize(_image->getWidth(), _image->getHeight());
    grayImage.resize(_image->getWidth(), _image->getHeight());

    colorImg.setFromPixels(_image->getPixels(), _image->getWidth(), _image->getHeight());
    grayImage = colorImg;
    //grayImage.flagImageChanged();

    //colorImg.draw(390, 200);

    /* find good points to track */
    double quality = 0.1;
    double min_distance = 15;
    int count = MAX_COUNT;
    int win_size = 10;

    IplImage* eig = cvCreateImage( cvGetSize(grayImage.getCvImage()), 32, 1 );
    IplImage* temp = cvCreateImage( cvGetSize(grayImage.getCvImage()), 32, 1 );

    act_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
    cvGoodFeaturesToTrack( grayImage.getCvImage(), eig, temp, act_points, &count, quality, min_distance, 0, 3, 0, 0.04 );

    cvReleaseImage( &eig );
    cvReleaseImage( &temp );

    /* Match them with the array of stable points to prevent too much temporal hopping */
    if (stable_points.size() == 0) {
        for(int i=0; i<count; i++) {
            tStablePoint pt;
            pt.src_point = act_points[i];
            stable_points.push_back(pt);
        }
    }

    for(vector<tStablePoint>::iterator it = stable_points.begin(); it!=stable_points.end();) {
        /* find the best matching point in the last image */
        float mindist = 1000000;
        int bestInd = -1;
        for(int i=0; i<count; i++) {
            float dx = act_points[i].x-it->src_point.x;
            float dy = act_points[i].y-it->src_point.y;
            float dist = sqrt(dx*dx+dy*dy);
            if (dist < mindist) {
                mindist = dist;
                bestInd=i;
            }
        }

        if (mindist<16)	{
            it->dst_point = act_points[bestInd];
            it++;
        } else {
            it = stable_points.erase(it);
        }
    }


    if (stable_points.size() >= 4) {

        double avg_dx = 0;
        double avg_dy = 0;

        for(int i=0; i<stable_points.size(); i++) {
            avg_dx += (stable_points[i].dst_point.x - stable_points[i].src_point.x);
            avg_dy += (stable_points[i].dst_point.y - stable_points[i].src_point.y);

        }

        avg_dx/= (double)stable_points.size();
        avg_dy/= (double)stable_points.size();

        /* majority estimation - filter the outliers */
        double* weight = new double[stable_points.size()];
        for(int i=0; i<stable_points.size(); i++) {
            weight[i]=1;
        }

        for (int iter=0; iter<20; iter++) {
            avg_dx = 0;
            avg_dy = 0;
            double sum_weight =  0;
            for(int i=0; i<stable_points.size(); i++) {
                avg_dx += weight[i] * (stable_points[i].dst_point.x - stable_points[i].src_point.x);
                avg_dy += weight[i] * (stable_points[i].dst_point.y - stable_points[i].src_point.y);
                sum_weight+=weight[i];
            }
            avg_dx/= (double)sum_weight;
            avg_dy/= (double)sum_weight;
            for(int i=0; i<stable_points.size(); i++) {
                double dx = avg_dx - (stable_points[i].dst_point.x - stable_points[i].src_point.x);
                double dy = avg_dy - (stable_points[i].dst_point.y - stable_points[i].src_point.y);
                double dst = sqrt(dx*dx+dy*dy);
                weight[i]*=1/(1+dst);
            }
        }
        delete [] weight;

        float ofactor = 0.88;
        float nfactor = 0.19;

        /* the actual offset is calculated here, blended with the
           filtered value carried over from the last frame */
        flt_horiz = ofactor*flt_horiz + nfactor*avg_dx;
        flt_vert = ofactor*flt_vert + nfactor*avg_dy;

        /* make it absolute */
        //initX += flt_horiz;
        //initY += flt_vert;

        //cout << "VOM TRACKER: " << initX << "," << initY << endl;
    }

    stable_points.clear();
    for(int i=0; i<count; i++) {
        tStablePoint pt;
        pt.src_point = act_points[i];
        stable_points.push_back(pt);
    }

    cvFree(&act_points); // release the per-call buffer; it was previously leaked every frame

    return ofPoint((float)flt_horiz, (float)flt_vert, 0);

}
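/*
 * A minimal sketch of the outlier filtering used in trackPoints above:
 * iteratively reweighted averaging, where samples far from the current
 * weighted mean lose influence. The function name and parameters are
 * assumptions; the 1/(1+dist) weight decay matches the loop above.
 */
static void robustMeanOffset(const double* dx, const double* dy, int n,
                             int iterations, double& out_dx, double& out_dy)
{
    out_dx = 0; out_dy = 0;
    if (n <= 0) return;
    vector<double> w(n, 1.0);                          // start with uniform weights
    for (int iter = 0; iter < iterations; iter++) {
        double sx = 0, sy = 0, sw = 0;
        for (int i = 0; i < n; i++) { sx += w[i]*dx[i]; sy += w[i]*dy[i]; sw += w[i]; }
        out_dx = sx / sw;                              // current weighted mean offset
        out_dy = sy / sw;
        for (int i = 0; i < n; i++) {
            double ex = dx[i] - out_dx, ey = dy[i] - out_dy;
            w[i] *= 1.0 / (1.0 + sqrt(ex*ex + ey*ey)); // outliers decay fastest
        }
    }
}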
Exemple #30
0
void mexFunction(int output_size, mxArray *output[], int input_size, const mxArray *input[]) {
    
    char* input_buf;
    /* copy the string data from input[0] into the C string input_buf */
    input_buf = mxArrayToString(I_IN);
    CvCapture* capture = 0;

    capture = cvCaptureFromAVI(input_buf);
    if (!capture) {
        fprintf(stderr, "Could not initialize capturing...\n");
    }

    cvNamedWindow( "LkDemo", 0 );

    for(;;) {
        init = clock();
        IplImage* frame = 0;
        int i, k, c;
        
        frame = cvQueryFrame( capture );
        if (!frame)
            break;

        if (!image) {
            /* allocate all the buffers */
            image = cvCreateImage(cvGetSize(frame), 8, 3);
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            pointadd[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            ptcolor = (int*)cvAlloc(MAX_COUNT*sizeof(ptcolor[0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );
        //CvRect rect = cvRect(image->width/2-50, 0, 100,image->height*0.6);
        
        if (night_mode)
            cvZero( image );

        countlast = ptcount;
        if (need_to_add) {
            /* detect candidate features to top up the tracked set */
            IplImage* eig = cvCreateImage(cvGetSize(grey), 32, 1);
            IplImage* temp = cvCreateImage(cvGetSize(grey), 32, 1);
            double quality = 0.01;
            double min_distance = 10;
            
            countadd = MAX_COUNT;
            //cvSetImageROI(grey, rect);
            //cvSetImageROI(eig, rect);
            //cvSetImageROI(temp, rect);
            
            cvGoodFeaturesToTrack(grey, eig, temp, pointadd[0], &countadd, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, pointadd[0], countadd, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));

            //for(l=0;l<countadd;l++)
            //	pointadd[0][l].x = pointadd[0][l].x + image->width/2-50;
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            //cvResetImageROI(grey);
            for (m = 0; m < countadd; m++) {
                flag = 1;
                for (i = 0; i < countlast; i++) {
                    double dx = pointadd[0][m].x - points[0][i].x;
                    double dy = pointadd[0][m].y - points[0][i].y;

                    if( dx*dx + dy*dy <= 100 ) {
                        flag = 0;
                        break;
                    }
                }

                if (flag==1) {
                    points[0][ptcount++] = pointadd[0][m];
                    cvCircle(image, cvPointFrom32f(points[0][ptcount-1]), 3, CV_RGB(255, 0, 0), -1, 8, 0); // mark the newly added point
                }
                if (ptcount >= MAX_COUNT) {
                    break;
                }
            }
        }

        if (need_to_init) {
            /* automatic initialization */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;
            
            ptcount = MAX_COUNT;
            cvGoodFeaturesToTrack(grey, eig, temp, points[1], &ptcount, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, points[1], ptcount, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            add_remove_pt = 0;
            /* assign each point one of five colors, cycling by index */
            for( i = 0; i < ptcount; i++ )
                ptcolor[i] = i % 5;
        }
        else if( ptcount > 0 ) {
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                    points[0], points[1], ptcount, cvSize(win_size, win_size), 3, status, 0,
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            for( i = k = 0; i < ptcount; i++ ) {
                if( add_remove_pt ) {
                    double dx = pt.x - points[1][i].x; // distance from the clicked point
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 ) {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                pt = cvPointFrom32f(points[1][i]);
                pttl.x = pt.x-3; pttl.y = pt.y-3; // point top left
                ptdr.x = pt.x+3; ptdr.y = pt.y+3; // point down right

                if( !status[i] ){
                    pt = cvPointFrom32f(points[0][i]);
                    cvCircle( image, pt, 3, CV_RGB(0, 0, 255), -1, 8, 0);
                    continue;
                }

                pt = cvPointFrom32f(points[1][i]);
                points[1][k] = points[1][i];
                if(i<countlast){
                    /* matched feats */
                    ptcolor[k] = ptcolor[i];
                    switch (ptcolor[k]) {
                        case 0:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                            break;
                        case 1:
                            cvCircle( image, pt, 3, CV_RGB(255, 255, 0), -1, 8, 0);
                            break;
                        case 2:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 255), -1, 8, 0);
                            break;
                        case 3:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 255), -1, 8, 0);
                            break;
                        case 4:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 0), -1, 8, 0);                            
                            break;
                        default:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                }
                else
                    /* new feats */
                    switch (k%5) {
                        case 0:
                            //  void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, double color, int thickness=1 );
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                            ptcolor[k] = 0;
                            break;
                        case 1:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 255, 0), -1, 8, 0);
                            ptcolor[k] = 1;
                            break;
                        case 2:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 255), -1, 8, 0);
                            ptcolor[k] = 2;
                            break;
                        case 3:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 255), -1, 8, 0);
                            ptcolor[k] = 3;
                            break;
                        case 4:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 0), -1, 8, 0);
                            ptcolor[k] = 4;
                            break;
                        default:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                k++; // advance the compacted index for every surviving point
            }
            ptcount = k;
        }

        if( add_remove_pt && ptcount < MAX_COUNT ) {
            points[1][ptcount++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + ptcount - 1, 1,
                    cvSize(win_size, win_size), cvSize(-1, -1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            add_remove_pt = 0;
        }

        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        // save the annotated frame as Rst/Rst<imgseq>.jpg
        std::ostringstream fs;
        fs << "Rst/Rst" << imgseq << ".jpg";
        cvSaveImage(fs.str().c_str(), image);
        imgseq++;
        if(imgseq>500)
            break;

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c ) {
            case 'r':
                need_to_init = 1;
                break;
            case 'c':
                ptcount = 0;
                break;
            case 'n':
                night_mode ^= 1;
                break;
            default:
                ;
        }
        if (ptcount<100) {
            need_to_init = 1;  // too few points survive: re-detect from scratch
        }
        if (ptcount>50&&ptcount<MAX_COUNT) {
            need_to_add = 1;   // room left in the buffer: top up with new features
        }
        final = clock()-init;
    }