Example #1
0
int main( int argc, char** argv )
{
    /* Face-detection demo: loads a Haar cascade and runs detect_and_draw()
       on camera frames, a video file, a single image, or a list of images.
       Uses globals declared elsewhere: cascade_name, cascade, storage,
       detect_and_draw(). */
    CvCapture* capture = 0;
    IplImage *frame, *frame_copy = 0;
    int optlen = strlen("--cascade=");
    const char* input_name;

    /* Command line: [--cascade=<path>] [filename|camera_index] */
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        /* Default cascade file, looked up in the current directory.
           An absolute path also works, e.g.
           "C:\\Program Files\\OpenCV\\data\\haarcascades\\haarcascade_frontalface_alt2.xml". */
        cascade_name = "haarcascade_frontalface_alt2.xml";
        input_name = argc > 1 ? argv[1] : 0;
    }

    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
    }
    storage = cvCreateMemStorage(0);

    /* A missing input or a single digit selects a camera; anything else is a file. */
    if( !input_name || (isdigit((unsigned char)input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name );

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        for(;;)
        {
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );
            if( !frame )
                break;
            /* Lazily allocate a working copy matching the first frame's format. */
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );
            /* Normalize to a top-left origin so drawing coordinates are consistent. */
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            else
                cvFlip( frame, frame_copy, 0 );

            detect_and_draw( frame_copy );

            /* Any keypress stops the capture loop. */
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }
    else
    {
        const char* filename = input_name ? input_name : "00.jpg";
        IplImage* image = cvLoadImage( filename, 1 );

        if( image )
        {
            detect_and_draw( image );
            cvWaitKey(0);
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    /* strip trailing whitespace / newline */
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace((unsigned char)buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    image = cvLoadImage( buf, 1 );
                    if( image )
                    {
                        detect_and_draw( image );
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                fclose(f);
            }
        }
    }

    cvDestroyWindow("result");

    /* Release the globals allocated above (previously leaked). */
    cvReleaseMemStorage( &storage );
    cvReleaseHaarClassifierCascade( &cascade );

    return 0;
}
Example #2
0
int main(int argc, char** argv)
{
  /* Fire-over-video effect. Writes into the global 'image'.
     Per-pixel fire state: [0] = heat value (palette index), [1] = decay counter.
     Declared static: ~2.4 MB would otherwise risk overflowing the stack. */
  static int fire[640][480][2];
  int palette[256][3];
  int i;

  memset(fire,0,sizeof(fire));

  /* Build a 256-entry BGR fire palette (hue ramp with rising luminance). */
  for(i=0;i<256;i++) {
    HSL2BGR(i/3,255,MIN(255, i * 2),palette[i]);
  }

  CvCapture* capture = cvCaptureFromAVI( "/home/sandaru1/Desktop/smartcut/server/media/videos/53.avi" );
  if( !capture )
    return 1; /* previously crashed inside cvQueryFrame when the file could not be opened */

  cvNamedWindow("win", 1);

  image = cvCreateImage( cvSize(640,480), 8, 3 );

  for(;;)
  {
    IplImage* frame = cvQueryFrame( capture );
    if (!frame) break;
    cvCopy( frame, image, 0 ); /* NOTE(review): assumes the video frames are 640x480 -- confirm */

    int h = image->height;
    int w = image->width;

    /* Seed the bottom row with random heat and a fresh decay counter. */
    for(int x = 0; x < w; x++)  {
      fire[x][h - 1][0] = abs(32768 + rand()) % 256;
      fire[x][h - 1][1] = 129;
    }

    /* Propagate heat upward: each cell averages cells from the row(s) below,
       attenuated by 32/129 so the flame dies out with height. */
    for(int y = 0; y < h - 1; y++)
    for(int x = 0; x < w; x++)
    {
      fire[x][y][0] =
        ((fire[(x - 1 + w) % w][(y + 1) % h][0]
        + fire[(x) % w][(y + 1) % h][0]
        + fire[(x + 1) % w][(y + 1) % h][0]
        + fire[(x) % w][(y + 2) % h][0])
        * 32) / 129;
      int temp = fire[(x) % w][(y + 1) % h][1];
      if (temp > 1)
        fire[x][y][1] = temp - 1;
    }

    /* Blend the flame colors over the bottom 129 rows of the frame.
       NOTE(review): pos assumes tightly packed pixels (widthStep == w*3) -- confirm. */
    for(int x = 0; x < w; x++)
    for(int y = h - 129; y < h; y++)
    {
      int pos = (x + y * w) * 3;
      image->imageData[pos]     = lerp(fire[x][y][1]/129.0, image->imageData[pos],     palette[fire[x][y][0]][0]);
      image->imageData[pos + 1] = lerp(fire[x][y][1]/129.0, image->imageData[pos + 1], palette[fire[x][y][0]][1]);
      image->imageData[pos + 2] = lerp(fire[x][y][1]/129.0, image->imageData[pos + 2], palette[fire[x][y][0]][2]);
    }

    cvShowImage( "win", image ); /* show before waiting (frame was previously displayed one wait late) */
    cvWaitKey(10);
  }

  cvDestroyWindow("win");
  cvReleaseImage( &image );     /* previously leaked */
  cvReleaseCapture( &capture ); /* previously leaked */

  return 0;
}
Example #3
0
int main( int argc, char** argv )
{
  CvCapture* capture = 0;

  const char *captureSrc = "0";
  bool redo_geom=false;
  bool redo_training=false;
  bool redo_lighting=false;

  char *modelFile = "model.bmp";
  // parse command line
  for (int i=1; i<argc; i++) {
    if (strcmp(argv[i], "-m") ==0) {
      if (i==argc-1) usage(argv[0]);
      modelFile = argv[i+1];
      i++;
    } else if (strcmp(argv[i], "-r")==0) {
      redo_geom=redo_training=redo_lighting=true;
    } else if (strcmp(argv[i], "-g")==0) {
      redo_geom=redo_lighting=true;
    } else if (strcmp(argv[i], "-l")==0) {
      redo_lighting=true;
    } else if (strcmp(argv[i], "-t")==0) {
      redo_training=true;
    } else if (argv[i][0]=='-') {
      usage(argv[0]);
    } else {
      captureSrc = argv[i];
    }
  }

  if(strlen(captureSrc) == 1 && isdigit(captureSrc[0]))
    capture = cvCaptureFromCAM( captureSrc[0]-'0');
  else 
    capture = cvCaptureFromAVI( captureSrc ); 

  if( !capture )
    {
      cerr <<"Could not initialize capturing from " << captureSrc << " ...\n";
      return -1;
    }

  // Allocate the detector object
  CalibModel model(modelFile);

  if (!model.buildCached(capture, !redo_training)) {
    cout << "model.buildCached() failed.\n";
    return 1;
  }

  cout << "Model build. Starting geometric calibration.\n";

  if (!geometric_calibration(model,capture, !redo_geom)) {
    cerr << "Geometric calibration failed.\n";
    return 2;
  }

  cout << "Geometric calibration OK. Calibrating light...\n";
	
  photometric_calibration(model, capture, 150, !redo_lighting);
}
Example #4
0
ImagesInput<T>::ImagesInput(args::variables_map &options)
{
    // Construct the image source from parsed command-line options: either a
    // list of image files, or a video input (file or camera). Loads/grabs the
    // first image and computes the desired (possibly downscaled) output size.
    _reached_last_image = false;
    video_capture = NULL;

    // parse options --
    if (options.count("input_images"))
        input_images = options["input_images"].as< vector<string> >();

    video_input = (options.count("input_video") != 0 || options.count("input_camera") != 0);

    blur_sigma = -1.0f; // negative value indicates no blurring
    if (options.count("blur_sigma") && false) // <<< disabling blur sigma because it is not good for features detection
    {
        blur_sigma = options["blur_sigma"].as<double>();
        cout << "Blurring the images with sigma value " << blur_sigma << endl;
    }

    // grayscale by default; --use_color_images overrides
    grayscale = true;
    if (options.count("use_color_images"))
        grayscale = ! options["use_color_images"].as<bool>();

    // load first image --
    if (!video_input)
    {
        // image-list mode: need at least two images
        if ( input_images.empty() )
            throw runtime_error("No input images where defined");
        if (input_images.size() < 2)
            throw runtime_error("The software requires at least two input images");

        input_images_it = input_images.begin();

        current_image = CImg<T>( input_images_it->c_str() );

        // collapse color images to a single channel when grayscale is requested
        if (grayscale && current_image.dimv() != 1)
        {
            CImg<T> color_image = current_image;
            current_image = CImg<T>(current_image.dimx(), current_image.dimy(), 1, 1);
            color_to_grey(color_image, current_image);
        }
    }
    else
    {
        // open the video input --
        if (options.count("input_camera"))
        {
            printf("Trying to capture from a video device...");
            video_capture = cvCaptureFromCAM( options["input_camera"].as<int>() );
            if (!video_capture)
            {
                printf("Failed.\n");
            }
            else
            {
                printf("\n");
                // Only configure the capture when it was actually opened
                // (cvSetCaptureProperty was previously called on a NULL capture).
                cvSetCaptureProperty(video_capture, CV_CAP_PROP_FRAME_WIDTH, 640);
                cvSetCaptureProperty(video_capture, CV_CAP_PROP_FRAME_HEIGHT, 480);
            }
        }

        if (options.count("input_video"))
        {
            // The video file takes precedence; release any camera capture first
            // so it is not leaked when both inputs are given.
            if (video_capture)
                cvReleaseCapture(&video_capture);
            printf("Trying to capture from a video file...");
            video_capture = cvCaptureFromAVI( options["input_video"].as<string>().c_str() );
            if (!video_capture)
                printf("Failed.\n");
            else
                printf("\n");
        }

        if (!video_capture)
            throw runtime_error("\nCould not obtain a video input. Please try with another parameters.\n");

        // skip the initial frames (camera auto adjustment, video codec initialization)
        for (int i = 0; i < 5; i += 1)
        {
            if (grab_frame() == false)
                throw runtime_error("Could not grab the first frames");
        }
    }

    desired_width = current_image.dimx();
    desired_height = current_image.dimy();

    { // resize the image to avoid too long computation
        const int max_size = options["max_size"].as< int >(); // copy

        const int max_width = max_size, max_height = max_size;

        desired_width = min(desired_width, max_width);
        desired_height = min(desired_height, max_height);

        if ((desired_width != current_image.dimx()) || (desired_height != current_image.dimy()))
        {
            printf("Going to resize the images to (%i, %i) [pixels]\n", desired_width, desired_height);
        }
    }
}
int CCalibration::CalibrateFromCAMorAVI( int nImages, int nChessRow, int nChessCol, int nSquareSize, int nSkip, CvMat* intrinsic, CvMat* distortion, int fromCamera, char* fName)
{
    int nChessSize = nChessRow*nChessCol;
    int nAllPoints = nImages*nChessSize;
    int i, j, k;
    int corner_count, found;
    int* p_count = new int[nImages];
    IplImage **src_img = new IplImage*[nImages];
    CvSize pattern_size = cvSize (nChessCol, nChessRow);
    CvPoint3D32f* objects = new CvPoint3D32f[nAllPoints];
    CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * nAllPoints);
    CvMat object_points;
    CvMat image_points;
    CvMat point_counts;
    CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
    CvMat *translation = cvCreateMat (1, 3, CV_32FC1);

    // (1)
    for (i = 0; i < nImages; i++) {
        for (j = 0; j < nChessRow; j++) {
            for (k = 0; k < nChessCol; k++) {
                objects[i * nChessSize + j * nChessCol + k].x = (float)j * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].y = (float)k * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].z = 0.0;
            }
        }
    }
    cvInitMatHeader (&object_points, nAllPoints, 3, CV_32FC1, objects);

    // (2)
    CvCapture *capture = NULL;
    if (fromCamera)
        capture = cvCaptureFromCAM(0);
    else
        capture = cvCaptureFromAVI(fName);
    assert(capture);

    int found_num = 0;
    cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
    cvNamedWindow ("Webcam", CV_WINDOW_AUTOSIZE);
    int c = 0;
    for (i = 0; i < nImages; i++)
    {
        IplImage * frame;
        while (true)
        {
            frame = cvQueryFrame(capture);
            cvShowImage("Webcam", frame);

            if (c++ % nSkip == 0)
            {
                found = cvFindChessboardCorners (frame, pattern_size, &corners[i * nChessSize], &corner_count);
                if (found)
                {
                    char s[100];
                    sprintf(s, "%d.png", i);
                    cvSaveImage(s, frame);
                    src_img[i] = cvCloneImage(frame);
                    fprintf (stderr, "ok\n");
                    found_num++;
                    break;
                }
            }
            cvWaitKey(5);
        }
        fprintf (stderr, "%02d...", i);

        // (4)
        IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
        cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
        cvFindCornerSubPix (src_gray, &corners[i * nChessSize], corner_count,
                            cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
        cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * nChessSize], corner_count, found);
        p_count[i] = corner_count;
        cvShowImage ("Calibration", src_img[i]);
        //cvWaitKey (0);
    }
    cvDestroyWindow ("Calibration");

    if (found_num != nImages)
        return -1;
    cvInitMatHeader (&image_points, nAllPoints, 1, CV_32FC2, corners);
    cvInitMatHeader (&point_counts, nImages, 1, CV_32SC1, p_count);
    // (5)
    cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

    // (6)
    /*
    CvMat sub_image_points, sub_object_points;
    int base = 0;
    cvGetRows (&image_points, &sub_image_points, base * nChessSize, (base + 1) * nChessSize);
    cvGetRows (&object_points, &sub_object_points, base * nChessSize, (base + 1) * nChessSize);
    cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
    */
    // (7)ToXML
    /*
    CvFileStorage *fs;
    fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
    cvWrite (fs, "intrinsic", intrinsic);
    cvWrite (fs, "rotation", rotation);
    cvWrite (fs, "translation", translation);
    cvWrite (fs, "distortion", distortion);
    cvReleaseFileStorage (&fs);*/

    for (i = 0; i < nImages; i++)
        cvReleaseImage (&src_img[i]);
    delete p_count;
    delete src_img;
    delete objects;
    return 1;
}
int main(int argc, char **argv)
{
  std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;

  /* Open video file */
  CvCapture *capture = 0;
  capture = cvCaptureFromAVI("dataset/jakomali.mp4"); //video.avi
  if(!capture){
    std::cerr << "Cannot open video!" << std::endl;
    return 1;
  }
  
  /* Background Subtraction Algorithm */
  IBGS *bgs;
  bgs = new PixelBasedAdaptiveSegmenter;
  
  /* Blob Tracking Algorithm */
  cv::Mat img_blob;
  BlobTracking* blobTracking;
  blobTracking = new BlobTracking;

  /* Vehicle Counting Algorithm */
  VehicleCouting* vehicleCouting;
  vehicleCouting = new VehicleCouting;

  std::cout << "Press 'q' to quit..." << std::endl;
  int key = 0;
  IplImage *frame;
  while(key != 'q')
  {
    frame = cvQueryFrame(capture);
    if(!frame) break;

    cv::Mat img_input(frame);
    //cv::imshow("Input", img_input); // input video

    cv::Mat img_mask;
    bgs->process(img_input, img_mask);
    
    if(!img_mask.empty())
    {
      // Perform blob tracking
      blobTracking->process(img_input, img_mask, img_blob);

      // Perform vehicle counting
      vehicleCouting->setInput(img_blob);
      vehicleCouting->setTracks(blobTracking->getTracks());
      vehicleCouting->process();
    }

    key = cvWaitKey(1);
  }

  delete vehicleCouting;
  delete blobTracking;
  delete bgs;

  cvDestroyAllWindows();
  cvReleaseCapture(&capture);
  
  return 0;
}
Example #7
0
unsigned __stdcall FrameCaptureThread( void* Param )
{
    // Worker thread: reads frames from "test<nConv>.avi", extracts contours,
    // publishes them through pInfo->ppCont and signals the consumer via hEvent.
    cout << "First thread started!" << endl;
    //----------------------------------------------------------
    OpData* pInfo = (OpData*) Param;
    CvSeq** contour = pInfo->ppCont;		//shared output slot for the detected contours
    CvCapture* capture = 0;		//interface for capturing frames of the video/camera
    //----------------------------------------------------------
    string strVid = "test";
    strVid.append( NumberToString( pInfo->nConv ) );
    strVid.append( ".avi" );
    //----------------------------------------------------------
    capture = cvCaptureFromAVI( strVid.c_str() );		//select video based on conveyor id
    if( !capture )
    {
        cout << "Could not initialize capturing..." << endl;
        return 0;
    }
    cvNamedWindow( strVid.c_str() );
    while( true )
    {
        IplImage* frame = cvQueryFrame( capture ); // owned by the capture; not released here
        if( !frame )
        {
            break;
        }
        //preprocess the frame, keeping only the thresholded (black & white) image
        IplImage* imgThr = GetThresholdedImage( frame );
        //transform image into its binary representation
        cvThreshold( imgThr, imgThr, 128, 255, CV_THRESH_BINARY);
        CvMemStorage* storage = cvCreateMemStorage(0);
        //cvFindContours modifies its input, so work on a copy
        IplImage *imgNew = cvCloneImage( imgThr );
        //find all contours
        cvFindContours( imgNew, storage, contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
        SetEvent( hEvent );
        // NOTE(review): the contours handed to the consumer via *contour live in
        // 'storage', which is released at the bottom of this iteration -- confirm
        // the consumer signaled above is done with them by then.
        CvSeq* temp = *contour;

        for( ; temp != 0; temp = temp->h_next )
        {
            cvDrawContours( frame, temp, cvScalar( 104, 178, 70 ), cvScalar( 130, 240, 124 ), 1, 1, 8 );
        }
        cvShowImage( strVid.c_str(), frame );
        // Wait for a keypress
        int c = cvWaitKey( 300 );
        // Release per-frame resources BEFORE the possible break below
        // (they were previously leaked when a key was pressed).
        cvClearMemStorage( storage );
        cvReleaseMemStorage( &storage );
        cvReleaseImage( &imgNew );
        cvReleaseImage( &imgThr );
        if( c != -1 )
        {
            // If pressed, break out of the loop
            break;
        }
    }
    // Final cleanup (previously leaked): window and capture.
    cvDestroyWindow( strVid.c_str() );
    cvReleaseCapture( &capture );
    return 0;
}
Example #8
0
int main( int argc, char** argv )
{
    /* Lucas-Kanade sparse optical flow demo (lkdemo). Relies on globals
       declared elsewhere in this file: image, grey, prev_grey, pyramid,
       prev_pyramid, points, status, count, flags, night_mode, need_to_init,
       add_remove_pt, pt, win_size, MAX_COUNT, swap_temp, swap_points,
       and the on_mouse callback. */
    CvCapture* capture = 0;

    /* No arguments or a single-digit argument -> camera; one other argument -> video file. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    /* print a welcome message, and the OpenCV version */
    printf ("Welcome to lkdemo, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - auto-initialize tracking\n"
            "\tc - delete all the points\n"
            "\tn - switch the \"night\" mode on/off\n"
            "To add/remove a feature point click it\n" );

    cvNamedWindow( "LkDemo", 0 );
    cvSetMouseCallback( "LkDemo", on_mouse, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, k, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers lazily, sized from the first frame */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );

        /* in night mode only the tracked points are drawn, on a black background */
        if( night_mode )
            cvZero( image );

        if( need_to_init )
        {
            /* automatic initialization: pick strong corners, then refine them */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;

            count = MAX_COUNT;
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
                                   quality, min_distance, 0, 3, 0, 0.04 );
            cvFindCornerSubPix( grey, points[1], count,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );

            add_remove_pt = 0;
        }
        else if( count > 0 )
        {
            /* track points[0] from the previous frame into points[1] */
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            /* compact the point list in place: drop lost points, and drop a
               point the user clicked on (within a 5-pixel radius) */
            for( i = k = 0; i < count; i++ )
            {
                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                if( !status[i] )
                    continue;

                points[1][k++] = points[1][i];
                cvCircle( image, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
            }
            count = k;
        }

        /* a click that removed nothing adds a new (sub-pixel refined) point */
        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

        /* the current buffers become the "previous" ones for the next frame */
        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        c = cvWaitKey(10);
        if( (char)c == 27 )   /* ESC quits */
            break;
        switch( (char) c )
        {
        case 'r':
            need_to_init = 1;
            break;
        case 'c':
            count = 0;
            break;
        case 'n':
            night_mode ^= 1;
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("LkDemo");

    return 0;
}
Example #9
0
int main(){
	// Interactive console menu for color-weakness correction / simulation on
	// still images, video files and live camera input. Uses globals declared
	// elsewhere: temp_factor, modification_factor, t_inverse_factor,
	// inverse_factor, SettingTrackbar, inverse_SettingTrackbar, Refine_img,
	// MODE_CORRECTION, MODE_DYSCHROMATOPSA.
	int mode;
	int camera_idx;

	char file_path[256];

	IplImage *src_img = NULL, *dst_img = NULL;
	CvCapture *Capture = NULL;
	IplImage *Capture_img;

	while(1){
		printf("\n==========================================================================\n");
		printf("Select Menu\n");
		printf("1.Setting\n");
		printf("2.사진 보정\n");
		printf("3.동영상 보정\n");
		printf("4.Live Cam\n");
		printf("5.색각 이상 시뮬레이션\n");
		printf("6.색약 보정 & 색각 이상 체험\n");
		printf("7.exit\n");
		printf("==========================================================================\n\n");
		printf("menu : ");
		scanf("%d", &mode);

		switch(mode){
		case 1:
			{
			// Settings: tune the correction factor interactively on the test image.
			cvNamedWindow("Color weakness test");
			cvCreateTrackbar("factor", "Color weakness test", &temp_factor, 100, SettingTrackbar);
			IplImage *test_img = cvLoadImage("test_img.jpg");
			if(test_img == NULL){	// cvLoadImage returns NULL on failure (was unchecked)
				printf("ERROR : File not found\n");
				cvDestroyAllWindows();
				break;
			}
			IplImage *modify_img = cvCreateImage(cvGetSize(test_img), IPL_DEPTH_8U, 3);

			while(1){
				Refine_img(test_img, modify_img, modification_factor, MODE_CORRECTION);

				cvShowImage("Origin", test_img);
				cvShowImage("Color weakness test", modify_img);
				if(cvWaitKey(33) == 27)		break;	// ESC
			}

			cvReleaseImage(&test_img);
			cvReleaseImage(&modify_img);

			cvDestroyAllWindows();
			}
			break;
		case 2:
			// Still-image correction.
			printf("Image path : ");
			scanf("%255s", file_path);	// bounded read: file_path is 256 bytes
			src_img = cvLoadImage(file_path);
			if(src_img == NULL){	// was src_img->width, which dereferenced NULL on failure
				printf("ERROR : File not found\n");
				break;
			}else{
				dst_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U, 3);
				Refine_img(src_img, dst_img, modification_factor, MODE_CORRECTION);

				cvShowImage("Source Image", src_img);
				cvShowImage("Destination Image", dst_img);
				cvWaitKey(0);

				cvReleaseImage(&src_img);
				cvReleaseImage(&dst_img);
				src_img = NULL;
				dst_img = NULL;

				cvDestroyAllWindows();
			}

			break;
		case 3:
			// Video correction.
			printf("Image path : ");
			scanf("%255s", file_path);
			Capture = cvCaptureFromAVI(file_path);

			if(Capture == NULL){
				printf("ERROR : Video not found\n");
				break;
			}else{
				double nFPS = cvGetCaptureProperty(Capture, CV_CAP_PROP_FPS); // currently unused
				//nFPS = 1000 / nFPS;
				int nTotalFrame = (int)cvGetCaptureProperty(Capture, CV_CAP_PROP_FRAME_COUNT);
				int frameCount = 1;

				// The first frame is used only to size the working buffers.
				Capture_img = cvQueryFrame(Capture);
				dst_img = cvCreateImage(cvGetSize(Capture_img), IPL_DEPTH_8U, 3);
				IplImage *small_src = cvCreateImage(cvSize(Capture_img->width/2, Capture_img->height/2), IPL_DEPTH_8U, 3);
				IplImage *small_dst = cvCloneImage(small_src);

				while(1){
					int _TICK = GetTickCount();

					Capture_img = cvQueryFrame(Capture);
					cvResize(Capture_img, small_src);

					// Correct at half resolution for speed, then scale back up.
					Refine_img(small_src, small_dst, modification_factor, MODE_CORRECTION);
					cvResize(small_dst, dst_img);

					frameCount++;

					if(cvWaitKey(1) == 27 || frameCount >= nTotalFrame-1){
						cvDestroyAllWindows();
						break;
					}

					_TICK = GetTickCount() - _TICK;
					float fps = 1000.0f / (float)_TICK;
					char buf[32];
					sprintf(buf, "%.2f fps", fps);
					CvFont font = cvFont(1.0);	// was &cvFont(1.0): address of a temporary (non-standard)
					cvPutText(dst_img, buf, cvPoint(30, 30), &font, cvScalar(0,0,255));

					cvShowImage("Destination Image", dst_img);
				}

				cvReleaseImage(&small_src);
				cvReleaseImage(&small_dst);
				cvReleaseImage(&dst_img);

				cvReleaseCapture(&Capture);	// was leaked: Capture was only set to NULL

				src_img = NULL;
				dst_img = NULL;
			}
			break;

		case 4:
			// Live camera correction.
			printf("Select Camera index : ");
			scanf("%d", &camera_idx);
			Capture = cvCaptureFromCAM(camera_idx);
			if(Capture == NULL){
				printf("ERROR : Camera not found\n");
				break;
			}else{
				while(1){
					Capture_img = cvQueryFrame(Capture);
					src_img = cvCloneImage(Capture_img);
					dst_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U, 3);

					Refine_img(src_img, dst_img, modification_factor, MODE_CORRECTION);

					cvShowImage("Source Image", src_img);
					cvShowImage("Destination Image", dst_img);

					cvReleaseImage(&src_img);
					cvReleaseImage(&dst_img);

					if(cvWaitKey(10) == 27){
						cvDestroyAllWindows();
						break;
					}
				}

				cvReleaseCapture(&Capture);
				Capture = NULL;

				src_img = NULL;
				dst_img = NULL;
			}

			break;
		case 5:
			// Dyschromatopsia (color-vision-deficiency) simulation on a still image.
			printf("Image path : ");
			scanf("%255s", file_path);
			src_img = cvLoadImage(file_path);
			if(src_img == NULL){	// was src_img->width, which dereferenced NULL on failure
				printf("ERROR : File not found\n");
				break;
			}else{
				cvNamedWindow("Color weakness test");
				cvCreateTrackbar("factor", "Color weakness test", &temp_factor, 100, SettingTrackbar);
				IplImage *modify_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U, 3);

				while(1){
					Refine_img(src_img, modify_img, modification_factor, MODE_DYSCHROMATOPSA);

					cvShowImage("Color weakness test", modify_img);
					if(cvWaitKey(33) == 27)		break;
				}

				cvReleaseImage(&src_img);
				cvReleaseImage(&modify_img);

				cvDestroyAllWindows();
			}
			break;
		case 6:
			{
			// Combined: correction preview plus simulation of how the corrected
			// image is perceived.
			cvNamedWindow("Color weakness test");
			cvNamedWindow("Inverse");
			cvCreateTrackbar("factor", "Color weakness test", &temp_factor, 100, SettingTrackbar);
			cvCreateTrackbar("factor", "Inverse", &t_inverse_factor, 100, inverse_SettingTrackbar);
			IplImage *test_img = cvLoadImage("test_img.jpg");
			if(test_img == NULL){	// cvLoadImage returns NULL on failure (was unchecked)
				printf("ERROR : File not found\n");
				cvDestroyAllWindows();
				break;
			}
			IplImage *modify_img = cvCreateImage(cvGetSize(test_img), IPL_DEPTH_8U, 3);
			IplImage *inverseImg = cvCreateImage(cvGetSize(test_img), IPL_DEPTH_8U, 3);

			while(1){
				Refine_img(test_img, modify_img, modification_factor, MODE_CORRECTION);
				Refine_img(modify_img, inverseImg, inverse_factor, MODE_DYSCHROMATOPSA);

				cvShowImage("Origin", test_img);
				cvShowImage("Inverse", inverseImg);
				cvShowImage("Color weakness test", modify_img);
				if(cvWaitKey(33) == 27)		break;
			}

			cvReleaseImage(&inverseImg);
			cvReleaseImage(&test_img);
			cvReleaseImage(&modify_img);

			cvDestroyAllWindows();
			}
			break;	// was missing: control fell through into case 7 and exited the program
		case 7:
			return 0;
		default:
			printf("ERROR : select correct menu\n");	// fixed "ERRER" typo
			break;
		}
	}

	return 0;
}
Example #10
0
void mexFunction(int output_size, mxArray *output[], int input_size, const mxArray *input[]) {
    
    char* input_buf;
    /* copy the string data from input[0] into a C string input_ buf. */
    input_buf = mxArrayToString(I_IN);
    CvCapture* capture = 0;

    capture = cvCaptureFromAVI(input_buf);
    if (!capture) {
        fprintf(stderr, "Could not initialize capturing...\n");
    }

    cvNamedWindow( "LkDemo", 0 );

    for(;;) {
        init = clock();
        IplImage* frame = 0;
        int i, k, c;
        
        frame = cvQueryFrame( capture );
        if (!frame)
            break;

        if (!image) {
            /* allocate all the buffers */
            image = cvCreateImage(cvGetSize(frame), 8, 3);
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            pointadd[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            ptcolor = (int*)cvAlloc(MAX_COUNT*sizeof(ptcolor[0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );
        //CvRect rect = cvRect(image->width/2-50, 0, 100,image->height*0.6);
        
        if (night_mode)
            cvZero( image );

        countlast = ptcount;
        if (need_to_add) {
            /* automatic initialization */
            IplImage* eig = cvCreateImage(cvGetSize(grey), 32, 1);
            IplImage* temp = cvCreateImage(cvGetSize(grey), 32, 1);
            double quality = 0.01;
            double min_distance = 10;
            
            countadd = MAX_COUNT;
            //cvSetImageROI(grey, rect);
            //cvSetImageROI(eig, rect);
            //cvSetImageROI(temp, rect);
            
            cvGoodFeaturesToTrack(grey, eig, temp, pointadd[0], &countadd, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, pointadd[0], countadd, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));

            //for(l=0;l<countadd;l++)
            //	pointadd[0][l].x = pointadd[0][l].x + image->width/2-50;
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            //cvResetImageROI(grey);
            for (m = 0; m < countadd; m++) {
                flag = 1;
                for (i = 0; i < countlast; i++) {
                    double dx = pointadd[0][m].x - points[0][i].x;
                    double dy = pointadd[0][m].y - points[0][i].y;

                    if( dx*dx + dy*dy <= 100 ) {
                        flag = 0;
                        break;
                    }
                }

                if (flag==1) {
                    points[0][ptcount++] = pointadd[0][m];
                    cvCircle(image, cvPointFrom32f(points[1][ptcount-1]), 3, CV_RGB(255, 0, 0), -1, 8, 0);
                }
                if (ptcount >= MAX_COUNT) {
                    break;
                }
            }
        }

        if (need_to_init) {
            /* automatic initialization */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;
            
            ptcount = MAX_COUNT;
            cvGoodFeaturesToTrack(grey, eig, temp, points[1], &ptcount, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, points[1], ptcount, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            add_remove_pt = 0;
            /* set the point color */
            for( i=0; i<ptcount; i++ ){
                switch (i%5) {
                    case 0:
                        ptcolor[i] = 0;
                        break;
                    case 1:
                        ptcolor[i] = 1;
                        break;
                    case 2:
                        ptcolor[i] = 2;
                        break;
                    case 3:
                        ptcolor[i] = 3;
                        break;
                    case 4:
                        ptcolor[i] = 4;
                        break;
                    default:
                        ptcolor[i] = 0;
                }
            }
        }
        else if( ptcount > 0 ) {
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                    points[0], points[1], ptcount, cvSize(win_size, win_size), 3, status, 0,
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            for( i = k = 0; i < ptcount; i++ ) {
                if( add_remove_pt ) {
                    double dx = pointadd[0][m].x - points[1][i].x;
                    double dy = pointadd[0][m].y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 ) {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                pt = cvPointFrom32f(points[1][i]);
                pttl.x = pt.x-3; pttl.y = pt.y-3; // point top left
                ptdr.x = pt.x+3; ptdr.y = pt.y+3; // point down right

                if( !status[i] ){
                    pt = cvPointFrom32f(points[0][i]);
                    cvCircle( image, pt, 3, CV_RGB(0, 0, 255), -1, 8, 0);
                    continue;
                }

                pt = cvPointFrom32f(points[1][i]);
                points[1][k] = points[1][i];
                if(i<countlast){
                    /* matched feats */
                    ptcolor[k] = ptcolor[i];
                    switch (ptcolor[k]) {
                        case 0:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                            break;
                        case 1:
                            cvCircle( image, pt, 3, CV_RGB(255, 255, 0), -1, 8, 0);
                            break;
                        case 2:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 255), -1, 8, 0);
                            break;
                        case 3:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 255), -1, 8, 0);
                            break;
                        case 4:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 0), -1, 8, 0);                            
                            break;
                        default:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                }
                else
                    /* new feats */
                    switch (k%5) {
                        case 0:
                            //  void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, double color, int thickness=1 );
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                            ptcolor[k] = 0;
                            break;
                        case 1:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 255, 0), -1, 8, 0);
                            ptcolor[k] = 1;
                            break;
                        case 2:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 255), -1, 8, 0);
                            ptcolor[k] = 2;
                            break;
                        case 3:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 255), -1, 8, 0);
                            ptcolor[k] = 3;
                            break;
                        case 4:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 0), -1, 8, 0);
                            ptcolor[k] = 4;
                            break;
                        default:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                    k++;
            }
            ptcount = k;
        }

        if( add_remove_pt && ptcount < MAX_COUNT ) {
            points[1][ptcount++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + ptcount - 1, 1,
                    cvSize(win_size, win_size), cvSize(-1, -1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            add_remove_pt = 0;
        }

        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        std::string filename = "Rst/Rst";
        std::string seq;
        std::ostringstream fs;
        fs << imgseq << "\n";
        std::istringstream input(fs.str());
        input >> seq>> imgseq;
        filename += seq + ".jpg";
        cvSaveImage(filename.c_str(), image);
        imgseq++;
        if(imgseq>500)
            break;

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c ) {
            case 'r':
                need_to_init = 1;
                break;
            case 'c':
                ptcount = 0;
                break;
            case 'n':
                night_mode ^= 1;
                break;
            default:
                ;
        }
        if (ptcount<100) {
            need_to_init =1;
        }
        if (ptcount>50&&ptcount<MAX_COUNT) {
            need_to_add = 1;
        }
        final = clock()-init;
    }
Example #11
0
// Main function, defines the entry point for the program.
// Runs background subtraction + face detection on a video/camera stream
// (or on a single image when no capture can be opened) and writes SVG output.
int main( int argc, char** argv )
{
    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line.
    // When argc == 1, argv[1] is the NULL terminator of argv (the C
    // standard guarantees argv[argc] == NULL), so input_name becomes
    // NULL and the camera branch below is taken.
    if( argc <= 2 )
        input_name = argv[1];
    else
    {
        fprintf( stderr,
        "Usage: BSubtraction Filename\n" );
        system ("pause"); // MS-DOS pause command
        return -1;
    }

    // Configure output file; fail early instead of crashing later in
    // page_svg_close()/fclose() on a NULL stream.
    OutFile = fopen("svgout.svg", "w+");
    if( !OutFile )
    {
        fprintf( stderr, "ERROR: Could not open svgout.svg for writing\n" );
        return -1;
    }

    // Initialize face detection: load the HaarClassifierCascade.
    // cvLoad returns NULL on failure and the detector would then crash,
    // so verify the cascade actually loaded.
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        fclose( OutFile );
        return -1;
    }

    // Allocate the memory storage
    storage = cvCreateMemStorage(0);

    // Find whether to detect the object from file or from camera:
    // a single digit selects a camera index, anything else is a file.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name );

    // Create the display windows
    cvNamedWindow( "result", 1 );
    cvNamedWindow("original", 1);

    // If the capture is loaded succesfully, then:
    if( capture )
    {
        // Capture from the camera/video.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;

            // Allocate frame_copy as the same size of the frame (once)
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Reserve memory for background subtraction if you haven't already
            pGMM = (pGMM==0) ? cvCreatePixelBackgroundGMM(frame->width,frame->height)
                           : pGMM;
            pGMM->fAlphaT = .005f;  // GMM learning rate

            // Check the origin of image. If top left, copy the image
            // frame to frame_copy; else flip while copying.
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            else
                cvFlip( frame, frame_copy, 0 );

            // Call the function to detect and draw the faces
            detect_and_draw( frame_copy );

            // Wait for a while before proceeding to the next frame;
            // any key press stops the loop.
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }else{// Assume the image to be test.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"test.jpg";
        singleframe = true;
        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );
        // If Image is loaded succesfully, then:
        if( image ){
            pGMM = (pGMM==0) ? cvCreatePixelBackgroundGMM(image->width,image->height)
                           : pGMM;
            pGMM->fAlphaT = .005f;
            // Detect and draw the face
            detect_and_draw( image );

            // Release the image memory
            cvReleaseImage( &image );
        }
    }

    // Destroy the windows previously created
    cvDestroyWindow("result");
    cvDestroyWindow("original");

    // release the background subtraction structure
    //cvReleasePixelBackgroundGMM(&pGMM);

    // write out terminal data and close your svg file
    page_svg_close(OutFile);
    fclose(OutFile);

    // return 0 to indicate successful execution of the program
    return 0;
}
Example #12
0
int main(int argc, char **argv)
{
  std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;

  CvCapture *capture = 0;
  int resize_factor = 100;

  if(argc > 1)
  {
    std::cout << "Openning: " << argv[1] << std::endl;
    capture = cvCaptureFromAVI(argv[1]);
  }
  else
  {
    capture = cvCaptureFromCAM(0);
    resize_factor = 50; // set size = 50% of original image
  }

  if(!capture)
  {
    std::cerr << "Cannot initialize video!" << std::endl;
    return -1;
  }
  
  IplImage *frame_aux = cvQueryFrame(capture);
  IplImage *frame = cvCreateImage(cvSize((int)((frame_aux->width*resize_factor)/100) , (int)((frame_aux->height*resize_factor)/100)), frame_aux->depth, frame_aux->nChannels);
  cvResize(frame_aux, frame);

  /* Background Subtraction Methods */
  IBGS *bgs;

  /*** Default Package ***/
  bgs = new FrameDifferenceBGS;
  //bgs = new StaticFrameDifferenceBGS;
  //bgs = new WeightedMovingMeanBGS;
  //bgs = new WeightedMovingVarianceBGS;
  //bgs = new MixtureOfGaussianV1BGS;
  //bgs = new MixtureOfGaussianV2BGS;
  //bgs = new AdaptiveBackgroundLearning;
  //bgs = new AdaptiveSelectiveBackgroundLearning;
  //bgs = new GMG;
  
  /*** DP Package (thanks to Donovan Parks) ***/
  //bgs = new DPAdaptiveMedianBGS;
  //bgs = new DPGrimsonGMMBGS;
  //bgs = new DPZivkovicAGMMBGS;
  //bgs = new DPMeanBGS;
  //bgs = new DPWrenGABGS;
  //bgs = new DPPratiMediodBGS;
  //bgs = new DPEigenbackgroundBGS;
  //bgs = new DPTextureBGS;

  /*** TB Package (thanks to Thierry Bouwmans, Fida EL BAF and Zhenjie Zhao) ***/
  //bgs = new T2FGMM_UM;
  //bgs = new T2FGMM_UV;
  //bgs = new T2FMRF_UM;
  //bgs = new T2FMRF_UV;
  //bgs = new FuzzySugenoIntegral;
  //bgs = new FuzzyChoquetIntegral;

  /*** JMO Package (thanks to Jean-Marc Odobez) ***/
  //bgs = new MultiLayerBGS;

  /*** PT Package (thanks to Martin Hofmann, Philipp Tiefenbacher and Gerhard Rigoll) ***/
  //bgs = new PixelBasedAdaptiveSegmenter;

  /*** LB Package (thanks to Laurence Bender) ***/
  //bgs = new LBSimpleGaussian;
  //bgs = new LBFuzzyGaussian;
  //bgs = new LBMixtureOfGaussians;
  //bgs = new LBAdaptiveSOM;
  //bgs = new LBFuzzyAdaptiveSOM;

  /*** LBP-MRF Package (thanks to Csaba Kertész) ***/
  //bgs = new LbpMrf;

  /*** AV Package (thanks to Lionel Robinault and Antoine Vacavant) ***/
  //bgs = new VuMeter;

  /*** EG Package (thanks to Ahmed Elgammal) ***/
  //bgs = new KDE;
  
  /*** DB Package (thanks to Domenico Daniele Bloisi) ***/
  //bgs = new IndependentMultimodalBGS;

  /*** SJN Package (thanks to SeungJong Noh) ***/
  //bgs = new SJN_MultiCueBGS;

  /*** BL Package (thanks to Benjamin Laugraud) ***/
  //bgs = new SigmaDeltaBGS;

  int key = 0;
  while(key != 'q')
  {
    frame_aux = cvQueryFrame(capture);
    if(!frame_aux) break;

    cvResize(frame_aux, frame);
    
    cv::Mat img_input(frame);
    cv::imshow("input", img_input);

    cv::Mat img_mask;
    cv::Mat img_bkgmodel;
    bgs->process(img_input, img_mask, img_bkgmodel); // by default, it shows automatically the foreground mask image
    
    //if(!img_mask.empty())
    //  cv::imshow("Foreground", img_mask);
    //  do something
    
    key = cvWaitKey(33);
  }

  delete bgs;

  cvDestroyAllWindows();
  cvReleaseCapture(&capture);

  return 0;
}
/**************************** Main ********************************/
int main( int argc, char** argv )
{
    IplImage *frame;
    CvCapture* video;
    int frame_num = 0;
    int i;

    arg_parse( argc, argv );

    // initialization
    cvParticleObserveInitialize( featsize );

    // read a video
    if( !vid_file || (isdigit(vid_file[0]) && vid_file[1] == '\0') )
        video = cvCaptureFromCAM( !vid_file ? 0 : vid_file[0] - '0' );
    else
        video = cvCaptureFromAVI( vid_file ); 
    if( (frame = cvQueryFrame( video )) == NULL )
    {
        fprintf( stderr, "Video %s is not loadable.\n", vid_file );
        usage();
        exit(1);
    }

    // allows user to select initial region
    CvRect region;
    icvGetRegion( frame, &region );

    // configure particle filter
    bool logprob = true;
    CvParticle *particle = cvCreateParticle( num_states, num_particles, logprob );
    CvParticleState std = cvParticleState (
        std_x,
        std_y,
        std_w,
        std_h,
        std_r
    );
    cvParticleStateConfig( particle, cvGetSize(frame), std );

    // initialize particle filter
    CvParticleState s;
    CvParticle *init_particle;
    init_particle = cvCreateParticle( num_states, 1 );
    CvRect32f region32f = cvRect32fFromRect( region );
    CvBox32f box = cvBox32fFromRect32f( region32f ); // centerize
    s = cvParticleState( box.cx, box.cy, box.width, box.height, 0.0 );
    cvParticleStateSet( init_particle, 0, s );
    cvParticleInit( particle, init_particle );
    cvReleaseParticle( &init_particle );

    // template
    IplImage* reference = cvCreateImage( featsize, frame->depth, frame->nChannels );
    IplImage* tmp = cvCreateImage( cvSize(region.width,region.height), frame->depth, frame->nChannels );
    cvCropImageROI( frame, tmp, region32f );
    cvResize( tmp, reference );
    cvReleaseImage( &tmp );

    while( ( frame = cvQueryFrame( video ) ) != NULL )
    {
        // Draw new particles
        cvParticleTransition( particle );
        // Measurements
        cvParticleObserveMeasure( particle, frame, reference );

        // Draw all particles
        for( i = 0; i < particle->num_particles; i++ )
        {
            CvParticleState s = cvParticleStateGet( particle, i );
            cvParticleStateDisplay( s, frame, CV_RGB(0,0,255) );
        }
        // Draw most probable particle
        //printf( "Most probable particle's state\n" );
        int maxp_id = cvParticleGetMax( particle );
        CvParticleState maxs = cvParticleStateGet( particle, maxp_id );
        cvParticleStateDisplay( maxs, frame, CV_RGB(255,0,0) );
        ///cvParticleStatePrint( maxs );
        
        // Save pictures
        if( arg_export ) {
            sprintf( export_filename, export_format, vid_file, frame_num );
            printf( "Export: %s\n", export_filename ); fflush( stdout );
            cvSaveImage( export_filename, frame );
        }
        cvShowImage( "Select an initial region > SPACE > ESC", frame );

        // Normalize
        cvParticleNormalize( particle);
        // Resampling
        cvParticleResample( particle );

        char c = cvWaitKey( 1000 );
        if(c == '\x1b')
            break;
    }

    cvParticleObserveFinalize();
    cvDestroyWindow( "Select an initial region > SPACE > ESC" );
    cvReleaseImage( &reference );
    cvReleaseParticle( &particle );
    cvReleaseCapture( &video );
    return 0;
}
Example #14
0
int main(void)  
{        
    int key = 0;
	int seek_line_idx = 120;
	
	CvPoint pt1 = cvPoint(0,seek_line_idx);
	CvPoint pt2 = cvPoint(350,seek_line_idx);
		
	CvPoint pt1_beam_right = cvPoint(180,0);
	CvPoint pt2_beam_right = cvPoint(180,250);
	
	CvPoint pt1_beam_left = cvPoint(160,0);
	CvPoint pt2_beam_left = cvPoint(160,250);
	
	CvScalar red = CV_RGB(250,0,0);
	CvScalar green = CV_RGB(0,0,250);
	CvScalar white = CV_RGB(255,255,255);
	
	int thickness = 1;
	int connectivity = 8;
	int sub, res;
	int j;
	char buffer[BUFSIZ*2], *ptr;  // BUFSIZ = 1024
	int array[350]; 
	
	printf("Array Length: %d\n", (int)ARRAYSIZE(array));
	
	// Load background file:
	const char filename[] = "example.txt";
	FILE *file = fopen(filename, "r");
	if ( file ){	
		// Read newline terminated line:
	   //	for ( i = 0; fgets(buffer, sizeof(buffer) , file); ++i )
	   //	{
		fgets(buffer, sizeof(buffer) , file);
		
		printf("Buf Length: %d\n", (int)ARRAYSIZE(buffer));		
		//printf("%s\n\n", buffer);
		
		// Parse the comma-separated values from each line into 'array'	
		for (ptr = buffer, j = 0; j < ARRAYSIZE(array); ++j, ++ptr )
 		{
 		 	array[j] = (int)strtol(ptr, &ptr, 10);
			//printf("%d: %d\n", j, array[j]);

 		}
		// }
		fclose(file);
	}
	else{
		printf("Can't load example.txt");
		return 0;
	}
	
	// Initialize camera and OpenCV image  
    //CvCapture *capture = cvCaptureFromCAM( 0 );  
    CvCapture *capture = cvCaptureFromAVI( "sample_plug.avi" );    
    	
	IplImage *frame = cvQueryFrame( capture );
	IplImage *gray_frame = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1 );
	
	int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS ); 
	
	printf("\nFPS: %f\n", float( fps ));
	printf("Image Width: %d\n", int( frame->width ));
	printf("Image Height: %d\n", int( frame->height ));

	cvNamedWindow("video", CV_WINDOW_AUTOSIZE );
	cvNamedWindow("Plot", CV_WINDOW_AUTOSIZE );

	float clean_signal[frame->width];
	float subtract[frame->width];

	int step_key = 0;
	
	CvSize gauss_size = cvSize( 11, 11 );

	while( key != 'x' )
	{
		frame = cvQueryFrame( capture );
		
		if( !frame )
			break;
			
		cvCvtColor( frame, gray_frame, CV_RGB2GRAY ); 
		//cvGaussianBlur(gray_frame, gray_frame, gauss_size, 0);
		
		if( key == 'p'){
			key = 0;
			while( key != 'p' && key != 27 ){
				key = cvWaitKey( 250 );
			}
		}
		
		for( int i = gray_frame->width-1; i >= 0 ; i-- )
		{
			//Get image intensity on seek_line:
			//uchar val = gray_frame.at<uchar>(seek_line_idx, i);
			uchar val = CV_IMAGE_ELEM( gray_frame, uchar, seek_line_idx, i );
			
			//Get background intensity:
			sub = array[i];
			
			//Avoid chaos if itensity-bg < 0
			res = (255-val) + uchar( sub )-250;
			if(res < 0)
				res = 1;
			
			//Save itensity-bg value
			clean_signal[i] = res;
	
			// plot curve:
			//plt.at<uchar>(res, i) = 250;	
			//std::cout << res << "\n";	
		}
		
		for( int i = gray_frame->width; i >= 0; i-- )
		{
			if( double(clean_signal[i]) > 80.0 )
			{
				CvPoint pt1_plug = cvPoint( i, 0 );
				CvPoint pt2_plug = cvPoint( i, 250 );
				
				cvLine( gray_frame, pt1_plug, pt2_plug, white, thickness, connectivity );
				//line_location = i;
				break;
			}
		}
		
		cvLine(gray_frame, pt1, pt2, red, thickness,connectivity);
		//cvLine(gray_frame, pt1_beam_right, pt2_beam_right, red, thickness, connectivity);
		cvLine(gray_frame, pt1_beam_left, pt2_beam_left, red, thickness, connectivity);
		
		cvShowImage( "Plot", gray_frame );
		
		key = cvWaitKey( 1000 / fps ); 
		
	}

	cvReleaseCapture( &capture );
	//cvReleaseImage( &frame );  //This causes crash..why??
	//cvReleaseImage( &gray_frame );	
	cvDestroyWindow( "video" );
	cvDestroyWindow( "Plot" );
	
	return 0;
	
} //end
Example #15
0
// Construct a VidCapture reading frames from the video file at filePath.
// NOTE(review): cvCaptureFromAVI returns NULL on failure, so `capture`
// may be NULL afterwards — callers presumably must check before use.
VidCapture::VidCapture(char * filePath) {
	capture = cvCaptureFromAVI(filePath);
}
// Entry point: loads a Haar cascade (argv[1]) and a video (argv[2]),
// optionally resizes frames to argv[3] percent, and runs detect() on
// every frame.  SPACE pauses, ESC quits.
int main(int argc, char** argv)
{
  std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;
  
  CvCapture *capture;
  IplImage  *frame;
  int input_resize_percent = 100;
  
  if(argc < 3)
  {
    // The original usage text contained a pasted machine-local sftp path;
    // print a generic usage line instead.
    std::cout << "Usage " << argv[0] << " cascade.xml video_file [resize_percent]" << std::endl;
    return 0;
  }

  if(argc == 4)
  {
    input_resize_percent = atoi(argv[3]);
    std::cout << "Resizing to: " << input_resize_percent << "%" << std::endl;
  }

  cascade = (CvHaarClassifierCascade*) cvLoad(argv[1], 0, 0, 0);
  storage = cvCreateMemStorage(0);
  capture = cvCaptureFromAVI(argv[2]);

  // assert() is compiled out under NDEBUG, so validate the resources
  // explicitly instead of the original assert(cascade && storage && capture).
  if(!cascade || !storage || !capture)
  {
    std::cerr << "ERROR: failed to load the cascade, allocate storage, or open the video" << std::endl;
    return 1;
  }

  cvNamedWindow("video", 1);

  // Read the first frame to size the working image; cvQueryFrame can
  // return NULL for an empty/corrupt video (the original dereferenced
  // it unconditionally).
  IplImage* frame1 = cvQueryFrame(capture);
  if(!frame1)
  {
    std::cerr << "ERROR: cannot read the first frame" << std::endl;
    cvReleaseCapture(&capture);
    return 1;
  }
  frame = cvCreateImage(cvSize((int)((frame1->width*input_resize_percent)/100) , (int)((frame1->height*input_resize_percent)/100)), frame1->depth, frame1->nChannels);

  int key = 0;
  do
  {
    frame1 = cvQueryFrame(capture);

    if(!frame1)
      break;

    cvResize(frame1, frame);

    detect(frame);

    key = cvWaitKey(33);

    if(key == KEY_SPACE)
      key = cvWaitKey(0);   // pause until any key is pressed

    if(key == KEY_ESC)
      break;

  }while(1);

  cvDestroyAllWindows();
  cvReleaseImage(&frame);
  cvReleaseCapture(&capture);
  cvReleaseHaarClassifierCascade(&cascade);
  cvReleaseMemStorage(&storage);

  return 0;
}
Example #17
0
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
    int scale = 2;
    // Structure for getting video from camera or avi
    CvCapture* capture = 0;
    // Images to capture the frame from video or camera or from file
    IplImage *frame = 0, *frame_copy = 0;
    // Used for calculations
    int optlen = strlen("--cascade=");
    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    } else if (strncmp(argv[1], "train", 5) == 0) {
	learn_eigenfaces();
	exit(0);
    } else if (strncmp(argv[1], "test", 4) == 0) {
	recognize_eigenfaces();
	exit(0);
    } else {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
        /*input_name = argc > 1 ? argv[1] : 0;*/
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report and error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
   
    cascade_eyes = (CvHaarClassifierCascade*)cvLoad(cascade_eyes_name, 0, 0, 0 );
    if (!cascade_eyes) {
	fprintf(stderr, "ERROR: failed to load eye classifier cascade\n" );
	return -1;
    }
 
    char *ext = strrchr(input_name, '.');
    // Allocate the memory storage
    storage = cvCreateMemStorage(0);
    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') ){
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    } else if (ext && strncmp(ext, ".txt", 4) == 0) {
	capture = NULL;
    } else
        capture = cvCaptureFromAVI( input_name ); 

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );
    // Find if the capture is loaded successfully or not.

    // If loaded succesfully, then:
    if( capture )
    {
 
        // Capture from the camera.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture, 0 );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;

            if (!frame_copy) {
             	   printf("Allocate image\n");
		   frame_copy = cvCreateImage(cvSize(frame->width/2,frame->height/2),
                                   8, 3);
	    }
            cvResize(frame, frame_copy, CV_INTER_LINEAR);
 	    //cvCopy(frame, frame_copy,0);

            // Call the function to detect and draw the face
            //detect_and_draw( frame_copy );
	    process_image(frame_copy);
	    //cvShowImage("result", frame_copy);
            // Wait for a while before proceeding to the next frame
            cvWaitKey(1);
	    //if( cvWaitKey( 10 ) >= 0 )
            //    break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
	//cvReleaseImage( &frame_resized );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded succesfully, then:
    else
    {
	still = 1;
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

	IplImage* image = NULL;
	printf("%s\n", filename);
	if (strncmp(strrchr(filename, '.')+1, "txt", 3) != 0) {
        // Load the image from that filename
            image = cvLoadImage( filename, 1 );

        // If Image is loaded succesfully, then:
        //if( image )
        //{
            // Detect and draw the face
            //detect_and_draw( image );
	    process_image(image);
            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
	    printf("Not an image\n");
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded succesfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
                        //detect_and_draw( image );
                        process_image(image);
                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successfull execution of the program
    return 0;
}
Example #18
0
int main(int argc, char **argv)
{
  CvCapture *capture = 0;
  int resize_factor = 100;

  if(argc > 1)
  {
    std::cout << "Openning: " << argv[1] << std::endl;
    capture = cvCaptureFromAVI(argv[1]);
  }
  else
  {
    capture = cvCaptureFromCAM(0);
    resize_factor = 50; // set size = 50% of original image
  }

  if(!capture)
  {
    std::cerr << "Cannot initialize video!" << std::endl;
    return 1;
  }
  
  IplImage *frame_aux = cvQueryFrame(capture);
  IplImage *frame = cvCreateImage(cvSize((int)((frame_aux->width*resize_factor)/100) , (int)((frame_aux->height*resize_factor)/100)), frame_aux->depth, frame_aux->nChannels);
  cvResize(frame_aux, frame);

  /* Background Subtraction Methods */
  IBGS *bgs;

  /*** Default Package ***/
  bgs = new FrameDifferenceBGS;
  //bgs = new StaticFrameDifferenceBGS;
  //bgs = new WeightedMovingMeanBGS;
  //bgs = new WeightedMovingVarianceBGS;
  //bgs = new MixtureOfGaussianV1BGS;
  //bgs = new MixtureOfGaussianV2BGS;
  //bgs = new AdaptiveBackgroundLearning;
  //bgs = new GMG;
  
  /*** DP Package (adapted from Donovan Parks) ***/
  //bgs = new DPAdaptiveMedianBGS;
  //bgs = new DPGrimsonGMMBGS;
  //bgs = new DPZivkovicAGMMBGS;
  //bgs = new DPMeanBGS;
  //bgs = new DPWrenGABGS;
  //bgs = new DPPratiMediodBGS;
  //bgs = new DPEigenbackgroundBGS;
  //bgs = new DPTextureBGS;

  /*** TB Package (adapted from Thierry Bouwmans) ***/
  //bgs = new T2FGMM_UM;
  //bgs = new T2FGMM_UV;
  //bgs = new T2FMRF_UM;
  //bgs = new T2FMRF_UV;
  //bgs = new FuzzySugenoIntegral;
  //bgs = new FuzzyChoquetIntegral;

  /*** JMO Package (adapted from Jean-Marc Odobez) ***/
  //bgs = new MultiLayerBGS;

  /*** PT Package (adapted from Hofmann) ***/
  //bgs = new PixelBasedAdaptiveSegmenter;

  /*** LB Package (adapted from Laurence Bender) ***/
  //bgs = new LBSimpleGaussian;
  //bgs = new LBFuzzyGaussian;
  //bgs = new LBMixtureOfGaussians;
  //bgs = new LBAdaptiveSOM;
  //bgs = new LBFuzzyAdaptiveSOM;

  /*** LBP-MRF Package (adapted from Csaba Kertész) ***/
  //bgs = new LbpMrf;

  /*** AV Package (adapted from Antoine Vacavant) ***/
  //bgs = new VuMeter;

  /*** EG Package (adapted from Ahmed Elgammal) ***/
  //bgs = new KDE;

  int key = 0;
  while(key != 'q')
  {
    frame_aux = cvQueryFrame(capture);
    if(!frame_aux) break;

    cvResize(frame_aux, frame);
    
    cv::Mat img_input(frame);
    cv::imshow("input", img_input);

    cv::Mat img_mask;
    cv::Mat img_bkgmodel;
    bgs->process(img_input, img_mask, img_bkgmodel); // automatically shows the foreground mask image
    
    //if(!img_mask.empty())
    //  do something
    
    key = cvWaitKey(33);
  }

  delete bgs;

  cvDestroyAllWindows();
  cvReleaseCapture(&capture);
  
  return 0;
}
Example #19
0
 // Construct a Capture that reads frames from the AVI file at `filepath`.
 // NOTE(review): cvCaptureFromAVI may return NULL on failure; `capture` is
 // not checked here — presumably callers validate it before use. TODO confirm.
 // `previousFrame` starts out null until the first frame is retrieved.
 Capture::Capture(const char* filepath) :
 previousFrame(0) {
     capture = cvCaptureFromAVI(filepath);
 }
Example #20
0
int
main (int argc, char **argv)
{
  // Simple AVI player: a trackbar shows/controls the playback position,
  // 'p' toggles play/pause, 's' saves the current frame as a JPEG, ESC quits.
  // Uses file-scope `capture`, `frame`, `slider_position`, `onTrackBarSlide`.
  char* filename;              // video file to play (argv[1])
  const char* imagename;       // snapshot file name
  if (argc == 2){
    filename = argv[1];
  }else{
    usage();
    return -1;
  }

    printf ("########### #############\n"
      "video_test, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tp - play/stop\n"
            "\ts - save current frame as jpg\n"
            "\tother key - next frame\n"
            "\n" );

  double w = 320, h = 240;
  int c;

  // Fail if the given AVI file cannot be opened.
  if(NULL==(capture = cvCaptureFromAVI(filename))){
    fprintf(stderr,"指定のaviファイル %s が見つかりませんでした.", filename);
    return -1;
  }

  // Request a capture size (the backend may ignore this).
  cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, w);
  cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, h);

  cvNamedWindow ("Capture", CV_WINDOW_AUTOSIZE);

  // Add a position slider when the frame count is known.
  int frames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
  if(frames != 0){
    cvCreateTrackbar("Position", "Capture", &slider_position, frames, onTrackBarSlide);
  }

  int play = 0;
  // Capture images from the video one frame at a time.
  while (1) {
    frame = cvQueryFrame (capture);

    // BUGFIX: stop at end of stream — the original passed a NULL frame
    // straight to cvShowImage.
    if (!frame)
      break;

    slider_position = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
    cvSetTrackbarPos("Position", "Capture", slider_position);

    cvShowImage ("Capture", frame);

    // While playing, advance automatically; otherwise wait for a key.
    if(play){
      c = cvWaitKey (2);
    }else{
      c = cvWaitKey (0);
    }

    // ESC quits.
    if (c == '\x1b')
      break;

    // Handle the remaining hot keys.
    switch( (char) c ) {
      case 'p':
          play ^= 1;
          break;
      case 's':
          // NOTE: the original commented-out sprintf into `imagename`
          // crashed (Bus error) because imagename was an uninitialized
          // pointer; a fixed file name is used instead.
          imagename = "image.jpg";
          cvSaveImage(imagename, frame);
          printf("Current frame is saved as %s\n", imagename); 
          break;
      default:
          ;
    }
  }

  cvReleaseCapture (&capture);
  cvDestroyWindow ("Capture");

  return 0;
}
Example #21
0
int main( int argc, char** argv )
{
  // Running-average background subtraction demo.  With no arguments it plays
  // a default AVI file; with one argument it opens that video file.  Shows
  // the input, the learned background, and the thresholded foreground.
  const char avi_name[]="..\\res\\de.avi";
  // IplImage pointers
  IplImage* pFrame = NULL;      // current colour frame
  IplImage* pFrImg = NULL;      // foreground (binary, 8-bit)
  IplImage* pBkImg = NULL;      // background converted for display
 
  CvMat* pFrameMat = NULL;      // current frame as float matrix
  CvMat* pFrMat = NULL;         // |frame - background|
  CvMat* pBkMat = NULL;         // running-average background model
 
  CvCapture* pCapture = NULL;
 
  int nFrmNum = 0;              // 1-based frame counter
 
  // Create the display windows...
  cvNamedWindow("video", 1);
  cvNamedWindow("background",1);
  cvNamedWindow("foreground",1);
  // ...and tile them left to right.
  cvMoveWindow("video", 30, 0);
  cvMoveWindow("background", 360, 0);
  cvMoveWindow("foreground", 690, 0);
 
  if( argc > 2 )
    {
      fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
      return -1;
    }
 
  // No argument: open the default AVI file.
  if (argc ==1)
    if( !(pCapture = cvCaptureFromAVI(avi_name)))
      {
        // BUGFIX: this path opens the default AVI file, but the original
        // message claimed "Can not open camera".
        fprintf(stderr, "Can not open video file %s\n", avi_name);
        return -2;
      }
 
  // One argument: open the named video file.
  if(argc == 2)
    if( !(pCapture = cvCaptureFromFile(argv[1])))
      {
        fprintf(stderr, "Can not open video file %s\n", argv[1]);
        return -2;
      }
 
  // Read the video frame by frame.
  while( (pFrame = cvQueryFrame( pCapture )) != NULL )
    {
      nFrmNum++;
 
      // First frame: allocate buffers and seed the background model.
      if(nFrmNum == 1)
        {
          pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
          pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
 
          pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
          pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
          pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
 
          // Convert to a single channel before processing.
          cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
          cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
 
          cvConvert(pFrImg, pFrameMat);
          cvConvert(pFrImg, pFrMat);
          cvConvert(pFrImg, pBkMat);
        }
      else
        {
          cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
          cvConvert(pFrImg, pFrameMat);
          // Optional Gaussian smoothing before differencing.
          //cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
 
          // Absolute difference between current frame and the background.
          cvAbsDiff(pFrameMat, pBkMat, pFrMat);
 
          // Threshold the difference to get a binary foreground mask.
          cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);
 
          // Optional morphological filtering to remove noise.
          //cvErode(pFrImg, pFrImg, 0, 1);
          //cvDilate(pFrImg, pFrImg, 0, 1);
 
          // Update the background with a slow running average.
          cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
          // Convert the background back to image format for display.
          cvConvert(pBkMat, pBkImg);
 
          cvShowImage("video", pFrame);
          cvShowImage("background", pBkImg);
          cvShowImage("foreground", pFrImg);
 
          // The wait also gives cvShowImage time to paint; any key quits.
          if( cvWaitKey(2) >= 0 )
            break;
        }
    }
 
  // Destroy the windows.
  cvDestroyWindow("video");
  cvDestroyWindow("background");
  cvDestroyWindow("foreground");
 
  // Release images and matrices.
  cvReleaseImage(&pFrImg);
  cvReleaseImage(&pBkImg);
 
  cvReleaseMat(&pFrameMat);
  cvReleaseMat(&pFrMat);
  cvReleaseMat(&pBkMat);
 
  cvReleaseCapture(&pCapture);
 
  return 0;
}
Example #22
0
int main222( int argc,   char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "To initialize tracking, select the object with mouse\n" );

    cvNamedWindow( "Histogram", 1 );
    cvNamedWindow( "CamShiftDemo", 1 );
    cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
    cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
    cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
    cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, bin_w, c;


        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );

        if( track_object )
        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );

            if( track_object < 0 )
            {
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
            }

            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;

            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( !image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        cvShowImage( "CamShiftDemo", image );
        cvShowImage( "Histogram", histimg );

        c = cvWaitKey(10);
        if( (char) c == 27 )
            break;
        switch( (char) c )
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero( histimg );
            break;
        case 'h':
            show_hist ^= 1;
            if( !show_hist )
                cvDestroyWindow( "Histogram" );
            else
                cvNamedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("CamShiftDemo");

    return 0;
}
// Driver for the Algorithms::BackgroundSubtraction framework: reads an AVI,
// runs the selected background-subtraction algorithm (Eigenbackground here;
// the commented blocks are alternative configurations), and writes the low
// threshold foreground mask to an output AVI.
// NOTE(review): main falls off the end without a return statement — legal in
// C++ (implicit return 0) but worth making explicit.
int main(int argc, const char* argv[])
{
	// read data from AVI file
	CvCapture* readerAvi = cvCaptureFromAVI("data/fountain.avi");
	if(readerAvi == NULL)
	{
		std::cerr << "Could not open AVI file." << std::endl;
		return 0;
	}

	// retrieve information about AVI file
	// (a first frame must be decoded before the properties are reliable)
	cvQueryFrame(readerAvi); 
	int width	= (int) cvGetCaptureProperty(readerAvi, CV_CAP_PROP_FRAME_WIDTH);
	int height = (int) cvGetCaptureProperty(readerAvi, CV_CAP_PROP_FRAME_HEIGHT);
	int fps = (int) cvGetCaptureProperty(readerAvi, CV_CAP_PROP_FPS);
	int num_frames = (unsigned int) cvGetCaptureProperty(readerAvi,  CV_CAP_PROP_FRAME_COUNT);

	// setup marks to hold results of low and high thresholding
	// (BwImage appears to be a project wrapper around IplImage* — see Ptr())
	BwImage low_threshold_mask = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	low_threshold_mask.Ptr()->origin = IPL_ORIGIN_BL;

	BwImage high_threshold_mask = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	high_threshold_mask.Ptr()->origin = IPL_ORIGIN_BL;

	// setup buffer to hold individual frames from video stream
	RgbImage frame_data;
	frame_data.ReleaseMemory(false);	// AVI frame data is released by with the AVI capture device

	// setup AVI writers (note: you will need to install the HUFFY codex at: 
	//   http://neuron2.net/www.math.berkeley.edu/benrg/huffyuv.html)
	CvVideoWriter* writerAvi = cvCreateVideoWriter("output/results.avi", CV_FOURCC('H', 'F', 'Y', 'U'),	
															fps, cvSize(width, height), 1);

	// setup background subtraction algorithm
	// (exactly one of the following configuration blocks should be active)
	/*
	Algorithms::BackgroundSubtraction::AdaptiveMedianParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 40;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.SamplingRate() = 7;
	params.LearningFrames() = 30;

	Algorithms::BackgroundSubtraction::AdaptiveMedianBGS bgs;
	bgs.Initalize(params);
	*/

	/*
	Algorithms::BackgroundSubtraction::GrimsonParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 3.0f*3.0f;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.Alpha() = 0.001f;
	params.MaxModes() = 3;

	Algorithms::BackgroundSubtraction::GrimsonGMM bgs;
	bgs.Initalize(params);
	*/

	/*
	Algorithms::BackgroundSubtraction::ZivkovicParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 5.0f*5.0f;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.Alpha() = 0.001f;
	params.MaxModes() = 3;

	Algorithms::BackgroundSubtraction::ZivkovicAGMM bgs;
	bgs.Initalize(params);
	*/

	/*
	Algorithms::BackgroundSubtraction::MeanParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 3*30*30;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.Alpha() = 1e-6f;
	params.LearningFrames() = 30;

	Algorithms::BackgroundSubtraction::MeanBGS bgs;
	bgs.Initalize(params);
	*/

	/*
	Algorithms::BackgroundSubtraction::WrenParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 3.5f*3.5f;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.Alpha() = 0.005f;
	params.LearningFrames() = 30;

	Algorithms::BackgroundSubtraction::WrenGA bgs;
	bgs.Initalize(params);
	*/

	/*
	Algorithms::BackgroundSubtraction::PratiParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 30;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.SamplingRate() = 5;
	params.HistorySize() = 16;
	params.Weight() = 5;

	Algorithms::BackgroundSubtraction::PratiMediodBGS bgs;
	bgs.Initalize(params);
	*/

	Algorithms::BackgroundSubtraction::EigenbackgroundParams params;
	params.SetFrameSize(width, height);
	params.LowThreshold() = 15*15;
	params.HighThreshold() = 2*params.LowThreshold();	// Note: high threshold is used by post-processing 
	params.HistorySize() = 100;
	params.EmbeddedDim() = 20;

	Algorithms::BackgroundSubtraction::Eigenbackground bgs;
	bgs.Initalize(params);

	// perform background subtraction of each frame 
	// (num_frames-1 because one frame was already consumed by the probe above)
	for(int i = 0; i < num_frames-1; ++i)
	{
		if(i % 100 == 0)
			std::cout << "Processing frame " << i << " of " << num_frames << "..." << std::endl;

		// grad next frame from input video stream
		if(!cvGrabFrame(readerAvi))
		{           
			std::cerr << "Could not grab AVI frame." << std::endl;
			return 0;
		}		
		frame_data = cvRetrieveFrame(readerAvi); 
		
		// initialize background model to first frame of video stream
		if (i == 0)
			bgs.InitModel(frame_data); 

		// perform background subtraction
		bgs.Subtract(i, frame_data, low_threshold_mask, high_threshold_mask);

		// save results
		cvWriteFrame(writerAvi, low_threshold_mask.Ptr());

		// update background subtraction
		low_threshold_mask.Clear();	// disable conditional updating
		bgs.Update(i, frame_data, low_threshold_mask);
	}

	cvReleaseCapture(&readerAvi);
	cvReleaseVideoWriter(&writerAvi);
}
int main( int argc, char** argv ) 
{ 
     
    FILE *ptr; 
    ptr=fopen("dataerr.dat","w+"); 
    CvCapture* capture = 0; 
 
    int counter1=0; 
 
    IplImage* image2 = 0; 
 
    float sumX=0; 
    float sumY=0; 
 
    float err_X; 
    float err_Y; 
 
    int XX=0; 
    int YY=0; 
 
    CvPoint ipt1; 
 
    int tempxx1=0; 
    int tempyy1=0; 
    int tempxx2=0; 
    int tempyy2=0; 
 
     
 
    char *imgFmt="pgm"; 
    char str1[100]; 
 
    /* Initailize the error array */ 
    for(int kk=0;kk<=400;kk++) 
    { 
        optical_flow_error[0][kk]=0; 
        optical_flow_errorP[0][kk]=0; 
        optical_flow_error[1][kk]=0; 
        optical_flow_errorP[1][kk]=0; 
    } 
 
    //capturing frame from video 
    capture = cvCaptureFromAVI("soccer_track.mpeg"); 
 
    cvNamedWindow( "KLT-Tracking Group_R", 0 ); 
    cvSetMouseCallback( "KLT-Tracking Group_R", on_mouse, 0 ); 
 
    if(add_remove_pt==1) 
    { 
        flagg=1; 
    } 
 
    for(;;) 
    { 
        IplImage* frame = 0; 
         
        int i, k, c; 
 
        //creating file name 
        sprintf(str1,"%d.%s",counter1,imgFmt); 
        err_X=0; 
        err_Y=0; 
        sumX=0; 
        sumY=0; 
 
        //decompressing the grab images 
 
        frame = cvQueryFrame( capture ); 
 
     
        if( !frame ) 
            break; 
 
        if( !image ) 
            //The first frame:to allocation some memories,and do somen initialization work 
        { 
            // allocate all the image buffers  
            image = cvCreateImage( cvGetSize(frame), 8, 3 ); 
            image->origin = frame->origin; 
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );//make it grey 
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );//the previous frame in grey mode 
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//pyramid frame 
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//previous pyramid frame 
            /* Define two pointers */ 
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            status = (char*)cvAlloc(MAX_COUNT); 
            flags = 0; 
        } 
 
        cvCopy( frame, image, 0 );//frame->image 
 
        //converting the image into gray scale for further computation 
        cvCvtColor( image, grey, CV_BGR2GRAY ); 
         
        if( need_to_init ) 
        { 
             
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            double quality = 0.01; 
            double min_distance = 10; 
         
 
            //using good features to track 
            count = MAX_COUNT; 
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count, 
                                   quality, min_distance, 0, 3, 0, 0.04 ); 
            cvFindCornerSubPix( grey, points[1], count, 
            cvSize(win_size,win_size), cvSize(-1,-1), 
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            cvReleaseImage( &eig ); 
            cvReleaseImage( &temp ); 
 
 
 
            add_remove_pt = 0; 
        } 
        else if( count > 0 ) 
        { 
            //using pyramidal optical flow method 
            cvCalcOpticalFlowPyrLK(  
                    prev_grey, grey,  
                    prev_pyramid, pyramid, 
                    points[0], points[1],  
                    count, cvSize(win_size,win_size),  
                    5, status,0, 
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags ); 
             
            flags |= CV_LKFLOW_PYR_A_READY|CV_LKFLOW_PYR_B_READY; 
 
            for( i = k = 0; i < count; i++ ) 
            { 
                /* When need to add or remove the point */ 
                if( add_remove_pt ) 
                { 
 
                    double dx = pt.x - points[1][i].x; 
                    double dy = pt.y - points[1][i].y; 
                    /* Calulate the distance between the point you select and the point tracked  
                    if they are far from less than 5,stop the add or move action     
                    */ 
                    if( dx*dx + dy*dy <= 25 ) 
                    { 
                        add_remove_pt = 0; 
                        continue; 
                    } 
                } 
                 
                if( !status[i] )//if the point is not tracked correctly,pass! 
                    continue; 
                
                points[1][k++] = points[1][i]; 
 
                ipt1=cvPointFrom32f(points[1][i]);//get a point 
                 
            //calculating error here,initalize the error array 
                optical_flow_error[0][i]=ipt1.x; 
                optical_flow_error[1][i]=ipt1.y; 
 
 
            } 
            //taking average error for moving the window 
 
            for(int zz=0; zz<=count;zz++) 
                { 
                    errX[zz]=optical_flow_error[0][zz]- optical_flow_errorP[0][zz]; 
                    errY[zz]=optical_flow_error[1][zz]- optical_flow_errorP[1][zz]; 
 
                    sumX=sumX+errX[zz]; 
                    sumY=sumY+errY[zz]; 
 
                    optical_flow_errorP[0][zz]=optical_flow_error[0][zz]; 
                    optical_flow_errorP[1][zz]=optical_flow_error[1][zz]; 
 
                } 
 
                fprintf(ptr,"%d\n",count); 
                 
                err_X=sumX/count; 
                err_Y=sumY/count; 
 
            if(flagg==1) 
            { 
              int static startonce=0; 
 
            if(startonce==0) 
            { 
                 
             
            tempxx1=pt.x-20; 
            tempyy1=pt.y-20; 
 
            tempxx2=pt.x+20; 
            tempyy2=pt.y+20; 
 
            XX=pt.x; 
            YY=pt.y; 
 
            startonce=1; 
 
            } 
            if(err_X<3) 
            { 
                tempxx1=tempxx1+err_X; 
                tempyy1=tempyy1+err_Y; 
                tempxx2=tempxx2+err_X; 
                tempyy2=tempyy2+err_Y; 
 
                XX=XX+err_X; 
                YY=YY+err_Y; 
                fprintf(ptr,"%f %f\n",err_X,err_Y); 
            } 
 
            printf("\n%f",err_X); 
 
            //moving window 
 
            cvRectangle(image, cvPoint(tempxx1,tempyy1), cvPoint(tempxx2,tempyy2), cvScalar(255,0,0), 1); 
            cvCircle(image, cvPoint(XX,YY), 3, cvScalar(0,0,255), 1); 
        } 
            count = k; 
        } 
 
 
        if( add_remove_pt && count < MAX_COUNT ) 
        { 
            points[1][count++] = cvPointTo32f(pt); 
            cvFindCornerSubPix( grey, points[1] + count - 1, 1, 
                cvSize(win_size,win_size), cvSize(-1,-1), 
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            add_remove_pt = 0; 
        } 
 
        CV_SWAP( prev_grey, grey, swap_temp ); 
        CV_SWAP( prev_pyramid, pyramid, swap_temp ); 
        CV_SWAP( points[0], points[1], swap_points ); 
        need_to_init = 0; 
 
       
        //writing image file to the file 
        //if(!cvSaveImage(str1,image)) printf("Could not save: %s\n",str1); 
        //storing in a video also 
  
         
        cvShowImage( "KLT-Tracking Group_R", image ); 
 
        c = cvWaitKey(100); 
        if( (char)c == 27 ) 
            break; 
        switch( (char) c ) 
        { 
        case 's': 
            need_to_init = 1; 
          } 
 
        counter1++; 
    } 
 
    cvReleaseCapture( &capture ); 
    cvDestroyWindow("KLT-Tracking Group_R"); 
 
    fcloseall(); 
     
    return 0; 
} 
Example #25
0
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{

    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name_face = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        fprintf( stderr,
                 "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        //return -1;
        input_name = argc > 1 ? argv[1] : 0;
    }

    // Load the HaarClassifierCascade
    cascade_eye = (CvHaarClassifierCascade*)cvLoad( cascade_name_eye, 0, 0, 0 );
    cascade_face = (CvHaarClassifierCascade*)cvLoad( cascade_name_face, 0, 0, 0 );

    // Check whether the cascade has loaded successfully. Else report and error and quit
    if( !cascade_face )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }

    // Allocate the memory storage
    storage = cvCreateMemStorage(0);

    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name );


    // Create a new named window with title: result
    cvNamedWindow( "result", CV_WINDOW_AUTOSIZE );
    //cvResizeWindow( "result", 640, 480 );

    // Find if the capture is loaded successfully or not.

    // If loaded succesfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;

            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy.
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );

            // Call the function to detect and draw the face
            //fprintf( stdout, "Passing to detectAndDraw\n");
            detect_and_draw( frame_copy );

            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded succesfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If Image is loaded succesfully, then:
        if( image )
        {
            // Detect and draw the face
            detect_and_draw( image );

            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded succesfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
                        detect_and_draw( image );

                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }

    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successfull execution of the program
    return 0;
}
Example #26
0
// Open the image source selected by imAcq->method (camera, video file, or
// stream), validate the frame range for video input, and record the frame
// number and tick count at which acquisition started.  Exits the process on
// any failure to open the source.
void imAcqInit(ImAcq *imAcq)
{
    switch(imAcq->method)
    {
    case IMACQ_CAM:
        imAcq->capture = cvCaptureFromCAM(imAcq->camNo);

        printf("%f\n", cvGetCaptureProperty(imAcq->capture, CV_CAP_PROP_FRAME_WIDTH));

        if(imAcq->capture == NULL)
        {
            printf("Error: Unable to initialize camera\n");
            exit(0);
        }
        break;

    case IMACQ_VID:
        imAcq->capture = cvCaptureFromAVI(imAcq->imgPath);

        if(imAcq->capture == NULL)
        {
            printf("Error: Unable to open video\n");
            exit(0);
        }

        // take all frames
        if(imAcq->lastFrame == 0)
            imAcq->lastFrame = imAcqVidGetNumberOfFrames(imAcq); //This sometimes returns garbage

        // lastFrame out of bounds
        if(imAcq->lastFrame > imAcqVidGetNumberOfFrames(imAcq))
        {
            printf("Error: video has only %d frames you selected %d as last frame.\n",
                   imAcqVidGetNumberOfFrames(imAcq), imAcq->lastFrame);
            exit(1);
        }

        // something is wrong with startFrame and/or lastFrame
        if((imAcq->lastFrame < 1) || (imAcq->currentFrame < 1) || ((imAcq->currentFrame > imAcq->lastFrame)))
        {
            printf("Error: something is wrong with the start and last frame number. startFrame: %d lastFrame: %d\n",
                   imAcq->currentFrame, imAcq->lastFrame);
            exit(1);
        }

        // set the video position to the correct frame
        //This produces strange results on some videos and is deactivated for now.
        //imAcqVidSetNextFrameNumber(imAcq, imAcq->currentFrame);
        break;

    case IMACQ_STREAM:
        imAcq->capture = cvCaptureFromFile(imAcq->imgPath);

        if(imAcq->capture == NULL)
        {
            printf("Error: Unable to open video\n");
            exit(0);
        }
        break;

    default:
        // unknown method: leave the capture untouched, matching the
        // original else-if chain which had no final else
        break;
    }

    imAcq->startFrame = imAcq->currentFrame;
    imAcq->startTime = cvGetTickCount();
}
Example #27
0
// Start the video-file source: open the AVI named by this module's
// "path" property as the capture handle, then delegate to the base
// class to perform the generic module start-up.
// NOTE(review): cvCaptureFromAVI may return NULL on a bad path; the
// result is not checked here — presumably handled downstream. Verify.
void nuiVideoFileSource::start()
{
	cap = cvCaptureFromAVI(this->property("path").asString().c_str());
	nuiModule::start();
};
Example #28
0
/*
 * Camera worker thread.
 *
 * Opens either an AVI file (camobj->mode == 2 and a ".avi" filename) or
 * the default camera, loads the Haar classifier cascade from the
 * resource path, then loops: pumps the Win32 message queue, and every
 * `update_rate` ms grabs a frame and runs lk_work() on it.  The loop
 * terminates when CamExitEvent is signalled or the message queue closes.
 *
 * Returns 1 on normal shutdown, 0 if the cascade could not be loaded.
 */
DWORD WINAPI CamProc(LPVOID lpv)
{
	l_t=SDL_GetTicks();
    HANDLE     hArray[1];
	DWORD dwRes;
	BOOL CamThreadDone=FALSE;
	MSG msg;

	hArray[0] = CamExitEvent;


	    char fname[256];
	//if (!capture)
	//{

	  if ((camobj->mode == 2) && (strstr(camobj->videofilename,".avi"))) capture = cvCaptureFromAVI( camobj->videofilename );
       else capture = cvCaptureFromCAM( 0 );


      if((!capture ) && (camobj->mode!=1))
	  {
		if (!GLOBAL.loading) close_toolbox();
   		report_error("Could not connect to WebCamera");
	  }

	  /* Build the cascade path with a bounded, always-NUL-terminated
	   * formatter; the original strcpy/strcat pair could overflow
	   * fname[256] if resourcepath + cascade_name exceeded the buffer. */
	  snprintf(fname, sizeof fname, "%s%s", GLOBAL.resourcepath, cascade_name);

	  cascade = (CvHaarClassifierCascade*)cvLoad( fname, 0, 0, 0 );
	  if( !cascade )
	  {
		  SetDlgItemText(ghWndStatusbox, IDC_STATUS,"ERROR: Could not load classifier cascade");
	      return(0);
	  }

      storage = cvCreateMemStorage(0);

   	  if (camobj->showlive)
	  {
		cvNamedWindow( "Camera", 0 );
	    cvMoveWindow("Camera",0,0);
        cvSetMouseCallback( "Camera", on_mouse, 0 );

	  }



	while (!CamThreadDone)
	{
           /* Poll (timeout 0) for the exit event so the message pump below
            * keeps running. */
           dwRes = WaitForMultipleObjects(1, hArray, FALSE, 0);

            switch(dwRes)
            {
                case WAIT_OBJECT_0:
                    CamThreadDone = TRUE;
                    /* fallthrough */
		        case WAIT_TIMEOUT:   // timeouts are not reported
                    break;

                default:
                   // report_error("WaitForMultipleObjects(CamExitEvent) does not return");
                    break;
			}

			/* Pump the Win32 message queue; GetMessage returning FALSE
			 * (WM_QUIT) ends the thread. Braced to make the control flow
			 * explicit — the original indentation was misleading. */
			if(!GetMessage(&msg, NULL, 0, 0)) break;
			TranslateMessage(&msg);
			DispatchMessage(&msg);

			a_t=SDL_GetTicks();
			if (a_t-l_t>update_rate)
			{
		        /* Time for the next frame: grab, process, and update the
		         * measured frame interval. */
		        frame = cvQueryFrame( capture );

				lk_work(camobj);
				cur_rate=a_t-l_t;
				l_t=a_t;
			}
			else
			Sleep(3);

	}
	return(1);
}
// Open the AVI file named by `filename` (a Qt string) and report whether
// the capture handle was created.  cvCaptureFromAVI returns NULL on failure.
bool VideoGrabber::init(){
	cap = cvCaptureFromAVI(filename.toAscii().data());
	if (cap == NULL)
		return false;
	return true;
}
Example #30
0
/*
 * Postbox-checking demo: plays "./Postboxes.avi", perspective-corrects
 * each frame to a 200x300 view, runs check_postboxes() on it, and shows
 * the input, the vertical-edge image and the labelled result.
 * ESC quits, SPACE pauses/resumes.  Returns 0 on success, 1 if the
 * video cannot be opened.
 */
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	CvSize size;
	size.height = 300; size.width = 200;
	IplImage *corrected_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );
	IplImage *labelled_image=NULL;
	IplImage *vertical_edge_image=NULL;
    int user_clicked_key=0;

    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( "./Postboxes.avi" );
    // Ensure AVI opened properly
    if( !capture )
		return 1;

    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    // CV_CAP_PROP_FPS can legitimately return 0 (or garbage) for some
    // containers/codecs; guard against the division by zero in the
    // cvWaitKey( 1000 / fps ) call below.
    if( fps <= 0 )
        fps = 25;

	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	// Map the (hand-picked) postbox corners in the source frame onto an
	// upright 200x300 rectangle.
	CvPoint2D32f from_points[4] = { {3, 6}, {221, 11}, {206, 368}, {18, 373} };
	CvPoint2D32f to_points[4] = { {0, 0}, {200, 0}, {200, 300}, {0, 300} };
	CvMat* warp_matrix = cvCreateMat( 3,3,CV_32FC1 );
	cvGetPerspectiveTransform( from_points, to_points, warp_matrix );

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
	cvNamedWindow( "Vertical edges", 0 );
    cvNamedWindow( "Results", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
		image_for_on_mouse_show_values=current_frame; // Assign image for mouse callback
        if( !current_frame ) // No new frame available
			break;

		cvWarpPerspective( current_frame, corrected_frame, warp_matrix );

		if (labelled_image == NULL)
		{	// The first time around the loop create the images for processing
			labelled_image = cvCloneImage( corrected_frame );
			vertical_edge_image = cvCloneImage( corrected_frame );
		}
		check_postboxes( corrected_frame, labelled_image, vertical_edge_image );

		// Display the current frame and results of processing
        cvShowImage( "Input video", current_frame );
        cvShowImage( "Vertical edges", vertical_edge_image );
        cvShowImage( "Results", labelled_image );

        // Wait for the delay between frames
        user_clicked_key = cvWaitKey( 1000 / fps );
		if (user_clicked_key == ' ')
		{
			user_clicked_key = cvWaitKey(0);
		}
	}

    /* Free memory.  The original destroyed a window named "video" that
     * was never created; destroy the windows actually opened above, and
     * release the images/matrix that were allocated. */
    cvReleaseCapture( &capture );
    cvReleaseMat( &warp_matrix );
    cvReleaseImage( &corrected_frame );
    if( labelled_image )
        cvReleaseImage( &labelled_image );
    if( vertical_edge_image )
        cvReleaseImage( &vertical_edge_image );
    cvDestroyWindow( "Input video" );
    cvDestroyWindow( "Vertical edges" );
    cvDestroyWindow( "Results" );

    return 0;
}