Code Example #1
File: test_recorder.cpp  Project: telnykha/VideoA
// draw the status labels
void DrawStatus(IplImage* img)
{
	CvPoint p = cvPoint(10, img->height - 10);	
	CvScalar color; 
	CvSize   ts;
	int	     bl;

	char buf[32];
	
	sprintf(buf, "%s", "record_video(r)");
	color = g_record_video ? CV_RGB(255,0,0):CV_RGB(192,192,192);
	cvPutText(img, buf, p, &g_font, color);
	cvGetTextSize(buf,&g_font, &ts, &bl);

	sprintf(buf, "%s", "set_zones(z)");
	p.x = 20 + ts.width;
	color = g_set_zones ? CV_RGB(255,0,0):CV_RGB(192,192,192);
	cvPutText(img, buf, p, &g_font, color);
	cvGetTextSize(buf,&g_font, &ts, &bl);

	sprintf(buf, "%s", "set_rects(t)");
	p.x += (10 + ts.width);
	color = g_set_rects ? CV_RGB(255,0,0):CV_RGB(192,192,192);
	cvPutText(img, buf, p, &g_font, color);
	cvGetTextSize(buf,&g_font, &ts, &bl);

	sprintf(buf, "%s", "set_contours(c)");
	p.x += (10 + ts.width);
	color = g_set_contours ? CV_RGB(255,0,0):CV_RGB(192,192,192);
	cvPutText(img, buf, p, &g_font, color);
	cvGetTextSize(buf,&g_font, &ts, &bl);

	p.x = 10;
	p.y = 10;

	sprintf(buf, "x=%i:y=%i", g_x, g_y);
	cvPutText(img, buf, p, &g_font, CV_RGB(192,192,192));
}
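
Example #1 relies on a measure-then-advance layout: each label is drawn, its rendered width is queried with cvGetTextSize, and the next label's origin is shifted right by that width plus a gap. Below is a minimal, self-contained sketch of the same pattern; the font face, label strings and 10 px gap are illustrative assumptions, not values taken from the project.

// A minimal sketch of the measure-then-advance pattern, assuming the
// OpenCV C API headers (core_c.h covers the drawing calls in 2.x,
// imgproc_c.h in 3.x).
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

static void draw_label_row(IplImage* img)
{
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0, 0.0, 1, 8);

    const char* labels[] = { "record_video(r)", "set_zones(z)", "set_rects(t)" };
    CvPoint p = cvPoint(10, img->height - 10);   // baseline origin of the row

    for (int i = 0; i < 3; i++)
    {
        cvPutText(img, labels[i], p, &font, CV_RGB(192, 192, 192));

        CvSize ts;
        int baseline;
        cvGetTextSize(labels[i], &font, &ts, &baseline);
        p.x += ts.width + 10;                    // advance past the label plus a gap
    }
}
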
Code Example #2
void drawText(IplImage* img, int x, int y, string text)
{
         CvFont fnt;
         cvInitFont(&fnt,/*CV_FONT_HERSHEY_SIMPLEX*/CV_FONT_VECTOR0,2,2,0,5);

         string remainingText = text;

         if (remainingText.find("\n") == string::npos)
                remainingText += "\n";

         while ((remainingText.find("\n") != string::npos))
         {
                size_t loc = remainingText.find("\n");
                string putText = remainingText.substr(0,loc);
                remainingText = remainingText.substr(loc+1);

                cvPutText(img,putText.c_str(),cvPoint(x,y),&fnt,CV_RGB(1,1,1));

                CvSize size;
                int belowBase;
                cvGetTextSize(putText.c_str(),&fnt,&size,&belowBase);
                y+=size.height - belowBase + 40;
         }
}
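
A hedged usage sketch for the drawText() helper above: it assumes the helper (and a using-directive for std::string, as in the snippet) is in scope, and simply feeds it a two-line string on a blank canvas. The canvas size, coordinates and window name are made up for illustration.

// Usage sketch for drawText(): render a multi-line string on a blank canvas.
// Assumes the OpenCV C headers and the drawText() helper defined above.
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <string>

int main()
{
    IplImage* canvas = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
    cvSet(canvas, cvScalarAll(255), NULL);        // white background

    // drawText() splits on '\n' and advances y per line using cvGetTextSize
    drawText(canvas, 20, 80, std::string("first line\nsecond line"));

    cvShowImage("drawText demo", canvas);
    cvWaitKey(0);
    cvReleaseImage(&canvas);
    return 0;
}
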
Code Example #3
File: image_d_viewer.cpp  Project: billow06/Autoware
static void showImage()
{
    IplImage* image_clone = cvCloneImage(image);
    char distance_string[32];
    CvFont dfont;
    float hscale      = 0.7f;
    float vscale      = 0.7f;
    float italicscale = 0.0f;
    int  thickness    = 1;

    std::string objectLabel;
    CvFont      dfont_label;
    float       hscale_label = 0.5f;
    float       vscale_label = 0.5f;
    CvSize      text_size;
    int         baseline     = 0;

    cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
    objectLabel = car_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot obstacle frame
     */
    showRects(image_clone,
              car_fused_objects.obj,
              ratio,
              cvScalar(255.0,255.0,0.0));
    showRects(image_clone,
              pedestrian_fused_objects.obj,
              ratio,
              cvScalar(0.0,255.0,0.0));


    /*
     * Plot car distance data on image
     */
    for (unsigned int i = 0; i < car_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(car_fused_objects.obj.at(i).range)) {
          int rect_x      = car_fused_objects.obj.at(i).rect.x;
          int rect_y      = car_fused_objects.obj.at(i).rect.y;
          int rect_width  = car_fused_objects.obj.at(i).rect.width;
          int rect_height = car_fused_objects.obj.at(i).rect.height;
          float range     = car_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); // displayed distance is in meters
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    objectLabel = pedestrian_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot pedestrian distance data on image
     */
    for (unsigned int i = 0; i < pedestrian_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(pedestrian_fused_objects.obj.at(i).range)) {
          int rect_x      = pedestrian_fused_objects.obj.at(i).rect.x;
          int rect_y      = pedestrian_fused_objects.obj.at(i).rect.y;
          int rect_width  = pedestrian_fused_objects.obj.at(i).rect.width;
          int rect_height = pedestrian_fused_objects.obj.at(i).rect.height;
          float range     = pedestrian_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); // displayed distance is in meters
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    /*
     * Show image
     */
    if (cvGetWindowHandle(window_name.c_str()) != NULL) // Guard not to write destroyed window by using close button on the window
      {
        cvShowImage(window_name.c_str(), image_clone);
        cvWaitKey(2);
      }
    cvReleaseImage(&image_clone);
}
Code Example #4
static void putDistance(IplImage *Image,
                        std::vector<autoware_msgs::ImageRectRanged> objects,
                        int threshold_height,
                        const char* objectLabel)
{
  char distance_string[32];
  CvFont dfont;
  float hscale	    = 0.7f;
  float vscale	    = 0.7f;
  float italicscale = 0.0f;
  int	thickness   = 1;

  CvFont      dfont_label;
  float       hscale_label = 0.5f;
  float       vscale_label = 0.5f;
  CvSize      text_size;
  int         baseline     = 0;

  cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
  cvGetTextSize(objectLabel,
                &dfont_label,
                &text_size,
                &baseline);

  for (unsigned int i=0; i<objects.size(); i++)
    {
      if (objects.at(i).rect.y > threshold_height) // temporal way to avoid drawing detections in the sky
        {
          if (!isNearlyNODATA(objects.at(i).range))
            {

              /*put label */
              CvPoint labelOrg = cvPoint(objects.at(i).rect.x - OBJ_RECT_THICKNESS,
                                         objects.at(i).rect.y - baseline - OBJ_RECT_THICKNESS);

              cvRectangle(Image,
                          cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                          cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                          CV_RGB(0, 0, 0), // label background is black
                          -1, 8, 0
                          );
              cvPutText(Image,
                        objectLabel,
                        labelOrg,
                        &dfont_label,
                        CV_RGB(255, 255, 255) // label text color is white
                        );

              /* put distance data */
              cvRectangle(Image,
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 45),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 5),
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) + (((int)log10(objects.at(i).range/100)+1) * 8 + 38),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 30),
                          cv::Scalar(255,255,255),
                          -1);

              cvInitFont (&dfont,
                          CV_FONT_HERSHEY_COMPLEX,
                          hscale,
                          vscale,
                          italicscale,
                          thickness,
                          CV_AA);

              sprintf(distance_string, "%.2f m", objects.at(i).range / 100); // displayed distance is in meters
              cvPutText(Image,
                        distance_string,
                        cvPoint(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 40),
                                objects.at(i).rect.y + objects.at(i).rect.height + 25),
                        &dfont,
                        CV_RGB(255, 0, 0));
            }

        }
    }
}
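
Examples #3 and #4 both size a filled background rectangle from the values returned by cvGetTextSize before drawing a white label over it. The following stripped-down sketch shows that boxed-label pattern on its own; the font settings and colors are illustrative assumptions rather than the projects' exact values.

// Sketch: white text on a black box sized from cvGetTextSize.
// labelOrg is the baseline origin later passed to cvPutText.
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

static void put_boxed_label(IplImage* img, const char* label, CvPoint labelOrg)
{
    CvFont font;
    CvSize text_size;
    int baseline = 0;

    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0.0, 1, CV_AA);
    cvGetTextSize(label, &font, &text_size, &baseline);

    // The box runs from just below the baseline (to cover descenders)
    // up to text_size.height above it, and text_size.width to the right.
    cvRectangle(img,
                cvPoint(labelOrg.x, labelOrg.y + baseline),
                cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                CV_RGB(0, 0, 0), CV_FILLED, 8, 0);
    cvPutText(img, label, labelOrg, &font, CV_RGB(255, 255, 255));
}
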
Code Example #5
File: calibration.cpp  Project: janfrs/kwc-ros-pkg
int main( int argc, char** argv )
{
    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help = 
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
  // calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]
      printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
	    "     [-di <disk_images>       # Number of disk images before triggering undistortion\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
	else if( strcmp( s, "-di") == 0)
	{
	    if( sscanf( argv[++i], "%d", &images_from_file) != 1 || images_from_file < 3)
		return printf("Invalid di, must be >= 3\n"), -1;
	}
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( input_filename )
    {
        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );
    cvNamedWindow( "Undistort",1);
    int disk_image_cnt = 0;

    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key;
        
        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
                disk_image_cnt++;
           }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view || (disk_image_cnt == images_from_file))
        {
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%03d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        if( blink )
            cvNot( view, view );
        // Rectify (undistort) the image
        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvShowImage( "Image View", view );
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
            cvShowImage( "Undistort", view );
            key = cvWaitKey(0);
        }
        else
        {
            cvShowImage( "Image View", view );
            key = cvWaitKey(capture ? 50 : 500);
        }

        if( key == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistort_image = !undistort_image;

        if( capture && key == 'g' )
        {
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            if(disk_image_cnt == images_from_file)
                 undistort_image = !undistort_image;
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    return 0;
}
Code Example #6
File: TextAdder.cpp  Project: el-bart/WiFiChopper
size_t TextAdder::getFontHeight(void) const
{
  CvSize size;
  cvGetTextSize("ABCDEFGHIJKLMNOPQRSTUWVXYZ" "abcdefghijklmnopqrstuwvxyz" "0123456789", &font_, &size, nullptr);
  return size.height;
}
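
Note that getFontHeight() above passes nullptr for the baseline, so the returned height excludes descenders (the tails of 'g', 'j', 'p', 'q', 'y'). A hedged variant that includes them, assuming the same font_ member and a hypothetical getFullFontHeight() method on the same class, might look like this:

// Sketch: line height including descender space (size.height + baseline).
// Assumes the TextAdder class and font_ member shown above.
size_t TextAdder::getFullFontHeight(void) const
{
  CvSize size;
  int baseline = 0;
  cvGetTextSize("ABCDEFGHIJKLMNOPQRSTUWVXYZ" "abcdefghijklmnopqrstuwvxyz" "0123456789", &font_, &size, &baseline);
  return size.height + baseline;
}
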
Code Example #7
File: hv_dc1394.cpp  Project: B12040331/handvu
void
capture_frame()
{
  static int                have_warned = 0;

  dc1394_dma_multi_capture(cameras, numCameras);

  if (!freeze && adaptor >= 0) {
    for (int i = 0; i < numCameras; i++) {
      // create OpenCV image
      switch (res) {
      case MODE_640x480_YUV411:
	if (!have_warned) {
	  have_warned = 1;
	  printf("WARNING: no OpenCV conversion available for this format\n");
	}
        //iyu12yuy2((unsigned char *) cameras[i].capture_buffer,
	//        frame_buffer + (i * frame_length),
	//        (device_width * device_height));
        break;

      case MODE_320x240_YUV422:
      case MODE_640x480_YUV422:
	if (!have_warned) {
	  have_warned = 1;
	  printf("WARNING: no OpenCV conversion available for this format\n");
	}
	//        memcpy(frame_buffer + (i * frame_length),
	//     cameras[i].capture_buffer, device_width * device_height * 2);
        break;

      case MODE_640x480_RGB:
	// don't convert for OpenCV, this is the correct format
	readOnlyImg->imageData = (char *) cameras[i].capture_buffer;
        if (async_processing) {
          if (i==0) {
            hvAsyncGetImageBuffer(&m_async_image, &m_async_bufID);
            cvCopy(readOnlyImg, m_async_image);
          }
        } else {
          cvCopy(readOnlyImg, iplImages[i]);
        }
        break;
      }
      
      if (verbose) {
        CvFont                    font;
        cvInitFont(&font, CV_FONT_VECTOR0, 0.5f /* hscale */ ,
                   0.5f /* vscale */ , 0.1f /*italic_scale */ ,
                   1 /* thickness */ , 8);
        char                      str[256];
        sprintf(str, "camera id: %d", i);
        CvSize                    textsize;
        int                       underline;
        cvGetTextSize(str, &font, &textsize, &underline);
        CvPoint                   pos =
          cvPoint(iplImages[i]->width - textsize.width - 5,
                  textsize.height + 10);
        cvPutText(iplImages[i], str, pos, &font, CV_RGB(0, 255, 0));

	//        cvRectangle(iplImages[i], cvPoint(10, 10), cvPoint(100, 100),
        //            cvScalar(255, 255, 255, 255), CV_FILLED, 8, 0);
      }
    }
  }

  for (int i = 0; i < numCameras; i++) {
    dc1394_dma_done_with_buffer(&cameras[i]);
  }
}
Code Example #8
/**
 * This method shows the image in a normal display window.
 */
void DiVADisplay::windowVisible() {
	unsigned int baseSize = MINGAP;
	unsigned int i = 0, j = 0, k = 0, w = 0;
	unsigned int limitX = 0, sizeText = 0;
	unsigned int x = 0, y = 0;
	unsigned int gapText = 0, sizefont = 0;
	IplImage *tmp = NULL, *tmp2 = NULL, *global = NULL;
	DiVAImage* newImage = NULL, *temporal = NULL;
	DiVASCALAR black = DiVAScalarAll(1);
	CvSize size;

	if (!update) {
		this->CreateImage();
		global = OpenCVConverter::getIplImage(this->getDiVAImage());

		// Check space color
		for(i = 1; i <= this->getRows(); i++) {
			for(j = 1; j <= this->getColumns(); j++) {
				if (this->getDiVAImageN(i,j) != NULL) {
					if (this->getDiVAImageN(i,j)->getNChannels() != 3) {
						temporal = new DiVAImage(this->getImWidthInput(),this->getImHeightInput(),3 ,this->getDiVAImageN(i,j)->getDepth()); 
						tmp2 = OpenCVConverter::getIplImage(temporal);
						delete temporal;
						tmp = OpenCVConverter::getIplImage(this->getDiVAImageN(i,j));
						cvCvtColor(tmp,tmp2,CV_GRAY2RGB);
						cvReleaseImage(&tmp);
					}
					else 
						tmp2 = OpenCVConverter::getIplImage(this->getDiVAImageN(i,j));
					// Allocate images (initial image = tmp2)
					x = this->getPositionxN(i,j);
					y = this->getPositionyN(i,j);
					cvSetImageROI(global, cvRect(x,y,this->getImWidthInput(),this->getImHeightInput()));
					cvCopy(tmp2,global);
					cvReleaseImage(&tmp2);
					cvResetImageROI(global);
					// Include Title
					if (this->getTitleN(i,j) != NULL) {
						limitX = this->getGlobalWidth();
						x = this->getPositionxN(i,j);
						y = this->getGlobalHeight() - (this->getImHeightInput() * (i-1));
						cvGetTextSize(this->getTitleN(i,j),this->getFont(),&size,NULL);
						sizeText = this->getImWidthInput() - size.width;
						if (sizeText > 0)
							x += (((int)(this->getImWidthInput()/2)) - ((int)(size.width/2)));
	
						sizefont = MINGAP;
						sizeText = (int)(sizefont * this->getSizeFont());
						gapText = sizeText + (int)(2*(sizeText/8));
						y -= (gapText * (i));
						y += (int)(sizeText/8);

						temporal = new DiVAImage(sizeText,sizeText,3,8);
						temporal->setPixels(black);
						tmp = OpenCVConverter::getIplImage(temporal);
						delete temporal;
						cvSetImageROI(global, cvRect(x,y,sizeText,sizeText));
						cvCopy(tmp,global);
						cvResetImageROI(global);
						cvReleaseImage(&tmp);
						cvPutText(global ,this->getTitleN(i,j),cvPoint(x,y),this->getFont(),cvScalar(256,256,256,0));
					}
				}
			}
		}
	
		// Global image resize (initial image = global)
		if (this->sizeWindow)
			temporal = new DiVAImage(this->wWidth,this->wHeight,3,this->getDiVAImage()->getDepth());
		else 
			temporal = new DiVAImage(this->getGlobalWidth(),this->getGlobalHeight(),3 ,this->getDiVAImage()->getDepth());
		tmp2 = OpenCVConverter::getIplImage(temporal);
		delete temporal;
		cvResize(global,tmp2);
		newImage = OpenCVConverter::getDiVAImage(tmp2);
		//newImage->setVAlign(1);
		newImage->setVAlign(0);
		this->setDiVAImage(newImage);
		delete newImage;
		cvReleaseImage(&global);
		cvReleaseImage(&tmp2);
	}
	// Check and Visualize
	if (this->getActive() == false)
		this->onWindows();
	if (this->getDiVAImage() != NULL) {
		tmp = OpenCVConverter::getIplImage(this->image);
		if (tmp != NULL) {
			cvShowImage(this->getWindowName(),tmp);
			cvWaitKey(1);
			cvReleaseImage(&tmp);
		}
	}
	this->update = true;
	return;
}
Code Example #9
File: objects.cpp  Project: AakashKhurana/qt_vision
void Buoy::TrackObjects(IplImage* imgThresh,IplImage** frame_resized){
    CvMoments *moments=(CvMoments*)malloc(sizeof(CvMoments));
    cvMoments(imgThresh,moments,1);
    double moment10=cvGetSpatialMoment(moments,1,0);
    double moment01=cvGetSpatialMoment(moments,0,1);
    double area=cvGetCentralMoment(moments,0,0);

    if(area>10){
        qDebug()<<area<<endl;
    //	cout<<area<<"\n";
        //printf(" Object Found: ");
        char coordinates_red[32];
        char coordinates_yellow[32];
        char coordinates_green[32];
        setXY(moment10/area,moment01/area);
        int posX=getX();
        int posY=getY();
         CvFont myfont;
        cvInitFont(&myfont,CV_FONT_HERSHEY_COMPLEX_SMALL,0.5,0.5,0.0,1,8);

        if(pHit>0.5){

            if(buoyColor=="yellow"){
                found=true;
                sprintf(coordinates_yellow,"YELLOW|X=%d|Y=%d",posX,posY);
                //sprintf(coordinates_yellow,"Yellow has just been found and");
                cvGetTextSize(coordinates_yellow,&myfont,myfontSize,&baseline);
                cvRectangle(*frame_resized,cvPoint(posX+5,posY+3),cvPoint(posX+125,posY-8),CV_RGB(0,0,0),CV_FILLED);
                cvPutText(*frame_resized,coordinates_yellow,cvPoint(posX+5,posY),&myfont,CV_RGB(255,255,255));
                cvCircle(*frame_resized,cvPoint(posX,posY),4,CV_RGB(255,255,0),2);
            }

            if(buoyColor=="red"){
                sprintf(coordinates_red,"RED|X=%d|Y=%d",posX,posY);
                cvGetTextSize(coordinates_red,&myfont,myfontSize,&baseline);
                cvRectangle(*frame_resized,cvPoint(posX+5,posY+3),cvPoint(posX+105,posY-8),CV_RGB(0,0,0),CV_FILLED);
                cvPutText(*frame_resized,coordinates_red,cvPoint(posX+5,posY),&myfont,CV_RGB(255,255,255));
                cvCircle(*frame_resized,cvPoint(posX,posY),4,CV_RGB(255,0,0),2);
            }

            if(buoyColor=="green"){
                sprintf(coordinates_green,"GREEN|X=%d|Y=%d",posX,posY);
                cvGetTextSize(coordinates_green,&myfont,myfontSize,&baseline);
                cvRectangle(*frame_resized,cvPoint(posX+5,posY+3),cvPoint(posX+124,posY-8),CV_RGB(0,0,0),CV_FILLED);
                cvPutText(*frame_resized,coordinates_green,cvPoint(posX+5,posY),&myfont,CV_RGB(255,255,255));
                cvCircle(*frame_resized,cvPoint(posX,posY),4,CV_RGB(0,255,0),2);
            }
        }
    }
    else
        found=false;

    free(moments);
}
Code Example #10
/*!
  Initialize the display size, position and title.

  \param width, height : Width and height of the window.
  \param x, y : The window is set at position x,y (column index, row index).
  \param title : Window title.

*/
void
vpDisplayOpenCV::init(unsigned int width, unsigned int height,
                      int x, int y,
                      const char *title)
{
  this->width  = width;
  this->height = height;
  this->windowXPosition = x;
  this->windowYPosition = y;
  int flags = CV_WINDOW_AUTOSIZE;
  if (title != NULL)
  {
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[strlen(title) + 1] ;
    strcpy(this->title, title) ;
  }
  else{
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[50] ;
    sprintf(this->title,"Unnamed ViSP display <%02d>",count) ;
  }
  count++;
  /* Create the window*/
  window = cvNamedWindow( this->title, flags );
  cvMoveWindow( this->title, x, y );
  move = false;
  lbuttondown = false;
  mbuttondown = false;
  rbuttondown = false;
  lbuttonup = false;
  mbuttonup = false;
  rbuttonup = false;
  cvSetMouseCallback( this->title, on_mouse, this );
  /* Create background pixmap */
//   background = cvCreateImage(cvSize((int)width,(int)height),IPL_DEPTH_8U,3);
//
//   cvShowImage( this->title,background);

  col = new CvScalar[vpColor::id_unknown] ;

  /* Create color */
  vpColor pcolor; // Predefined colors
  pcolor = vpColor::lightBlue;
  col[vpColor::id_lightBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::blue;
  col[vpColor::id_blue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkBlue;
  col[vpColor::id_darkBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightRed;
  col[vpColor::id_lightRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::red;
  col[vpColor::id_red]    = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkRed;
  col[vpColor::id_darkRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGreen;
  col[vpColor::id_lightGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::green;
  col[vpColor::id_green]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGreen;
  col[vpColor::id_darkGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::yellow;
  col[vpColor::id_yellow] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::cyan;
  col[vpColor::id_cyan]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::orange;
  col[vpColor::id_orange] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::purple;
  col[vpColor::id_purple] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::white;
  col[vpColor::id_white]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::black;
  col[vpColor::id_black]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGray;
  col[vpColor::id_lightGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::gray;
  col[vpColor::id_gray]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGray;
  col[vpColor::id_darkGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;

  font = new CvFont;
  cvInitFont( font, CV_FONT_HERSHEY_PLAIN, 0.70f,0.70f);

  CvSize fontSize;
  int baseline;
  cvGetTextSize( "A", font, &fontSize, &baseline );
  fontHeight = fontSize.height + baseline;
  displayHasBeenInitialized = true ;
}
Code Example #11
// Function that wires all the modules together.
// Use it as a template when adapting your own pipeline.
int RunBlobTrackingAuto2323(CvCapture* pCap, CvBlobTrackerAuto* pTracker, char* fgavi_name, char* btavi_name)
{
	int                     OneFrameProcess = 0;
	int                     key;
	int                     FrameNum = 0;
	CvVideoWriter*          pFGAvi = NULL;
	CvVideoWriter*          pBTAvi = NULL;

	/* Main loop: */
	/* When OneFrameProcess is nonzero, cvWaitKey(0) blocks until a key is pressed;
	   otherwise cvWaitKey(1) waits 1 ms and returns the pressed key, or -1 on timeout. */
	for (FrameNum = 0; pCap && (key = cvWaitKey(OneFrameProcess ? 0 : 1)) != 27; // pressing ESC ends the whole program
		FrameNum++)
	{   /* Main loop: the whole program ends when this loop terminates. */
		IplImage*   pImg = NULL;
		IplImage*   pMask = NULL;

		if (key != -1)
		{
			OneFrameProcess = 1;
			if (key == 'r')OneFrameProcess = 0;
		}

		pImg = cvQueryFrame(pCap); // read the next video frame
		if (pImg == NULL) break;


		/* Process: */
		pTracker->Process(pImg, pMask); // process the frame; this call runs the whole detection/tracking pipeline

		if (fgavi_name) // an output file name for the foreground (FG) video was given
		if (pTracker->GetFGMask()) // if a foreground mask exists, save the foreground and draw the blobs
		{   /* Debug FG: */
			IplImage*           pFG = pTracker->GetFGMask(); // get the foreground mask
			CvSize              S = cvSize(pFG->width, pFG->height);
			static IplImage*    pI = NULL;

			if (pI == NULL)pI = cvCreateImage(S, pFG->depth, 3);
			cvCvtColor(pFG, pI, CV_GRAY2BGR);

			if (fgavi_name) // save the foreground to the AVI file
			{   /* Save fg to avi file: */
				if (pFGAvi == NULL)
				{
					pFGAvi = cvCreateVideoWriter(
						fgavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pFGAvi, pI); // write one frame
			}

			// draw an ellipse for each blob
			if (pTracker->GetBlobNum() > 0) // the tracker found at least one blob
			{   /* Draw detected blobs: */
				int i;
				for (i = pTracker->GetBlobNum(); i > 0; i--)
				{
					CvBlob* pB = pTracker->GetBlob(i - 1); // get blob i-1
					CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); // blob center
					// CV_BLOB_CENTER is just a cast-based macro:
					//#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y)
					CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB))), MAX(1, cvRound(CV_BLOB_RY(pB))));
					// the CV_BLOB_RX/RY macros give the blob's half width and half height
					int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));
					cvEllipse(pI, // draw the blob as an ellipse on the image
						p,
						s,
						0, 0, 360,
						CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * c) / 255));
				}   /* Next blob: */;
			}
			cvNamedWindow("FG", 0);
			cvShowImage("FG", pI);
		}   /* Debug FG: save the foreground and draw the blobs if requested. */


		// on the original image, write each blob's id next to it
		/* Draw debug info: */
		if (pImg) // the original frame
		{   /* Draw all information about test sequence: */
			char        str[1024];
			int         line_type = CV_AA;   // Change it to 8 to see non-antialiased graphics.
			CvFont      font;
			int         i;
			IplImage*   pI = cvCloneImage(pImg);

			cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type);

			for (i = pTracker->GetBlobNum(); i > 0; i--)
			{
				CvSize  TextSize;
				CvBlob* pB = pTracker->GetBlob(i - 1);
				CvPoint p = cvPoint(cvRound(pB->x * 256), cvRound(pB->y * 256));
				CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB) * 256)), MAX(1, cvRound(CV_BLOB_RY(pB) * 256)));
				int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));


				// draw the blob onto the original image
				cvEllipse(pI,
					p,
					s,
					0, 0, 360,
					CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * 0) / 255), CV_AA, 8);


				// the code below writes the blob id next to the detected blob
				p.x >>= 8;
				p.y >>= 8;
				s.width >>= 8;
				s.height >>= 8;
				sprintf(str, "%03d", CV_BLOB_ID(pB));
				cvGetTextSize(str, &font, &TextSize, NULL);
				p.y -= s.height;
				cvPutText(pI, str, p, &font, CV_RGB(0, 255, 255));
				{
					const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));

					if (pS)
					{
						char* pStr = MY_STRDUP(pS);
						char* pStrFree = pStr;

						while (pStr && strlen(pStr) > 0)
						{
							char* str_next = strchr(pStr, '\n');

							if (str_next)
							{
								str_next[0] = 0;
								str_next++;
							}

							p.y += TextSize.height + 1;
							cvPutText(pI, pStr, p, &font, CV_RGB(0, 255, 255));
							pStr = str_next;
						}
						free(pStrFree);
					}
				}

			}   /* Next blob. */;

			cvNamedWindow("Tracking", 0);
			cvShowImage("Tracking", pI);

			if (btavi_name && pI) // if this frame exists and an output name was passed (e.g. btavi_name = "1.avi"), save it
			{   /* Save to avi file: */
				CvSize      S = cvSize(pI->width, pI->height);
				if (pBTAvi == NULL)
				{
					pBTAvi = cvCreateVideoWriter(
						btavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pBTAvi, pI);
			}

			cvReleaseImage(&pI);
		}   /* Draw all information about test sequence. */
	}   /*  Main loop. */

	if (pFGAvi)cvReleaseVideoWriter(&pFGAvi);
	if (pBTAvi)cvReleaseVideoWriter(&pBTAvi);
	return 0;
}   /* RunBlobTrackingAuto */
Code Example #12
File: Camera.cpp  Project: ACAVJW4H/BlobTracking
    /*************************************************************************
    Process
        Process the frames in a video one by one.
            1) FG detection
            2) Blob Detection
            3) Blob Tracking and Association
            4) Blob Post Processing
            5) Blob Analysis
            6) Store the results
    Exceptions
        None
    *************************************************************************/
    void Camera::Process(const int startFrameIndex, const int endFrameIndex)
    {
        ASSERT_TRUE ( m_initializied );
        ASSERT_TRUE ( m_pTracker != NULL );

        InitializeDisplayWindows( );

        LOG_CONSOLE( "Start processing " + m_videoFileName );

        int key, oneFrameProcess=0, frameNum; 
        for ( frameNum = 1; 
             m_videoCap.grab() &&
            ( key = cvWaitKey( oneFrameProcess ? 0 : 1 ) ) != 27 &&
            ( frameNum <=  endFrameIndex || endFrameIndex < 0 );
            frameNum++ )
        {
            if ( frameNum >= startFrameIndex )
            {
                std::cout << "frameNum:  " << frameNum << '\r';

                // get the video frame
                m_videoCap.retrieve( m_originalFrameMat );

                // downscale the image if required
                if ( m_downScaleImage )
                {
                    cv::resize( m_originalFrameMat, m_frame,  m_frame.size() );
                }
                else
                {
                    m_frame = m_originalFrameMat;
                }

                m_frameIpl = m_frame; 

                if ( key != -1 )
                {
                    oneFrameProcess = ( key == 'r' ) ? 0 : 1;
                }

                // Process the current frame
                m_pTracker->Process( &m_frameIpl, m_pFGMaskIpl);
                m_fgMask        = m_pTracker->GetFGMask();


                // Process the current video frame using the blob tracker
                IplImage fgMaskIpl = m_fgMask;


                // Save Blob Information in a file
                for( int i = m_pTracker->GetBlobNum(); i> 0; i-- )
                {
                    CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                    ASSERT_TRUE( pBlob != NULL );

                    // Save blob record
                    SaveBlobRecord( pBlob, frameNum );
                }

                if ( m_displayIntermediateResult || m_saveIntermediateResult )
                {
                    char tempString[128];
                    std::string textMessage;
                    //display intermediate result if necessary
                    CvFont    font; 
                    CvSize  TextSize;
                    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, CV_AA );

                    sprintf(tempString,"frame # %d", frameNum);
                    textMessage = tempString;
                    cv::putText( m_originalFrameMat, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));
                    cv::putText( m_fgMask, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));
                    cv::putText( m_frame, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));

                    // draw any blobs as green ellipses, with the blob id displayed next to each one
                    int c = 0; // 0: green; 255: red
                    for ( int i = m_pTracker->GetBlobNum(); i > 0; i-- )
                    {
                        CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                        ASSERT_TRUE( pBlob != NULL );

                        cv::Point blobCorner( cvRound( pBlob->x * 256 ), cvRound( pBlob->y * 256 ) );

                        CvSize  blobSize = cvSize( MAX( 1, cvRound( CV_BLOB_RX(pBlob) * 256 ) ), 
                                                   MAX( 1, cvRound( CV_BLOB_RY(pBlob) * 256 ) ) );

                        cv::Scalar boundingBoxColor( c, 255-c, 0 );

                        if ( m_pTracker->GetState( CV_BLOB_ID( pBlob ) ) != 0 )
                        {
                            boundingBoxColor = cv::Scalar( 255-c, c, 0 );
                        }

                        cv::ellipse( m_frame, 
                                    cv::RotatedRect( cv::Point2f( pBlob->x, pBlob->y ), cv::Size2f( pBlob->w, pBlob->h ), 0 ),
                                    cv::Scalar( c, 255-c, 0 ) );
                        blobCorner.x >>= 8;      
                        blobCorner.y >>= 8;
                        
                        blobSize.width >>= 8;
                        blobSize.height >>= 8;
                        blobCorner.y -= blobSize.height;

                        sprintf( tempString, "BlobId=%03d", CV_BLOB_ID(pBlob) );
                        cvGetTextSize( tempString, &font, &TextSize, NULL );
                        
                        cv::putText( m_frame,
                                     std::string( tempString ),
                                     blobCorner,
                                     CV_FONT_HERSHEY_PLAIN,
                                     1,
                                     cv::Scalar( 255, 255, 0, 0 ) );
                    }
                }

                if ( m_displayIntermediateResult )
                {
                    cv::imshow(m_videoFileName+"_FGMask", m_fgMask);
                    cv::imshow(m_videoFileName+"_Tracking", m_frame);
                }

                if ( m_saveIntermediateResult )
                {
                    cv::Mat tmpFrame;
                    cv::cvtColor( m_fgMask, tmpFrame, CV_GRAY2BGR );
                    *m_pFGAvi << tmpFrame;             
                    *m_pBTAvi << m_frame;
                }
            }