Example No. 1
void sum_rgb( IplImage* src, IplImage* dst ) {

  // Allocate individual image planes.
  IplImage* r = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );
  IplImage* g = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );
  IplImage* b = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );

  // Temporary storage.
  IplImage* s = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, 1);
      
  // Split the image into its color planes.
  cvSplit( src, r, g, b, NULL );
     
  // Accumulate separate planes, combine and threshold.
  cvZero( s );
  cvAcc( b, s );
  cvAcc( g, s );
  cvAcc( r, s );

  // Truncate values above 100 and rescale into dst.
  cvThreshold( s, s, 100, 100, CV_THRESH_TRUNC );
  cvConvertScale( s, dst, 1, 0 );
 
  cvReleaseImage( &r );
  cvReleaseImage( &g );   
  cvReleaseImage( &b );   
  cvReleaseImage( &s );

}
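A possible caller for sum_rgb(), shown only as a sketch: it assumes the legacy C headers, a placeholder image path, and that dst must be a preallocated single-channel 8-bit image of the same size as the 3-channel source.
#include <cv.h>
#include <highgui.h>

int main( int argc, char** argv ) {
  // Load a 3-channel, 8-bit image (the path is a placeholder).
  IplImage* src = cvLoadImage( argc > 1 ? argv[1] : "image.jpg" );
  if( !src ) return -1;

  // sum_rgb() writes into a preallocated single-channel, 8-bit image.
  IplImage* dst = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );
  sum_rgb( src, dst );

  cvNamedWindow( "sum_rgb", CV_WINDOW_AUTOSIZE );
  cvShowImage( "sum_rgb", dst );
  cvWaitKey( 0 );

  cvReleaseImage( &src );
  cvReleaseImage( &dst );
  cvDestroyWindow( "sum_rgb" );
  return 0;
}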
Example No. 2
// Learn the background statistics for one more frame
void accumulateBackground( IplImage *I ){
	static int first = 1;
	cvCvtScale( I, Iscratch, 1, 0 );
	if( !first ){
		cvAcc( Iscratch, IavgF );
		cvAbsDiff( Iscratch, IprevF, Iscratch2 );
		cvAcc( Iscratch2, IdiffF );
		Icount += 1.0;
	}
	first = 0;
	cvCopy( Iscratch, IprevF );
}
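The routine above relies on accumulator and scratch images declared and allocated elsewhere. A minimal sketch of those globals, assuming 3-channel 32f buffers sized like the incoming frames; the helper name AllocateImages is illustrative, not taken from the listing.
IplImage *IavgF, *IdiffF, *IprevF, *Iscratch, *Iscratch2;
float Icount;

void AllocateImages( IplImage* I ) {   // hypothetical helper
	CvSize sz = cvGetSize( I );
	IavgF     = cvCreateImage( sz, IPL_DEPTH_32F, 3 ); cvZero( IavgF );
	IdiffF    = cvCreateImage( sz, IPL_DEPTH_32F, 3 ); cvZero( IdiffF );
	IprevF    = cvCreateImage( sz, IPL_DEPTH_32F, 3 ); cvZero( IprevF );
	Iscratch  = cvCreateImage( sz, IPL_DEPTH_32F, 3 ); cvZero( Iscratch );
	Iscratch2 = cvCreateImage( sz, IPL_DEPTH_32F, 3 ); cvZero( Iscratch2 );
	Icount = 0.00001f; // guard against a later divide-by-zero
}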
Example No. 3
// Accumulate the background statistics for one more frame
// We accumulate the images, the image differences, and the count of images for
//    the routine createModelsfromStats() to work on after we're done accumulating N frames.
// I		Background image, 3 channel, 8u
// number	Camera number
void accumulateBackground(IplImage *I, int number)
{
	static int first = 1;
	cvCvtScale(I,Iscratch,1,0); //To float;
	if (!first){
		cvAcc(Iscratch,IavgF[number]);
		cvAbsDiff(Iscratch,IprevF[number],Iscratch2);
		cvAcc(Iscratch2,IdiffF[number]);
		Icount[number] += 1.0;
	}
	first = 0;
	cvCopy(Iscratch,IprevF[number]);
}
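A hedged sketch of how this per-camera version might be driven for one camera stream, assuming the per-camera accumulators have already been allocated; the capture setup and the frame count N are placeholders.
CvCapture* cap = cvCreateCameraCapture( 0 );
int cam = 0, N = 50;
for( int i = 0; i < N; i++ ) {
	IplImage* frame = cvQueryFrame( cap );   // 3-channel, 8u frame
	if( !frame ) break;
	accumulateBackground( frame, cam );
}
cvReleaseCapture( &cap );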
Example No. 4
// --------------------------------------------------------------------------
void BlobModel::UpdateHeatMap(IplImage* motionmap) {
	// for each head candidate draw a circle on the floor; add it to the heatmap image
	// NOTE: due to scaling, circle becomes an ellipse
	double BLOB_RADIUS = 50; // average radius of a blob's projection on the floor, in centimeters
	bool oneclose = false;
	for (int i=0;i<blob.GetCount();i++) {
		CvSeq* heads = doc->blobmodel.blob[i]->heads;
		for (int j=0;j<heads->total;j++) {
			BlobRay* br = (BlobRay*)cvGetSeqElem(heads, j);
			CvPoint3D32f head, foot;
			doc->cameramodel.coordsImage2RealSameXY_Feet2Floor(cvPointTo32f(br->p1), cvPointTo32f(br->p2), &head, &foot);
			// ignore short candidates
			if (head.z < doc->bodymodel.m_minHeight)
				continue;
			// ignore nearby candidates that repeat the same artifact
			if (oneclose)
				continue;
			if (d(foot) < BLOB_RADIUS*2)
				oneclose = true;

			CvPoint c = doc->floormodel.coordsReal2Floor(foot);
			CvSize axes = doc->floormodel.sizeReal2Floor(cvSize2D32f(BLOB_RADIUS, BLOB_RADIUS)); 
			cvZero(motionmaptemp);
			cvEllipse(motionmaptemp, c, axes, 0, 0, 360, cvScalar(1), CV_FILLED);
			cvAcc(motionmaptemp, motionmap);
		}
	}
}
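Because motionmap accumulates floating-point counts, it usually has to be rescaled before it can be viewed. A hedged sketch of one way to render it, assuming motionmap is a single-channel 32f image; heat8u and the window name are illustrative, not part of the original code.
IplImage* heat8u = cvCreateImage( cvGetSize(motionmap), IPL_DEPTH_8U, 1 );
double minv = 0, maxv = 0;
cvMinMaxLoc( motionmap, &minv, &maxv );                        // current count range
cvConvertScale( motionmap, heat8u, maxv > 0 ? 255.0 / maxv : 0, 0 );
cvShowImage( "heatmap", heat8u );                              // hypothetical window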
Example No. 5
bool MT_BackgroundFrameCreator::DoStep()
{
  
  
    if(m_iStatus == MT_MAKEBG_ERR)
    {
        return false;
    }
  
    IplImage* Frame;  // this memory is managed by the capture object
  
    if(m_pCapture->getMode() == MT_FC_MODE_AVI)
    {
        // generate a random frame index
        double frame_index = floor(MT_frandr(m_iStartFrame, m_iEndFrame));

        // get the frame
        Frame = m_pCapture->getFrame(frame_index);
    
        if(m_bDoInpaint)
        {
            do_inpainting(Frame, m_iInpaintRadius, m_InpaintROI);
        }
      
        // accumulate this frame into the BG frame (divide at end)
        cvAcc(Frame, m_pBG);
    }
  
    if(m_pCapture->getMode() == MT_FC_MODE_CAM)
    {
        /* for now all we'll do is get consecutive frames */
        Frame = m_pCapture->getFrame();

        if(m_bDoInpaint)
        {
            do_inpainting(Frame, m_iInpaintRadius, m_InpaintROI);
        }
      
        // accumulate this frame into the BG frame (divide at end)
        cvAcc(Frame, m_pBG);
    }
  
    return true;
  
}
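The comments note that the accumulated sum is divided at the end to obtain the background frame. A minimal sketch of that finalization, assuming the accumulator is a 32f image and the number of accumulated frames is tracked separately; the names below are illustrative, not members of MT_BackgroundFrameCreator.
void finalize_background( IplImage* accumulator, IplImage* bg_out, int num_frames )
{
    if( num_frames > 0 )
    {
        // Divide the running sum by the frame count to get the mean background.
        cvConvertScale( accumulator, bg_out, 1.0 / num_frames, 0 );
    }
}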
Example No. 6
// Accumulate the background statistics for one more frame.
// We accumulate the images, the image differences, and the count of images for
//    the routine createModelsfromStats() to work on after we're done accumulating N frames.
//
// Parameters:
//   I:       Background image, 3 channel, 8u
//   number:	Camera number
void accumulateBackground(IplImage *I, int number)
{
	static int first = 1;  // nb. Not thread safe

  // Turn the raw background 8-bit-per-channel, three-color-channel image into a
  // floating-point three-channel image.
	cvCvtScale(I, Iscratch, 1, 0);

	if (!first)
  {
    // Learn the accumulated background image.
		cvAcc(Iscratch, IavgF[number]);

    // Learn the accumulated absolute value of frame-to-frame image differences.
		cvAbsDiff(Iscratch, IprevF[number], Iscratch2);
		cvAcc(Iscratch2, IdiffF[number]);

		Icount[number] += 1.0;
	}
	first = 0;
	cvCopy(Iscratch, IprevF[number]);
}
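The header comment refers to a companion routine, createModelsfromStats(), that runs once the N frames have been accumulated. A hedged sketch of what such a routine could do with these buffers: divide the sums by Icount to get means, then build high/low acceptance bands. The IhiF/IlowF images and the scale parameters are assumptions, not part of the original listing.
void createModelsfromStats( int number, double high_scale, double low_scale )
{
	// Turn the running sums into averages.
	cvConvertScale( IavgF[number], IavgF[number], 1.0 / Icount[number], 0 );
	cvConvertScale( IdiffF[number], IdiffF[number], 1.0 / Icount[number], 0 );

	// Keep the average difference strictly positive so the band has some width.
	cvAddS( IdiffF[number], cvScalarAll(1.0), IdiffF[number] );

	// High and low thresholds around the mean background (IhiF/IlowF are assumed).
	cvConvertScale( IdiffF[number], Iscratch, high_scale, 0 );
	cvAdd( Iscratch, IavgF[number], IhiF[number] );
	cvConvertScale( IdiffF[number], Iscratch, low_scale, 0 );
	cvSub( IavgF[number], Iscratch, IlowF[number] );
}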
Example No. 7
void
icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
              CvPoint anchor, double delta, int borderType )
{
    // disable OpenMP in the case of Visual Studio,
    // otherwise the performance drops significantly
#undef USE_OPENMP
#if !defined _MSC_VER || defined CV_ICC
    #define USE_OPENMP 1
#endif

    const double block_scale = 4.5;
    const int min_block_size = 256;
    cv::Ptr<CvMat> dft_img[CV_MAX_THREADS];
    cv::Ptr<CvMat> dft_templ;
    std::vector<uchar> buf[CV_MAX_THREADS];
    int k, num_threads = 0;
    
    CvMat istub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat cstub, *corr = (CvMat*)_corr;
    CvSize dftsize, blocksize;
    int depth, templ_depth, corr_depth, max_depth = CV_32F,
        cn, templ_cn, corr_cn, buf_size = 0,
        tile_count_x, tile_count_y, tile_count;

    img = cvGetMat( img, &istub );
    templ = cvGetMat( templ, &tstub );
    corr = cvGetMat( corr, &cstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_16U &&
        CV_MAT_DEPTH( img->type ) != CV_32F &&
        CV_MAT_DEPTH( img->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The function supports only 8u, 16u, 32f and 64f data types" );

    if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
        "Template (kernel) must be of the same depth as the input image, or be 32f" );
    
    if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
        CV_MAT_DEPTH( corr->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The output image must have the same depth as the input image, or be 32f/64f" );

    if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
        (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
        CV_Error( CV_StsUnsupportedFormat,
        "The output must have the same number of channels as the input (when the template has 1 channel), "
        "or the output must have 1 channel when the input and the template have the same number of channels" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);
    templ_depth = CV_MAT_DEPTH(templ->type);
    templ_cn = CV_MAT_CN(templ->type);
    corr_depth = CV_MAT_DEPTH(corr->type);
    corr_cn = CV_MAT_CN(corr->type);

    CV_Assert( corr_cn == 1 || delta == 0 );

    max_depth = MAX( max_depth, templ_depth );
    max_depth = MAX( max_depth, depth );
    max_depth = MAX( max_depth, corr_depth );
    if( depth > CV_8U )
        max_depth = CV_64F;

    /*if( img->cols < templ->cols || img->rows < templ->rows )
        CV_Error( CV_StsUnmatchedSizes,
        "Such a combination of image and template/filter size is not supported" );*/

    if( corr->rows > img->rows + templ->rows - 1 ||
        corr->cols > img->cols + templ->cols - 1 )
        CV_Error( CV_StsUnmatchedSizes,
        "output image should not be greater than (W + w - 1)x(H + h - 1)" );

    blocksize.width = cvRound(templ->cols*block_scale);
    blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = cvRound(templ->rows*block_scale);
    blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
    blocksize.height = MIN( blocksize.height, corr->rows );

    dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
    if( dftsize.width == 1 )
        dftsize.width = 2;
    dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ->cols + 1;
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = dftsize.height - templ->rows + 1;
    blocksize.height = MIN( blocksize.height, corr->rows );

    dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );

#ifdef USE_OPENMP
    num_threads = cvGetNumThreads();
#else
    num_threads = 1;
#endif

    for( k = 0; k < num_threads; k++ )
        dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth );

    if( templ_cn > 1 && templ_depth != max_depth )
        buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);

    if( cn > 1 && depth != max_depth )
        buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
            (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));

    if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
        buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));

    if( buf_size > 0 )
    {
        for( k = 0; k < num_threads; k++ )
            buf[k].resize(buf_size);
    }

    // compute DFT of each template plane
    for( k = 0; k < templ_cn; k++ )
    {
        CvMat dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        int yofs = k*dftsize.height;

        src = templ;
        dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));
    
        if( templ_cn > 1 )
        {
            planes[k] = templ_depth == max_depth ? dst :
                cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] );
            cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
            src = planes[k];
            planes[k] = 0;
        }

        if( dst != src )
            cvConvert( src, dst );

        if( dft_templ->cols > templ->cols )
        {
            cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
                          dft_templ->cols - templ->cols, templ->rows) );
            cvZero( dst );
        }
        cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
        cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
    }

    tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
    tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
    tile_count = tile_count_x*tile_count_y;

#if defined _OPENMP && defined USE_OPENMP
    #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
#endif
    // calculate correlation by blocks
    for( k = 0; k < tile_count; k++ )
    {
#ifdef USE_OPENMP
        int thread_idx = cvGetThreadNum();
#else
        int thread_idx = 0;
#endif
        int x = (k%tile_count_x)*blocksize.width;
        int y = (k/tile_count_x)*blocksize.height;
        int i, yofs;
        CvMat sstub, dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        CvMat* _dft_img = dft_img[thread_idx];
        uchar* _buf = buf_size > 0 ? &buf[thread_idx][0] : 0;
        CvSize csz = { blocksize.width, blocksize.height }, isz;
        int x0 = x - anchor.x, y0 = y - anchor.y;
        int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
        csz.width = MIN( csz.width, corr->cols - x );
        csz.height = MIN( csz.height, corr->rows - y );
        isz.width = csz.width + templ->cols - 1;
        isz.height = csz.height + templ->rows - 1;
        x2 = MIN( img->cols, x0 + isz.width );
        y2 = MIN( img->rows, y0 + isz.height );
        
        for( i = 0; i < cn; i++ )
        {
            CvMat dstub1, *dst1;
            yofs = i*dftsize.height;

            src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
            dst = cvGetSubRect( _dft_img, &dstub,
                cvRect(0,0,isz.width,isz.height) );
            dst1 = dst;
            
            if( x2 - x1 < isz.width || y2 - y1 < isz.height )
                dst1 = cvGetSubRect( _dft_img, &dstub1,
                    cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));

            if( cn > 1 )
            {
                planes[i] = dst1;
                if( depth != max_depth )
                    planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
                cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
                src = planes[i];
                planes[i] = 0;
            }

            if( dst1 != src )
                cvConvert( src, dst1 );

            if( dst != dst1 )
                cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );

            if( dftsize.width > isz.width )
            {
                cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
                      dftsize.width - isz.width,dftsize.height) );
                cvZero( dst );
            }

            cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
            cvGetSubRect( dft_templ, dst,
                cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );

            cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
            cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );

            src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
            dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );

            if( corr_cn > 1 )
            {
                planes[i] = src;
                if( corr_depth != max_depth )
                {
                    planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
                                                 corr_depth, _buf );
                    cvConvertScale( src, planes[i], 1, delta );
                }
                cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
                planes[i] = 0;                    
            }
            else
            {
                if( i == 0 )
                    cvConvertScale( src, dst, 1, delta );
                else
                {
                    if( max_depth > corr_depth )
                    {
                        cvInitMatHeader( &temp, csz.height, csz.width,
                                         corr_depth, _buf );
                        cvConvert( src, &temp );
                        src = &temp;
                    }
                    cvAcc( src, dst );
                }
            }
        }
    }
}
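icvCrossCorr() is an internal helper; in the OpenCV sources of this era it is reached through the template-matching code rather than called directly. A small sketch of driving the same DFT-based correlation through the public API; the file names are placeholders.
IplImage* image = cvLoadImage( "scene.png", CV_LOAD_IMAGE_GRAYSCALE );
IplImage* templ = cvLoadImage( "patch.png", CV_LOAD_IMAGE_GRAYSCALE );
CvSize rsz = cvSize( image->width  - templ->width  + 1,
                     image->height - templ->height + 1 );
IplImage* result = cvCreateImage( rsz, IPL_DEPTH_32F, 1 );
cvMatchTemplate( image, templ, result, CV_TM_CCORR );  // plain cross-correlation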
Example No. 8
/*
  Generate a calibration image based on the average of some
  accumulated images.
*/
int generate_cal_image (const char *dir, int num_cal_images)
{
	int key = 0, i;	/* initialize key so it is defined even if the loop below never runs */
	CvScalar bkgd_color, one;
	IplImage *img, *cal_img, *sum_img, *mask;
	CvSize size = { IMAGE_WIDTH, IMAGE_HEIGHT };

	img = cvCreateImage(size, IPL_DEPTH_8U, 1);
	if (!img)
		return 0;
	
	cal_img = cvCreateImage(size, IPL_DEPTH_8U, 1);
	if (!cal_img) {
		cvReleaseImage(&img);
		return 0;
	}

	mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	if (!mask) {
		cvReleaseImage(&img);
		cvReleaseImage(&cal_img);
		return 0;
	}

	sum_img = cvCreateImage(size, IPL_DEPTH_32F, 1);
	if (!sum_img) {
		cvReleaseImage(&img);
		cvReleaseImage(&cal_img);
		cvReleaseImage(&mask);
		return 0;
	}

	bkgd_color.val[0] = BACKGROUND_COLOR;
	cvSet(img, bkgd_color, NULL);
	cvSet(cal_img, bkgd_color, NULL);
	
	one.val[0] = 1.0;
	cvSet(mask, one, NULL);

	cvZero(sum_img);
	
	for (i = 0; i < num_cal_images; i++) {		
		draw_shapes(img);
		save_image(dir, img, i);

		cvAcc(img, sum_img, mask);
		cvZero(cal_img);

		cvConvertScale(sum_img, cal_img, 1.0 / (i + 1), 0);
		cvShowImage("shapegen", cal_img);

		if (dir)
			key = 0xff & cvWaitKey(500);
		else
			key = 0xff & cvWaitKey(0);

		if (key == ESCAPE_KEY)
			break;
	}

	if (key != ESCAPE_KEY)
		save_image(dir, cal_img, -1);

	cvReleaseImage(&img);
	cvReleaseImage(&cal_img);
	cvReleaseImage(&sum_img);
	cvReleaseImage(&mask);

	return (key != ESCAPE_KEY);
}
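generate_cal_image() draws into a window named "shapegen" but does not create it, so a caller has to. A minimal sketch; the directory and frame count are placeholders.
int main( void )
{
	cvNamedWindow( "shapegen", CV_WINDOW_AUTOSIZE );
	/* Per the cvWaitKey logic above, passing NULL as dir makes the loop wait for a key on each frame. */
	int ok = generate_cal_image( "./cal", 10 );
	cvDestroyWindow( "shapegen" );
	return ok ? 0 : 1;
}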
Example No. 9
int main(int argc, char **argv)
{
  bool isStop = false;
  const int INIT_TIME = 50;
  const double BG_RATIO = 0.02; // background region update rate
  const double OBJ_RATIO = 0.005; // object region update rate
  const double Zeta = 10.0;
  IplImage *img = NULL;

  CvCapture *capture = NULL;
  capture = cvCreateCameraCapture(0);
  //capture = cvCaptureFromAVI("test.avi");
  if(capture == NULL){
    printf("capture device not found!!");
    return -1;
  }

  img = cvQueryFrame(capture);
  int w = img->width;
  int h = img->height;

  IplImage *imgAverage = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSgm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgTmp = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_lower = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_upper = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSilhouette = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSilhouetteInv = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgResult = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);

  printf("背景初期化中...\n");
  cvSetZero(imgAverage);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvAcc(img, imgAverage);
    printf("輝度平均 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgAverage, imgAverage, 1.0 / INIT_TIME);
  cvSetZero(imgSgm);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvConvert(img, imgTmp);
    cvSub(imgTmp, imgAverage, imgTmp);
    cvPow(imgTmp, imgTmp, 2.0);
    cvConvertScale(imgTmp, imgTmp, 2.0);
    cvPow(imgTmp, imgTmp, 0.5);
    cvAcc(imgTmp, imgSgm);
    printf("輝度振幅 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgSgm, imgSgm, 1.0 / INIT_TIME);
  printf("背景初期化完了\n");

  char winNameCapture[] = "Capture";
  char winNameSilhouette[] = "Silhouette";
  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameSilhouette, CV_WINDOW_AUTOSIZE);

  while(1){
    if(!isStop){
      img = cvQueryFrame(capture);
      if(img == NULL) break;
      cvConvert(img, imgTmp);

      // luminance range
      cvSub(imgAverage, imgSgm, img_lower);
      cvSubS(img_lower, cvScalarAll(Zeta), img_lower);
      cvAdd(imgAverage, imgSgm, img_upper);
      cvAddS(img_upper, cvScalarAll(Zeta), img_upper);
      cvInRange(imgTmp, img_lower, img_upper, imgSilhouette);

      // luminance amplitude
      cvSub(imgTmp, imgAverage, imgTmp);
      cvPow(imgTmp, imgTmp, 2.0);
      cvConvertScale(imgTmp, imgTmp, 2.0);
      cvPow(imgTmp, imgTmp, 0.5);

      // update the background region
      cvRunningAvg(img, imgAverage, BG_RATIO, imgSilhouette);
      cvRunningAvg(imgTmp, imgSgm, BG_RATIO, imgSilhouette);

      // update the object region
      cvNot(imgSilhouette, imgSilhouetteInv);
      cvRunningAvg(imgTmp, imgSgm, OBJ_RATIO, imgSilhouetteInv);

      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // erode
      cvDilate(imgSilhouette, imgSilhouette, NULL, 2); // dilate
      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // erode

      cvMerge(imgSilhouette, imgSilhouette, imgSilhouette, NULL, imgResult);
      cvShowImage(winNameCapture, img);
      cvShowImage(winNameSilhouette, imgResult);
    }
    int waitKey = cvWaitKey(33);
    if(waitKey == 'q') break;
    if(waitKey == ' '){
      isStop = !isStop;
      if(isStop) printf("stop\n");
      else printf("start\n");
    }
  }

  cvReleaseCapture(&capture);
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameSilhouette);

  return 0;
}