Example #1
void cv::calcMotionGradient( const Mat& mhi, Mat& mask,
                             Mat& orientation,
                             double delta1, double delta2,
                             int aperture_size )
{
    mask.create(mhi.size(), CV_8U);
    orientation.create(mhi.size(), CV_32F);
    CvMat _mhi = mhi, _mask = mask, _orientation = orientation;
    cvCalcMotionGradient(&_mhi, &_mask, &_orientation, delta1, delta2, aperture_size);
}
Example #2
void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask,
                             OutputArray _orientation,
                             double delta1, double delta2,
                             int aperture_size )
{
    Mat mhi = _mhi.getMat();
    _mask.create(mhi.size(), CV_8U);
    _orientation.create(mhi.size(), CV_32F);
    CvMat c_mhi = mhi, c_mask = _mask.getMat(), c_orientation = _orientation.getMat();
    cvCalcMotionGradient(&c_mhi, &c_mask, &c_orientation, delta1, delta2, aperture_size);
}
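The two wrappers above are thin shims over the C function cvCalcMotionGradient. For orientation, here is a minimal stand-alone sketch of how the C++ entry point is typically driven (assuming OpenCV 2.4.x, where updateMotionHistory and calcMotionGradient live in the video module; in 3.x+ the motion-template functions moved to cv::motempl in opencv_contrib). The threshold of 30 and the delta values mirror the demos below and are illustrative only:

#include <opencv2/opencv.hpp>

int main()
{
    const double MHI_DURATION   = 1.0;   // how long motion is remembered, in seconds (assumed)
    const double MAX_TIME_DELTA = 0.5;   // delta1 passed to calcMotionGradient (assumed)
    const double MIN_TIME_DELTA = 0.05;  // delta2 passed to calcMotionGradient (assumed)

    cv::VideoCapture cap(0);
    if (!cap.isOpened())
        return 1;

    cv::Mat frame, gray, prev, silh, mhi, mask, orientation;

    for (int n = 0; n < 300 && cap.read(frame); ++n)
    {
        double t = (double)cv::getTickCount() / cv::getTickFrequency(); // timestamp in seconds
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

        if (prev.empty()) {                                   // first frame: allocate the MHI
            prev = gray.clone();
            mhi = cv::Mat::zeros(gray.size(), CV_32FC1);
            continue;
        }

        cv::absdiff(gray, prev, silh);                        // frame difference
        cv::threshold(silh, silh, 30, 1, cv::THRESH_BINARY);  // binary silhouette
        cv::updateMotionHistory(silh, mhi, t, MHI_DURATION);  // update the MHI
        cv::calcMotionGradient(mhi, mask, orientation,        // per-pixel gradient orientation + mask
                               MAX_TIME_DELTA, MIN_TIME_DELTA, 3);

        prev = gray.clone();
    }
    return 0;
}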
Example #3
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  diff_threshold - threshold applied to the frame difference
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
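Example #3 is the update_mhi() routine from OpenCV's classic motempl.c demo; it relies on globals declared outside the function. For reference, the demo's usual declarations look roughly like this (the values shown are the sample's defaults):

// Globals assumed by update_mhi() above, as declared in OpenCV's motempl.c sample
// (values are the sample's defaults; adjust to taste).
const double MHI_DURATION   = 1;     // how long motion is kept in the history image, seconds
const double MAX_TIME_DELTA = 0.5;   // max time delta for cvCalcMotionGradient / cvSegmentMotion
const double MIN_TIME_DELTA = 0.05;  // min time delta for cvCalcMotionGradient
const int N = 4;                     // number of slots in the cyclic frame buffer

IplImage **buf = 0;                  // ring buffer of grayscale frames
int last = 0;                        // index of the most recent frame in buf
IplImage *mhi = 0;                   // motion history image (32F)
IplImage *orient = 0;                // gradient orientation (32F)
IplImage *mask = 0;                  // valid orientation mask (8U)
IplImage *segmask = 0;               // motion segmentation map (32F)
CvMemStorage *storage = 0;           // temporary storage for cvSegmentMotion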
Example #4
static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    unsigned int tmpx = 0, tmpy = 0;
    int tmppcount = 0;
    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( mbuf == 0 ) {
            mbuf = (IplImage**)malloc(N*sizeof(mbuf[0]));
            memset( mbuf, 0, N*sizeof(mbuf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &mbuf[i] );
            mbuf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( mbuf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, mbuf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh=mbuf[idx2];
    cvAbsDiff( mbuf[idx1], mbuf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // threshold it (binary, but to values 0/1 only)
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );    // update the time-based motion history image

    // convert the MHI to an 8-bit image and merge it into the output image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask,0,0,0,dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 ); // orient holds the orientation angle of every moving pixel

    if( !mstorage )
        mstorage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(mstorage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, mstorage, timestamp, MAX_TIME_DELTA ); // split the motion history image into individual motion components

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
	   // printf("ALL image.\n");
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 30 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION); // overall motion direction inside the ROI
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05*0.1  )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        if( Zmode == ZMODE_Z && i != -1 )
        {
            // stabilization rule in zoom-in mode
            tmpx = comp_rect.x + comp_rect.width/2;   // candidate target coordinates for this component
            tmpy = comp_rect.y + comp_rect.height/2;
            // distance of the candidate from the reference point (ax, ay)
            tmpal1 = sqrt(abs((int)tmpx - ax)) + sqrt(abs((int)tmpy - ay));

            if( tmpal2 > tmpal1 || tmppcount == 0 )   // closer to the center than the previous candidate
            {
                tmpal2 = tmpal1;
                tx = tmpx;
                ty = tmpy;
            }
            catchflag = 1;
        }

        if( Zmode == ZMOOE_S )
        {
            // target-selection rule in zoom-out mode: take the first component
            if( tmppcount == 0 && i != -1 )
            {
                tx = comp_rect.x + comp_rect.width/2;
                ty = comp_rect.y + comp_rect.height/2;
                catchflag = 1;
            }
        }
        tmppcount++;
        printf("The %dth rect:(%d,%d)\n", tmppcount,
               comp_rect.x + comp_rect.width/2, comp_rect.y + comp_rect.height/2);
        cvCircle( img, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( img, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }

}
Example #5
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; 
    CvSize size = cvSize(img->width,img->height); 
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;         
    CvScalar color;
 

    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }

        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi );
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY );
    idx2 = (last + 1) % N; 
    last = idx2;
    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh );
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY );
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );

     
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

   
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
  

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

   
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

   
    for( i = -1; i < seq->total; i++ ) {
        if( i < 0 ) { 
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { 
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) 
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

       
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION );
        angle = 360.0 - angle;
        count = cvNorm( silh, 0, CV_L1, 0 );
 

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

      
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;
 
       
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );
 
        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
Example #6
int main (int argc, const char * argv[]){
    
    IplImage* rawImage = NULL; // original image
    IplImage** buf     = NULL;
    IplImage* blur     = NULL; // blurred image
    IplImage* bw       = NULL; // black & white
    IplImage* canny    = NULL; // edge detection (Canny)
    IplImage* silhouette  = NULL; // silhouette used to compute the Motion History Image
    IplImage* mhi      = NULL; // Motion History Image
    IplImage* mask      = NULL; // motion history gradient mask
    IplImage* orient      = NULL; // motion history gradient orientation
    const double MHI_DURATION = 1;
    const double MAX_TIME_DELTA = 0.5;
    const double MIN_TIME_DELTA = 0.05;
    double timestamp   = 0.1;
    int numeroFrames   = 0;
    int keyPressed     = 0;
    int ancho = 0, alto = 0, umbral1 = 0, umbral2 = 30, apertura = 3; // edge-detection thresholds and aperture
    int currBufIndex   = 0;
    CvSize size;
    
    // Capture source (camera/video)
    printf("Capture from camera\n");
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM( 0 );
    // capture = cvCaptureFromFile( argv[1] );
    
    // Display window
    cvNamedWindow( "WithoutBorder", CV_WINDOW_AUTOSIZE );
    
    // Main loop
    while(true){
        // Grab a frame
        ++numeroFrames;
        rawImage = cvQueryFrame( capture );
        if( !rawImage )
            break;
        size     = cvGetSize(rawImage);

        /*
         * Convert to black and white (grayscale)
         */
        bw      = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvConvertImage(rawImage, bw, 0);

        /*
         * Blur the image
         */
        blur    = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvSmooth( bw, blur, CV_BLUR, 11, 11);

        /*
         * Edge detection
         * Thanks to http://redstar.linaresdigital.com/robotica/filtro_canny.c
         */
        canny   = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvCanny(blur, canny, umbral1, umbral2, apertura);

        /*
         * MHI (motion history image)
         */
        if(!mhi){
            cvReleaseImage( &mhi );
            mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
            cvZero(mhi);
        }
        if(!silhouette){
            cvReleaseImage( &silhouette );
            silhouette = cvCreateImage(size, IPL_DEPTH_8U, 1);
            cvZero(silhouette);
        }
        if( buf == NULL ) {
            buf = (IplImage**)malloc(2*sizeof(buf[0]));
            memset( buf, 0, 2*sizeof(buf[0]));
            for(int i = 0; i < 2; i++ ) {
                cvReleaseImage( &buf[i] );
                buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1 );
                cvZero( buf[i] );
            }
        }
        cvCopy(canny, buf[currBufIndex]);
        currBufIndex = (currBufIndex + 1) % 2; // cycle between the two buffer slots
        cvAbsDiff( canny, buf[currBufIndex], silhouette );
        timestamp = (double)clock()/CLOCKS_PER_SEC;
        cvUpdateMotionHistory( silhouette, mhi, timestamp, MHI_DURATION );

        /*
         * MHG (motion history gradient)
         */
        if(!mask || !orient){
            cvReleaseImage( &mask );
            mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
            cvZero(mask);
            cvReleaseImage( &orient );
            // the orientation image must be 32-bit float for cvCalcMotionGradient
            orient = cvCreateImage(size, IPL_DEPTH_32F, 1);
            cvZero(orient);
        }
        cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

        cvShowImage("WithoutBorder", mhi);

        /*
         * Display
         * Compute the area of the image where the thumbnail should be pasted
         *
        ancho = rawImage->width / 4;
        alto = rawImage->height / 4;
        cvSetImageROI(canny, cvRect(rawImage->width - (ancho * 1.1) , rawImage->height - (alto * 1.1), ancho, alto));
        // Copy the frame contents into the selected region
        cvResize(bw, canny, CV_INTER_LINEAR);
        // If the region of interest is not reset, only the freshly copied area is shown
        cvResetImageROI(canny);
        // Show the scaled image
        cvShowImage("WithoutBorder", canny);
        */

        // Release the per-frame images to avoid leaking memory on every iteration
        cvReleaseImage( &bw );
        cvReleaseImage( &blur );
        cvReleaseImage( &canny );

        // Read the keyboard for an exit key; wait 10 ms
        keyPressed = cvWaitKey(10);
        if(keyPressed == 27 || keyPressed == 'q'){
            break;
        }
    }
    
    // Clean up
    cvDestroyWindow( "WithoutBorder" );
    cvReleaseCapture( &capture );
    return 0;
}
Example #7
void CV_MHIGradientTest::run_func()
{
    cvCalcMotionGradient( test_array[INPUT][0], test_array[OUTPUT][0],
                          test_array[OUTPUT][1], delta1, delta2, aperture_size );
}
Example #8
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  diff_threshold - threshold applied to the frame difference
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds

    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) 
    {
        if (buf == 0) 
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );

        cvZero( mhi ); // clear MHI at the beginning

        // create the remaining images with the same size as img
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];

    // get the difference between the current frame and the previous one, stored in silh
    cvAbsDiff(buf[idx1], buf[idx2], silh);

    // threshold it (binarize to 0/1)
    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY);

    // use the silhouette to update the motion history image
    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION);

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get the sequence of motion components
    // segmask is the marked motion-components map; it is not used further
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // iterate through the motion components;
    // one extra iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) 
    {

        if (i < 0) 
        { 
            // case of the whole image (global motion): use the full frame,
            // draw in white with magnitude 100
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else 
        { 
            // i-th motion component: take its bounding rectangle
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;

            // reject very small components
            if( comp_rect.width + comp_rect.height < 100 )
                continue;

            // draw in red with magnitude 30
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation: the global motion direction inside the selected ROI,
        // computed from the component's orientation mask
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);

        angle = 360.0 - angle;  // adjust for images with top-left origin

        // L1 norm = number of points within the silhouette ROI
        count = cvNorm( silh, 0, CV_L1, 0 );

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05)
        {
            continue;
        }

        // draw a clock with an arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
Example #9
// Update Motion History Image: Calculate motion features and orientation.
void motionDetection(IplImage* image, IplImage* destination_image, MotionInfo* motionInfo)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize image_size = cvSize(image->width, image->height); // get current frame image_size
    int previous_frame_index = last_index, current_frame_index;
    
    initialize_images(image_size);
    
    cvCvtColor(image, image_buffer[last_index], CV_BGR2GRAY); // convert frame to grayscale
    
    current_frame_index = (last_index + 1) % N; // index of (last_index - (N-1))th frame
    last_index = current_frame_index;
    
    silhouette = image_buffer[current_frame_index];
    
    cvAbsDiff(image_buffer[previous_frame_index], image_buffer[current_frame_index], silhouette); // Get difference between frames
    cvThreshold(silhouette, silhouette, DIFFERENCE_THRESHOLD, 1, CV_THRESH_BINARY); // Add threshold
    //cvDilate(silhouette, silhouette, 0, 18);
    //cvErode(silhouette, silhouette, 0, 10);
    
    cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION); // Update MHI
    
    // Convert MHI to blue 8U image
    cvCvtScale(mhi, orientation_mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION);
    
    if (destination_image) {
      cvZero(destination_image);
      cvCvtPlaneToPix(orientation_mask, 0, 0, 0, destination_image);
    }
    
    // Calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient(mhi, orientation_mask, orientation, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    
    // motion_feature_sequence = extract_motion_features();
    if(!storage)
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    CvSeq* motion_feature_sequence = cvSegmentMotion(mhi, segment_mask, storage, timestamp, MAX_TIME_DELTA);
    
    int SEGMENT_WIDTH = image_size.width / MAX_SEGMENTS_X;
    int SEGMENT_HEIGHT = image_size.height / MAX_SEGMENTS_Y;
    
    // Global motion
    CvRect global_motion_segment = cvRect(0, 0, image_size.width, image_size.height);
    motionInfo->global_angle = calculate_orientation(global_motion_segment, silhouette);
    
    if (destination_image)
      draw_orientation(destination_image, &global_motion_segment, motionInfo->global_angle, 100, CV_RGB(0, 255, 0), true);
    
    long area = 0;
    long totalArea = 0;
    int totalMovingSegments = 0;
    bool hasValidMovement = false;
    CvRect segmentRect;
    
    // Segmented motion
    for(int x = 0; x < MAX_SEGMENTS_X; x++)
    {
      for(int y = 0; y < MAX_SEGMENTS_Y; y++)
      {
        segmentRect = cvRect(x * SEGMENT_WIDTH, y * SEGMENT_HEIGHT, SEGMENT_WIDTH, SEGMENT_HEIGHT);
        area = calculate_motion(&segmentRect, motion_feature_sequence);
        hasValidMovement = (area > MIN_MOTION_FEATURE_AREA);
        
        motionInfo->segment_motion_areas[x][y] = area;
        motionInfo->segment_movements[x][y] = hasValidMovement;
        motionInfo->segment_angles[x][y] = calculate_orientation(segmentRect, silhouette);
        
        totalArea += area;
        totalMovingSegments += (area > MIN_MOTION_FEATURE_AREA);
        
        //printf("%i, ", area);
        //fflush(stdout);
        
        if (hasValidMovement)
          if (destination_image)
            draw_orientation(destination_image, &segmentRect, motionInfo->segment_angles[x][y], 20, CV_RGB(255, 0, 0), true);
      }
    }
    motionInfo->total_motion_area = totalArea;
    motionInfo->total_segments_with_movements = totalMovingSegments;
    motionInfo->SEGMENTS_X = MAX_SEGMENTS_X;
    motionInfo->SEGMENTS_Y = MAX_SEGMENTS_Y;
    
    printf("%i, %f\n", totalArea, (float)totalArea / (float)(image_size.width*image_size.height));
    //fflush(stdout);
}
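The function above calls helpers that are not shown (initialize_images, calculate_orientation, calculate_motion, draw_orientation) and uses globals such as mhi, orientation, orientation_mask and segment_mask. As one illustration, a hypothetical calculate_orientation() following the ROI pattern used by the other examples could look like the sketch below; the project's real helper may differ:

// Hypothetical sketch of the missing calculate_orientation() helper, modeled on the
// ROI pattern used by the other examples in this list; not the project's original code.
static double calculate_orientation(CvRect roi, IplImage* silhouette)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC;
    double angle, count;

    cvSetImageROI(silhouette, roi);
    cvSetImageROI(mhi, roi);
    cvSetImageROI(orientation, roi);
    cvSetImageROI(orientation_mask, roi);

    // overall motion direction inside the region, in degrees
    angle = cvCalcGlobalOrientation(orientation, orientation_mask, mhi,
                                    timestamp, MHI_DURATION);
    angle = 360.0 - angle;  // adjust for images with top-left origin

    // number of moving points inside the region (L1 norm of the binary silhouette)
    count = cvNorm(silhouette, 0, CV_L1, 0);

    cvResetImageROI(orientation_mask);
    cvResetImageROI(orientation);
    cvResetImageROI(mhi);
    cvResetImageROI(silhouette);

    // treat regions with almost no motion as direction 0
    if (count < roi.width * roi.height * 0.05)
        return 0;

    return angle;
}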
Example #10
static void  computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){
	if(DEBUG){
		std::cout << "-- VECTOR COMPUTING" << std::endl;		
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size 640x480
	int i, idx1 = last, idx2;
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	//--SURF CORNERS--
	if(DEBUG){
		std::cout << "--- SURF CORNERS" << std::endl;
	}
        color = CV_RGB(0,255,0);
        CvMemStorage* storage2 = cvCreateMemStorage(0);
        CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1);
        CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
        cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params );
        if(DEBUG){
			printf("Image Descriptors: %d\n", imageDescriptors->total);
		}
        for( int j = 0; j < imageKeypoints->total; j++ ){
            CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j );
			center.x = cvRound(r->pt.x);
            center.y = cvRound(r->pt.y);
			if(DEBUG){
				printf("j: %d \t", j);               
				printf("total: %d \t", imageKeypoints->total);               
				printf("valor hessiano: %f \t", r->hessian);
				printf("x: %d \t", center.x);
				printf("y: %d \n", center.y);
			}
			// Draw the keypoint where the region of interest is
			cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 );
			// Fill the relevance matrix with the vectors
			relevancePointToVector(center.x, center.y, wROI, hROI, 5);
		}
	//--SURF CORNERS


	// calculate motion gradient orientation and valid orientation mask
	cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
	
	// Compute Motion on 4x4 Cuadrants
	if(DEBUG){
		std::cout << "--- MOTION CUADRANTS" << std::endl;
	}
	i = 25;
	color = CV_RGB(255,0,0);
	magnitude = 30;
	for (int r = 0; r < size.height; r += hROI){
		for (int c = 0; c < size.width; c += wROI){
			comp_rect.x = c;
			comp_rect.y = r;
			comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI;
			comp_rect.height = (r + hROI > size.height) ? (size.height - r) : hROI;

			cvSetImageROI( mhi, comp_rect );
			cvSetImageROI( orient, comp_rect );
			cvSetImageROI( mask, comp_rect );
			cvSetImageROI( silh, comp_rect );
			cvSetImageROI( img, comp_rect );

			// Process Motion
			angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
			angle = 360.0 - angle;  // adjust for images with top-left origin
			count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
			roi = cvGetImageROI(mhi);
			center = cvPoint( (comp_rect.x + comp_rect.width/2),
					  (comp_rect.y + comp_rect.height/2) );
			cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
			cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
			cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );	
			
			if(DEBUG){
				std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi
			}
			cvResetImageROI( mhi );
			cvResetImageROI( orient );
			cvResetImageROI( mask );
			cvResetImageROI( silh );
			cvResetImageROI(img);
			relevanceDirectionToVector(i, angle);
			++i;
		}
	}

	// Compute Global Motion
	if(DEBUG){
		std::cout << "--- MOTION GLOBAL" << std::endl;
	}
	comp_rect = cvRect( 0, 0, size.width, size.height );
	color = CV_RGB(255,255,255);
	magnitude = 100;
	angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
	angle = 360.0 - angle;  // adjust for images with top-left origin
	count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
	roi = cvGetImageROI(mhi);
	center = cvPoint( (comp_rect.x + comp_rect.width/2),
			  (comp_rect.y + comp_rect.height/2) );
	cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
	cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
	cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
	if(DEBUG){
		std::cout << "Motion Main-> x: " << roi.x << " y: " << roi.y << " count: " << count << std::endl; // print the roi
	}
	relevanceDirectionToVector(50, angle);
}
Example #11
public:bool analizarMhi( IplImage* img, IplImage* dst, int diff_threshold, CvRect rect ) {
        double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
        int i, idx1 = last, idx2;
        IplImage* silh;
        CvSeq* seq;
        CvRect comp_rect;
        cv::Rect result;
        double count;
        double angle;
        CvPoint center;
        double magnitude;
        CvScalar color;

        cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

        idx2 = (last + 1) % N; // index of (last - (N-1))th frame
        last = idx2;

        silh = buf[idx2];
        cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

        cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
        cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

        // convert MHI to blue 8u image
        cvCvtScale( mhi, mask, 255./MHI_DURATION,
                    (MHI_DURATION - timestamp)*255./MHI_DURATION );
        cvZero( dst );
        cvCvtPlaneToPix( mask, 0, 0, 0, dst );

        // calculate motion gradient orientation and valid orientation mask
        cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

        if( !storage )
            storage = cvCreateMemStorage(0);
        else
            cvClearMemStorage(storage);

        // segment motion: get sequence of motion components
        // segmask is marked motion components map. It is not used further
        seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

        // iterate through the motion components,
        // One more iteration (i == -1) corresponds to the whole image (global motion)
    //    for( i = -1; i < seq->total; i++ ) {
        i = 1;
        comp_rect = cvRect( 0, 0, img->width, img->height );
        color = CV_RGB(255,255,255);
        magnitude = 100;
        while (result.area() < 10 && i < seq->total) {

                comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
                if( comp_rect.width + comp_rect.height < 100 ) {// reject very small components
                    i++;
                    continue;
                }
                color = CV_RGB(255,0,0);
                magnitude = 30;

            // select component ROI
            cvSetImageROI( silh, comp_rect );
            cvSetImageROI( mhi, comp_rect );
            cvSetImageROI( orient, comp_rect );
            cvSetImageROI( mask, comp_rect );

            // calculate orientation
            angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
            angle = 360.0 - angle;  // adjust for images with top-left origin

            count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

            cvResetImageROI( mhi );
            cvResetImageROI( orient );
            cvResetImageROI( mask );
            cvResetImageROI( silh );

            center = cvPoint( (comp_rect.x + comp_rect.width/2),
                              (comp_rect.y + comp_rect.height/2) );

            cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
            cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                    cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );

            result = (cv::Rect)comp_rect & (cv::Rect)rect;
            i++;
        }

        return result.area() > 10;
    }