double cv::calcGlobalOrientation( const Mat& orientation, const Mat& mask, const Mat& mhi, double timestamp, double duration ) { CvMat _orientation = orientation, _mask = mask, _mhi = mhi; return cvCalcGlobalOrientation(&_orientation, &_mask, &_mhi, timestamp, duration); }
/// InputArray overload: materializes the inputs as Mat headers and forwards
/// to the C API cvCalcGlobalOrientation().
double cv::calcGlobalOrientation( InputArray _orientation, InputArray _mask,
                                  InputArray _mhi, double timestamp, double duration )
{
    Mat orientation = _orientation.getMat();
    Mat mask = _mask.getMat();
    Mat mhi = _mhi.getMat();

    // Build CvMat headers over the Mat data and call the C implementation.
    CvMat cOrientation = orientation;
    CvMat cMask = mask;
    CvMat cMhi = mhi;
    return cvCalcGlobalOrientation( &cOrientation, &cMask, &cMhi, timestamp, duration );
}
// Calculate orientation angle for a specified image segment. int calculate_orientation(CvRect rect, IplImage* silhouette) { double motion_angle; cvSetImageROI(mhi, rect); cvSetImageROI(orientation, rect); cvSetImageROI(orientation_mask, rect); cvSetImageROI(silhouette, rect); motion_angle = 360.0 - cvCalcGlobalOrientation(orientation, orientation_mask, mhi, timestamp, MHI_DURATION); cvResetImageROI(mhi); cvResetImageROI(orientation); cvResetImageROI(orientation_mask); cvResetImageROI(silhouette); return motion_angle; }
// Update the motion-history image (MHI) from a new frame, render the motion
// visualization into `dst`, draw per-component markers on `img`, and update
// the target-tracking globals used by the zoom stabilization logic.
// Relies on file-scope state declared outside this chunk: mbuf (frame ring
// buffer), last, N, mhi, orient, segmask, mask, mstorage, plus the tracking
// globals Zmode, ZMODE_Z, ZMOOE_S, tx, ty, ax, ay, tmpal1, tmpal2, catchflag.
//   img            - input BGR frame; markers are drawn onto it
//   dst            - output motion visualization
//   diff_threshold - threshold for the frame-difference binarization
//                    (that call is currently commented out below)
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;
    unsigned int tmpx=0,tmpy=0;   // candidate target coordinates for this frame
    int tmppcount=0;              // number of components processed so far

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( mbuf == 0 ) {
            mbuf = (IplImage**)malloc(N*sizeof(mbuf[0]));
            memset( mbuf, 0, N*sizeof(mbuf[0]));
        }
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &mbuf[i] );
            mbuf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( mbuf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, mbuf[last], CV_BGR2GRAY ); // convert frame to grayscale
    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;
    silh=mbuf[idx2];
    cvAbsDiff( mbuf[idx1], mbuf[idx2], silh ); // raw frame difference is used as the silhouette
    // Disabled binarization step ("binarization-like, but the values differ
    // only between 0 and 1" per the original author's note):
    // cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY );
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update the time-stamped motion history image

    // Convert the MHI into a pixel-valued image and merge it into the output.
    cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask,0,0,0,dst );

    // calculate motion gradient orientation and valid orientation mask;
    // orient holds the direction angle of every moving point
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    if( !mstorage )
        mstorage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(mstorage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, mstorage, timestamp, MAX_TIME_DELTA ); // split the MHI into motion components

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {
        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
            // printf("ALL image.\n");
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 30 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation: overall motion direction inside the ROI
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05*0.1 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) );

        if(Zmode == ZMODE_Z && i !=-1) {
            // Stabilization algorithm for zoom-in mode: pick the candidate
            // closest to (ax, ay), presumably the view center — TODO confirm.
            tmpx = comp_rect.x+comp_rect.width/2; // candidate target coordinates of this component
            tmpy = comp_rect.y+comp_rect.height/2;
            // NOTE(review): the distance is computed from tx/ty (the previous
            // target), not from the tmpx/tmpy candidate just computed above,
            // so tmpal1 is identical for every component in this frame.
            // Looks like tmpx/tmpy were intended — confirm before changing.
            tmpal1 = sqrt(abs(tx-ax))+sqrt(abs(ty-ay));
            if(tmpal2 > tmpal1 || tmppcount == 0) // closer to the center
            {
                tmpal2 = tmpal1;
                tx = tmpx;
                ty = tmpy;
            }
            catchflag = 1;
        }
        if(Zmode == ZMOOE_S) {
            // Target-selection rule for zoom-out mode: take the first
            // non-global component seen this frame.
            if(tmppcount == 0 && i != -1) {
                tx = comp_rect.x+comp_rect.width/2;
                ty = comp_rect.y+comp_rect.height/2;
                catchflag = 1;
            }
        }
        tmppcount++;
        printf("The %dth rect:(%d,%d)\n",tmppcount,comp_rect.x+comp_rect.width/2,comp_rect.y+comp_rect.height/2);
        cvCircle( img, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( img, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// parameters: // img - input video frame // dst - resultant motion picture // args - optional parameters void update_mhi( IplImage* img, IplImage* dst, int diff_threshold ) { double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds CvSize size = cvSize(img->width,img->height); // get current frame size int i, idx1 = last, idx2; IplImage* silh; CvSeq* seq; CvRect comp_rect; double count; double angle; CvPoint center; double magnitude; CvScalar color; // allocate images at the beginning or // reallocate them if the frame size is changed if( !mhi || mhi->width != size.width || mhi->height != size.height ) { if( buf == 0 ) { buf = (IplImage**)malloc(N*sizeof(buf[0])); memset( buf, 0, N*sizeof(buf[0])); } for( i = 0; i < N; i++ ) { cvReleaseImage( &buf[i] ); buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 ); cvZero( buf[i] ); } cvReleaseImage( &mhi ); cvReleaseImage( &orient ); cvReleaseImage( &segmask ); cvReleaseImage( &mask ); mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 ); cvZero( mhi ); // clear MHI at the beginning orient = cvCreateImage( size, IPL_DEPTH_32F, 1 ); segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 ); mask = cvCreateImage( size, IPL_DEPTH_8U, 1 ); } cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale idx2 = (last + 1) % N; // index of (last - (N-1))th frame last = idx2; silh = buf[idx2]; cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI // convert MHI to blue 8u image cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION ); cvZero( dst ); cvMerge( mask, 0, 0, 0, dst ); // calculate motion gradient orientation and valid orientation mask cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 ); if( !storage ) storage = cvCreateMemStorage(0); else cvClearMemStorage(storage); // 
segment motion: get sequence of motion components // segmask is marked motion components map. It is not used further seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA ); // iterate through the motion components, // One more iteration (i == -1) corresponds to the whole image (global motion) for( i = -1; i < seq->total; i++ ) { if( i < 0 ) { // case of the whole image comp_rect = cvRect( 0, 0, size.width, size.height ); color = CV_RGB(255,255,255); magnitude = 100; } else { // i-th motion component comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect; if( comp_rect.width + comp_rect.height < 100 ) // reject very small components continue; color = CV_RGB(255,0,0); magnitude = 30; } // select component ROI cvSetImageROI( silh, comp_rect ); cvSetImageROI( mhi, comp_rect ); cvSetImageROI( orient, comp_rect ); cvSetImageROI( mask, comp_rect ); // calculate orientation angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION); angle = 360.0 - angle; // adjust for images with top-left origin count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI cvResetImageROI( mhi ); cvResetImageROI( orient ); cvResetImageROI( mask ); cvResetImageROI( silh ); // check for the case of little motion if( count < comp_rect.width*comp_rect.height * 0.05 ) continue; // draw a clock with arrow indicating the direction center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) ); cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 ); cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)), cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 ); } }
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold ) { double timestamp = (double)clock()/CLOCKS_PER_SEC; CvSize size = cvSize(img->width,img->height); int i, idx1 = last, idx2; IplImage* silh; CvSeq* seq; CvRect comp_rect; double count; double angle; CvPoint center; double magnitude; CvScalar color; if( !mhi || mhi->width != size.width || mhi->height != size.height ) { if( buf == 0 ) { buf = (IplImage**)malloc(N*sizeof(buf[0])); memset( buf, 0, N*sizeof(buf[0])); } for( i = 0; i < N; i++ ) { cvReleaseImage( &buf[i] ); buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 ); cvZero( buf[i] ); } cvReleaseImage( &mhi ); cvReleaseImage( &orient ); cvReleaseImage( &segmask ); cvReleaseImage( &mask ); mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 ); cvZero( mhi ); orient = cvCreateImage( size, IPL_DEPTH_32F, 1 ); segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 ); mask = cvCreateImage( size, IPL_DEPTH_8U, 1 ); } cvCvtColor( img, buf[last], CV_BGR2GRAY ); idx2 = (last + 1) % N; last = idx2; silh = buf[idx2]; cvAbsDiff( buf[idx1], buf[idx2], silh ); cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION ); cvZero( dst ); cvCvtPlaneToPix( mask, 0, 0, 0, dst ); cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 ); if( !storage ) storage = cvCreateMemStorage(0); else cvClearMemStorage(storage); seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA ); for( i = -1; i < seq->total; i++ ) { if( i < 0 ) { comp_rect = cvRect( 0, 0, size.width, size.height ); color = CV_RGB(255,255,255); magnitude = 100; } else { comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect; if( comp_rect.width + comp_rect.height < 100 ) continue; color = CV_RGB(255,0,0); magnitude = 30; } cvSetImageROI( silh, comp_rect ); cvSetImageROI( mhi, comp_rect ); cvSetImageROI( orient, comp_rect ); 
cvSetImageROI( mask, comp_rect ); angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION); angle = 360.0 - angle; count = cvNorm( silh, 0, CV_L1, 0 ); cvResetImageROI( mhi ); cvResetImageROI( orient ); cvResetImageROI( mask ); cvResetImageROI( silh ); if( count < comp_rect.width*comp_rect.height * 0.05 ) continue; center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) ); cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 ); cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)), cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 ); } }
// Test-harness hook: run the function under test once.
// Per the cvCalcGlobalOrientation signature, test_array[INPUT][2] is passed
// as the orientation image, [1] as the mask and [0] as the MHI (the layout
// is presumably established by the fixture's setup code — declared outside
// this chunk, as are `angle`, `timestamp` and `duration`).
void CV_MHIGlobalOrientTest::run_func()
{
    angle = cvCalcGlobalOrientation( test_array[INPUT][2], test_array[INPUT][1],
                                     test_array[INPUT][0], timestamp, duration );
}
// parameters:
//  img            - input video frame
//  dst            - resultant motion picture
//  diff_threshold - binarization threshold for the frame difference
// Updates the motion-history image (MHI) from the new frame and renders the
// motion components into dst. Uses file-scope state declared outside this
// chunk: buf (frame ring buffer), last, N, mhi, orient, segmask, mask,
// storage, MHI_DURATION, MAX_TIME_DELTA, MIN_TIME_DELTA.
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    // Get the current time in seconds.
    double timestamp = (double)clock()/CLOCKS_PER_SEC;
    // Get the current frame size.
    CvSize size = cvSize(img->width,img->height);
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // Allocate images at the beginning, or reallocate them
    // whenever the frame size changes.
    if (!mhi || mhi->width != size.width || mhi->height != size.height) {
        if (buf == 0) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        // Clear the MHI at the beginning.
        cvZero( mhi );
        // Create the auxiliary images at the frame size.
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    // Convert the frame to grayscale.
    cvCvtColor( img, buf[last], CV_BGR2GRAY );

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;
    silh = buf[idx2];

    // Difference between the two stored frames, written into silh.
    cvAbsDiff(buf[idx1], buf[idx2], silh);
    // Binarize the difference.
    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY);
    // Use the silhouette to update the motion history image.
    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION);

    // Convert the MHI to a blue 8-bit image in dst.
    cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // Calculate the MHI gradient orientation and the valid-orientation mask.
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // Segment the overall motion into independent components.
    // segmask is the marked motion-components map; it is not used further.
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // Iterate through the motion components.
    // One extra iteration (i == -1) corresponds to the whole image (global motion).
    for (i = -1; i < seq->total; i++) {
        if (i < 0) {
            // Whole-image (global motion) case: use the full frame,
            // white color, and a drawing magnitude of 100.
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else {
            // i-th motion component: take its bounding rectangle.
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            // Reject very small components.
            if( comp_rect.width + comp_rect.height < 100 )
                continue;
            // Red color, drawing magnitude 30.
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // Select the component's region of interest on every motion image.
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // Calculate the overall motion direction of the selected region
        // (the mask restricts the computation to this component's pixels).
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        // Adjust the angle for images with a top-left origin.
        angle = 360.0 - angle;

        // Count the active points within the silhouette ROI (L1 norm).
        count = cvNorm( silh, 0, CV_L1, 0 );

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // Skip the component when there is too little motion.
        if (count < comp_rect.width*comp_rect.height * 0.05) {
            continue;
        }

        // Draw a clock with an arrow indicating the direction.
        center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) );
        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
static void computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){ if(DEBUG){ std::cout << "-- VECTOR COMPUTING" << std::endl; } double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds CvSize size = cvSize(img->width,img->height); // get current frame size 640x480 int i, idx1 = last, idx2; CvSeq* seq; CvRect comp_rect; CvRect roi; double count; double angle; CvPoint center; double magnitude; CvScalar color; //--SURF CORNERS-- if(DEBUG){ std::cout << "--- SURF CORNERS" << std::endl; } color = CV_RGB(0,255,0); CvMemStorage* storage2 = cvCreateMemStorage(0); CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1); CvSeq *imageKeypoints = 0, *imageDescriptors = 0; cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params ); if(DEBUG){ printf("Image Descriptors: %d\n", imageDescriptors->total); } for( int j = 0; j < imageKeypoints->total; j++ ){ CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j ); center.x = cvRound(r->pt.x); center.y = cvRound(r->pt.y); if(DEBUG){ printf("j: %d \t", j); printf("total: %d \t", imageKeypoints->total); printf("valor hessiano: %f \t", r->hessian); printf("x: %d \t", center.x); printf("y: %d \n", center.y); } // Agrego el Punto en donde es la region que nos interesa cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 ); // Lleno la matriz con los vectores relevancePointToVector(center.x, center.y, wROI, hROI, 5); } //--SURF CORNERS // calculate motion gradient orientation and valid orientation mask cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 ); // Compute Motion on 4x4 Cuadrants if(DEBUG){ std::cout << "--- MOTION CUADRANTS" << std::endl; } i = 25; color = CV_RGB(255,0,0); magnitude = 30; for (int r = 0; r < size.height; r += hROI){ for (int c = 0; c < size.width; c += wROI){ comp_rect.x = c; comp_rect.y = r; comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI; comp_rect.height = (r + hROI > size.height) ? 
(size.height - r) : hROI; cvSetImageROI( mhi, comp_rect ); cvSetImageROI( orient, comp_rect ); cvSetImageROI( mask, comp_rect ); cvSetImageROI( silh, comp_rect ); cvSetImageROI( img, comp_rect ); // Process Motion angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION); angle = 360.0 - angle; // adjust for images with top-left origin count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI roi = cvGetImageROI(mhi); center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) ); cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 ); cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)), cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 ); if(DEBUG){ std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi } cvResetImageROI( mhi ); cvResetImageROI( orient ); cvResetImageROI( mask ); cvResetImageROI( silh ); cvResetImageROI(img); relevanceDirectionToVector(i, angle); ++i; } } // Compute Global Motion if(DEBUG){ std::cout << "--- MOTION GLOBAL" << std::endl; } comp_rect = cvRect( 0, 0, size.width, size.height ); color = CV_RGB(255,255,255); magnitude = 100; angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION); angle = 360.0 - angle; // adjust for images with top-left origin count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI roi = cvGetImageROI(mhi); center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) ); cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 ); cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)), cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 ); if(DEBUG){ std::cout << "Motion Main-> x: " << roi.x << " y: " << roi.y << " count: " << count << std::endl; // print 
the roi } relevanceDirectionToVector(50, angle); }
public:
// Update the motion-history image from `img`, draw the matching motion
// markers into `dst`, and report whether any sufficiently large motion
// component overlaps `rect` by more than 10 pixels of area.
//   img            - input BGR frame
//   dst            - output motion visualization
//   diff_threshold - binarization threshold for the frame difference
//   rect           - region to test for overlapping motion
// Returns true when an overlapping component was found.
// Uses class/file-scope state declared outside this chunk: buf, last, N,
// mhi, orient, segmask, mask, storage and the MHI timing constants.
bool analizarMhi( IplImage* img, IplImage* dst, int diff_threshold, CvRect rect )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    cv::Rect result; // default-constructed: area() == 0 until an overlap is found
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;
    silh = buf[idx2];

    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // Convert the MHI to a blue 8u image.
    cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // Calculate motion gradient orientation and valid orientation mask.
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // Segment motion: get the sequence of motion components.
    // segmask is the marked motion-components map; it is not used further.
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // Walk the components until one overlaps `rect` enough, or we run out.
    // NOTE(review): i starts at 1, so component 0 is never examined (the
    // original commented-out loop started at -1) — confirm this is intended.
    i = 1;
    comp_rect = cvRect( 0, 0, img->width, img->height );
    color = CV_RGB(255,255,255);
    magnitude = 100;
    // FIX: was bitwise '&'; logical '&&' expresses the intent and
    // short-circuits once an overlap has been found.
    while (result.area() < 10 && i < seq->total)
    {
        comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
        if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
        {
            i++;
            continue;
        }
        color = CV_RGB(255,0,0);
        magnitude = 30;

        // Select the component ROI on every motion image.
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // Calculate the component's motion direction.
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // Draw a clock with an arrow indicating the direction.
        center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) );
        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );

        // Intersection of the component with the region of interest.
        result = (cv::Rect)comp_rect & (cv::Rect)rect;
        i++;
    }

    return result.area() > 10;
}