// define a trackbar callback void on_trackbar( int h ) { int j; int distStep = dist -> widthStep / 4; float* currPointer; cvThreshold( gray, edge, ( float )( edge_thresh ), ( float )( edge_thresh ), CV_THRESH_BINARY ); //Distance transform cvDistTransform( edge, dist, CV_DIST_L2, CV_DIST_MASK_5, NULL ); cvConvertScale( dist, dist, 5000.0, 0 ); for( j = 0, currPointer = dist -> imageData; j < dist -> height; j++, currPointer += distStep ) { cvbSqrt( ( float* )( currPointer ), ( float* )( currPointer ), dist -> width ); } cvConvertScale( dist, dist32s, 1.0, 0.5 ); cvAndS( dist32s, cvScalarAll(255), dist32s, 0 ); cvConvertScale( dist32s, dist8u1, 1, 0 ); cvConvertScale( dist32s, dist32s, -1, 0 ); cvAddS( dist32s, cvScalarAll(255), dist32s, 0 ); cvConvertScale( dist32s, dist8u2, 1, 0 ); cvCvtPlaneToPix( dist8u1, dist8u2, dist8u2, 0, dist8u ); show_iplimage( wndname, dist8u ); }
int main() { IplImage *billy = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE); IplImage *herrington = cvLoadImage("lena.jpg"); makeEDD(billy); IplImage *Rh = cvCreateImage(cvGetSize(herrington), 8, 1); IplImage *Gh = cvCreateImage(cvGetSize(herrington), 8, 1); IplImage *Bh = cvCreateImage(cvGetSize(herrington), 8, 1); cvSplit(herrington, Rh, Gh, Bh, 0); makeEDD(Rh); makeEDD(Gh); makeEDD(Bh); cvCvtPlaneToPix(Rh, Gh, Bh, NULL, herrington); cvSaveImage("lena_Gray_Error_Diffusion_Filtered.png", billy); cvSaveImage("lena_Colour_Error_Diffusion_Filtered.png", herrington); cvReleaseImage(&billy); cvReleaseImage(&herrington); cvReleaseImage(&Rh); cvReleaseImage(&Gh); cvReleaseImage(&Bh); return 0; }
// ÀÆÕÀ˹±ä»» int main( int argc, char** argv ) { IplImage* laplace = 0; IplImage* colorlaplace = 0; IplImage* planes[3] = { 0, 0, 0 }; CvCapture* capture = 0; if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 ); else if( argc == 2 ) capture = cvCaptureFromAVI( argv[1] ); if( !capture ) { fprintf(stderr,"Could not initialize capturing...\n"); return -1; } cvNamedWindow( "Laplacian", 0 ); for(;;) { IplImage* frame = 0; int i; frame = cvQueryFrame( capture ); if( !frame ) break; if( !laplace ) { for( i = 0; i < 3; i++ ) planes[i] = cvCreateImage( cvSize(frame->width,frame->height), 8, 1 ); laplace = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_16S, 1 ); colorlaplace = cvCreateImage( cvSize(frame->width,frame->height), 8, 3 ); } cvCvtPixToPlane( frame, planes[0], planes[1], planes[2], 0 ); for( i = 0; i < 3; i++ ) { cvLaplace( planes[i], laplace, 3 ); cvConvertScaleAbs( laplace, planes[i], 1, 0 ); } cvCvtPlaneToPix( planes[0], planes[1], planes[2], 0, colorlaplace ); colorlaplace->origin = frame->origin; cvShowImage("Laplacian", colorlaplace ); if( cvWaitKey(10) >= 0 ) break; } cvReleaseCapture( &capture ); cvDestroyWindow("Laplacian"); return 0; }
//-------------------------------------------------------------------------------- void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ if( red.width == width && red.height == height && green.width == width && green.height == height && blue.width == width && blue.height == height ) { cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(),NULL, cvImage); flagImageChanged(); } else { ofLog(OF_LOG_ERROR, "in setFromGrayscalePlanarImages, images are different sizes"); } }
//-------------------------------------------------------------------------------- void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ ofRectangle roi = getROI(); ofRectangle redRoi = red.getROI(); ofRectangle greenRoi = green.getROI(); ofRectangle blueRoi = blue.getROI(); if( redRoi.width == roi.width && redRoi.height == roi.height && greenRoi.width == roi.width && greenRoi.height == roi.height && blueRoi.width == roi.width && blueRoi.height == roi.height ) { cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(),NULL, cvImage); flagImageChanged(); } else { ofLog(OF_LOG_ERROR, "in setFromGrayscalePlanarImages, ROI/size mismatch"); } }
//Draw box around object void CObjectTracker::DrawObjectBox(IplImage *frame) { SINT16 x_diff = 0; SINT16 x_sum = 0; SINT16 y_diff = 0; SINT16 y_sum = 0; SINT16 x = 0; SINT16 y = 0; ULONG_32 pixelValues = 0; IplImage* r, * g, * b; r = cvCreateImage( cvGetSize(frame), frame->depth, 1 ); g = cvCreateImage( cvGetSize(frame), frame->depth, 1 ); b = cvCreateImage( cvGetSize(frame), frame->depth, 1 ); cvCvtPixToPlane( frame, b, g, r, NULL ); pixelValues = GetBoxColor(); //the x left and right bounds x_sum = min(m_sTrackingObjectTable[m_cActiveObject].X+m_sTrackingObjectTable[m_cActiveObject].W/2+1,m_nImageWidth-1);//右边界 x_diff = max(m_sTrackingObjectTable[m_cActiveObject].X-m_sTrackingObjectTable[m_cActiveObject].W/2,0);//左边界 //the y upper and lower bounds y_sum = min(m_sTrackingObjectTable[m_cActiveObject].Y+m_sTrackingObjectTable[m_cActiveObject].H/2+1,m_nImageHeight-1);//下边界 y_diff = max(m_sTrackingObjectTable[m_cActiveObject].Y-m_sTrackingObjectTable[m_cActiveObject].H/2,0);//上边界 for (y=y_diff;y<=y_sum;y++) { SetPixelValues(r, g, b,pixelValues,x_diff,y); SetPixelValues(r, g, b,pixelValues,x_diff+1,y); SetPixelValues(r, g, b,pixelValues,x_sum-1,y); SetPixelValues(r, g, b,pixelValues,x_sum,y); } for (x=x_diff;x<=x_sum;x++) { SetPixelValues(r, g, b,pixelValues,x,y_diff); SetPixelValues(r, g, b,pixelValues,x,y_diff+1); SetPixelValues(r, g, b,pixelValues,x,y_sum-1); SetPixelValues(r, g, b,pixelValues,x,y_sum); } cvCvtPlaneToPix(b, g, r, NULL, frame); cvReleaseImage(&r); cvReleaseImage(&g); cvReleaseImage(&b); }
void COpenCVMFCView::OnLaplace() { // TODO: Add your command handler code here IplImage* pImage; IplImage* pImgLaplace = NULL; IplImage* pImgPlanes[3] = {0,0,0}; int i; pImage = workImg; pImgLaplace = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_16S,1); if (workImg->nChannels == 1) { cvLaplace(pImage,pImgLaplace,3); cvConvertScaleAbs(pImgLaplace,pImage, 1, 0 ); } else { for (i = 0; i < 3; i++) { pImgPlanes[i] = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_8U,1); } cvCvtPixToPlane(pImage,pImgPlanes[0], pImgPlanes[1],pImgPlanes[2],0); for (i = 0; i < 3; i++) { cvLaplace(pImgPlanes[i],pImgLaplace,3); cvConvertScaleAbs(pImgLaplace,pImgPlanes[i], 1, 0 ); } cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1], pImgPlanes[2],0,pImage); for (i = 0; i < 3; i++) { cvReleaseImage(&pImgPlanes[i]); } } cvReleaseImage(&pImgLaplace); Invalidate(); }
void COpenCVMFCView::OnSobel() { // TODO: Add your command handler code here IplImage* pImage; IplImage* pImgSobel = NULL; IplImage* pImgPlanes[3] = {0,0,0}; int i; pImage = workImg; pImgSobel = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_16S,1); // Create Working Image if (workImg->nChannels == 1) { // Handle Single Channel cvSobel(pImage,pImgSobel,1,1,3); cvConvertScaleAbs(pImgSobel,pImage, 1, 0 ); } else { // Handle Triad Ones for (i = 0; i < 3; i++) { pImgPlanes[i] = cvCreateImage(cvGetSize(pImage), IPL_DEPTH_8U,1); // Create Sub Image } cvCvtPixToPlane(pImage,pImgPlanes[0], pImgPlanes[1],pImgPlanes[2],0); // Get Sub for (i = 0; i < 3; i++) { // Handle Sub Independently cvSobel(pImgPlanes[i],pImgSobel,1,1,3); cvConvertScaleAbs(pImgSobel,pImgPlanes[i], 1, 0 ); } cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1], pImgPlanes[2],0,pImage); // Form Color Image From Sub Images for (i = 0; i < 3; i++) { cvReleaseImage(&pImgPlanes[i]); // Release Sub Image } } cvReleaseImage(&pImgSobel); // Release Working Image Invalidate(); }
//-------------------------------------------------------------------------------- void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ ofRectangle roi = getROI(); ofRectangle redRoi = red.getROI(); ofRectangle greenRoi = green.getROI(); ofRectangle blueRoi = blue.getROI(); if( !bAllocated ){ ofLogNotice("ofxCvColorImage") << "setFromGrayscalePlanarImages(): allocating to match dimensions"; allocate(red.getWidth(), red.getHeight()); } if( redRoi.width == roi.width && redRoi.height == roi.height && greenRoi.width == roi.width && greenRoi.height == roi.height && blueRoi.width == roi.width && blueRoi.height == roi.height ) { cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(),NULL, cvImage); flagImageChanged(); } else { ofLogError("ofxCvColorImage") << "setFromGrayscalePlanarImages(): image size or region of interest mismatch"; } }
// parameters:
//  img  – input video frame
//  dst  – resultant motion picture
//  args – optional parameters
// Updates the global motion-history image (mhi) from `img`, paints the decaying
// motion silhouette into the blue channel of `dst`, and draws an orientation
// "clock" for the global motion and for each sufficiently large motion component.
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// Updates the global motion-history image (mhi) from `img`, paints the decaying
// motion silhouette into the blue channel of `dst`, and draws an orientation
// "clock" for the global motion and for each sufficiently large motion component.
//  img            - input video frame (BGR)
//  dst            - resultant motion picture (same size as img)
//  diff_threshold - binarization threshold for the frame difference
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // current time in seconds
    CvSize size = cvSize(img->width,img->height);      // current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // Allocate the work images on first call, or reallocate when the frame size changes.
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of the oldest buffered frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // binarize it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );    // update MHI

    // Convert MHI to a blue 8u image (only the first plane of dst is written).
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // Calculate motion gradient orientation and valid orientation mask.
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // Segment motion into connected components; segmask is not used further.
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // Iterate the components; the extra i == -1 pass handles the whole image
    // (global motion).
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // whole-image case
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // Select the component's ROI in all work images.
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // Calculate the dominant orientation within the ROI.
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // number of silhouette points in the ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // Skip components with too little motion.
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // Draw a clock with an arrow indicating the motion direction.
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// parameters:
//  img  - input video frame
//  dst  - resultant motion picture
//  args - optional parameters
// Updates the global motion-history image (mhi) from `img`, paints the decaying
// motion silhouette into the blue channel of `dst`, and draws an orientation
// "clock" for the global motion and each sufficiently large motion component.
// (Original bilingual comments translated to English.)
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    // get current time in seconds
    double timestamp = (double)clock()/CLOCKS_PER_SEC;
    // get current frame size
    CvSize size = cvSize(img->width,img->height);
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) {
        if (buf == 0) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        // clear MHI at the beginning
        cvZero( mhi );
        // create the work images at img's size
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    // convert frame to grayscale
    cvCvtColor( img, buf[last], CV_BGR2GRAY );

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    // get difference between frames: subtract the buffered frame from the
    // current one, storing the result in silh
    cvAbsDiff(buf[idx1], buf[idx2], silh);

    // binarize the difference
    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY);
    // fold the silhouette into the motion history image
    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION);

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) {

        if (i < 0) {
            // case of the whole image: take the full frame rect,
            // draw in white at magnitude 100
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else {
            // i-th motion component: take its bounding rect,
            // draw in red at magnitude 30
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            // reject very small components
            if( comp_rect.width + comp_rect.height < 100 )
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate the global orientation of the selected region, using the
        // extracted component mask
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        // adjust for images with top-left origin
        angle = 360.0 - angle;

        // calculate number of points within silhouette ROI (L1 norm)
        count = cvNorm( silh, 0, CV_L1, 0 );

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05) {
            continue;
        }

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
//-------------------------------------------------------------------------------- void ofxCvColorImage::setFromGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL, cvImage); }
// Update Motion History Image: Calculate motion features and orientation. void motionDetection(IplImage* image, IplImage* destination_image, MotionInfo* motionInfo) { double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds CvSize image_size = cvSize(image->width, image->height); // get current frame image_size int previous_frame_index = last_index, current_frame_index; initialize_images(image_size); cvCvtColor(image, image_buffer[last_index], CV_BGR2GRAY); // convert frame to grayscale current_frame_index = (last_index + 1) % N; // index of (last_index - (N-1))th frame last_index = current_frame_index; silhouette = image_buffer[current_frame_index]; cvAbsDiff(image_buffer[previous_frame_index], image_buffer[current_frame_index], silhouette); // Get difference between frames cvThreshold(silhouette, silhouette, DIFFERENCE_THRESHOLD, 1, CV_THRESH_BINARY); // Add threshold //cvDilate(silhouette, silhouette, 0, 18); //cvErode(silhouette, silhouette, 0, 10); cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION); // Update MHI // Convert MHI to blue 8U image cvCvtScale(mhi, orientation_mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION); if (destination_image) { cvZero(destination_image); cvCvtPlaneToPix(orientation_mask, 0, 0, 0, destination_image); } // Calculate motion gradient orientation and valid orientation mask cvCalcMotionGradient(mhi, orientation_mask, orientation, MAX_TIME_DELTA, MIN_TIME_DELTA, 3); // motion_feature_sequence = extract_motion_features(); if(!storage) storage = cvCreateMemStorage(0); else cvClearMemStorage(storage); CvSeq* motion_feature_sequence = cvSegmentMotion(mhi, segment_mask, storage, timestamp, MAX_TIME_DELTA); int SEGMENT_WIDTH = image_size.width / MAX_SEGMENTS_X; int SEGMENT_HEIGHT = image_size.height / MAX_SEGMENTS_Y; // Global motion CvRect global_motion_segment = cvRect(0, 0, image_size.width, image_size.height); motionInfo->global_angle = 
calculate_orientation(global_motion_segment, silhouette); if (destination_image) draw_orientation(destination_image, &global_motion_segment, motionInfo->global_angle, 100, CV_RGB(0, 255, 0), true); long area = 0; long totalArea = 0; int totalMovingSegments = 0; bool hasValidMovement = false; CvRect segmentRect; // Segmented motion for(int x = 0; x < MAX_SEGMENTS_X; x++) { for(int y = 0; y < MAX_SEGMENTS_Y; y++) { segmentRect = cvRect(x * SEGMENT_WIDTH, y * SEGMENT_HEIGHT, SEGMENT_WIDTH, SEGMENT_HEIGHT); area = calculate_motion(&segmentRect, motion_feature_sequence); hasValidMovement = (area > MIN_MOTION_FEATURE_AREA); motionInfo->segment_motion_areas[x][y] = area; motionInfo->segment_movements[x][y] = hasValidMovement; motionInfo->segment_angles[x][y] = calculate_orientation(segmentRect, silhouette); totalArea += area; totalMovingSegments += (area > MIN_MOTION_FEATURE_AREA); //printf("%i, ", area); //fflush(stdout); if (hasValidMovement) if (destination_image) draw_orientation(destination_image, &segmentRect, motionInfo->segment_angles[x][y], 20, CV_RGB(255, 0, 0), true); } } motionInfo->total_motion_area = totalArea; motionInfo->total_segments_with_movements = totalMovingSegments; motionInfo->SEGMENTS_X = MAX_SEGMENTS_X; motionInfo->SEGMENTS_Y = MAX_SEGMENTS_Y; printf("%i, %f\n", totalArea, (float)totalArea / (float)(image_size.width*image_size.height)); //fflush(stdout); }
// This function is copied from http://mehrez.kristou.org/opencv-change-contrast-and-brightness-of-an-image/ boost::shared_ptr< Image > Image::ContrastBrightness( int contrast, int brightness ) const { if(contrast > 100) contrast = 100; if(contrast < -100) contrast = -100; if(brightness > 100) brightness = 100; if(brightness < -100) brightness = -100; uchar lut[256]; CvMat* lut_mat; int hist_size = 256; float range_0[]={0,256}; float* ranges[] = { range_0 }; int i; IplImage * dest = cvCloneImage(this); IplImage * GRAY; if (this->nChannels == 3) { GRAY = cvCreateImage(cvGetSize(this),this->depth,1); cvCvtColor(this,GRAY,CV_RGB2GRAY); } else { GRAY = cvCloneImage(this); } lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 ); cvSetData( lut_mat, lut, 0 ); /* * The algorithm is by Werner D. Streidt * (http://visca.com/ffactory/archives/5-99/msg00021.html) */ if( contrast > 0 ) { double delta = 127.* contrast/100; double a = 255./(255. - delta*2); double b = a*(brightness - delta); for( i = 0; i < 256; i++ ) { int v = cvRound(a*i + b); if( v < 0 ) v = 0; if( v > 255 ) v = 255; lut[i] = v; } } else { double delta = -128.* contrast/100; double a = (256.-delta*2)/255.; double b = a* brightness + delta; for( i = 0; i < 256; i++ ) { int v = cvRound(a*i + b); if( v < 0 ) v = 0; if( v > 255 ) v = 255; lut[i] = v; } } if (this->nChannels ==3) { IplImage * R = cvCreateImage(cvGetSize(this),this->depth,1); IplImage * G = cvCreateImage(cvGetSize(this),this->depth,1); IplImage * B = cvCreateImage(cvGetSize(this),this->depth,1); cvCvtPixToPlane(this,R,G,B,NULL); cvLUT( R, R, lut_mat ); cvLUT( G, G, lut_mat ); cvLUT( B, B, lut_mat ); cvCvtPlaneToPix(R,G,B,NULL,dest); cvReleaseImage(&R); cvReleaseImage(&G); cvReleaseImage(&B); } else { cvLUT( GRAY, dest, lut_mat ); } cvReleaseImage(&GRAY); cvReleaseMat( &lut_mat); return boost::shared_ptr< Image >( new Image( dest, true ) ); }
public:bool analizarMhi( IplImage* img, IplImage* dst, int diff_threshold, CvRect rect )
{
    // Updates the motion-history image from the new frame, draws motion
    // indicators into `dst`, and returns true when a sufficiently large
    // motion component overlaps `rect` by more than 10 pixels of area.
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    cv::Rect result;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // NOTE(review): iteration starts at component 1, skipping element 0 as in
    // the original code — confirm that skipping the first component is intentional.
    i = 1;
    comp_rect = cvRect( 0, 0, img->width, img->height );
    color = CV_RGB(255,255,255);
    magnitude = 100;

    // BUGFIX: the original combined the two boolean conditions with bitwise
    // `&`; use logical `&&` (same truth table for bools, but short-circuits
    // as intended and avoids the bug-prone idiom).
    while (result.area() < 10 && i < seq->total) {
        comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
        if( comp_rect.width + comp_rect.height < 100 ) { // reject very small components
            i++;
            continue;
        }
        color = CV_RGB(255,0,0);
        magnitude = 30;

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );
        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );

        // overlap between this component and the watched region
        result = (cv::Rect)comp_rect & (cv::Rect)rect;
        i++;
    }

    return result.area() > 10;
}