IplImage * BouyObject::FindCircles(const IplImage * imgIn) const
{
    IplImage * imgOut = cvCloneImage(imgIn);
    // Largest size with width and height divisible by 2 (required by cvPyrDown/cvPyrUp).
    CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2);
    IplImage* imgSmallCopy = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 1);
    // Down- and up-sample to filter out fine noise, then blur.
    cvPyrDown(imgOut, imgSmallCopy);
    cvPyrUp(imgSmallCopy, imgOut);
    cvSmooth(imgOut, imgOut, CV_GAUSSIAN, 5);
    cvReleaseImage(&imgSmallCopy);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* results = cvHoughCircles(imgOut, storage, CV_HOUGH_GRADIENT, 2, imgOut->width/10, 100, 100);
    // Redraw the detected circles, filled white, on a blank image.
    cvZero(imgOut);
    for (int i = 0; i < results->total; i++)
    {
        float* p = (float*)cvGetSeqElem(results, i);
        CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));
        cvCircle(imgOut, pt, cvRound(p[2]), CV_RGB(0xff, 0xff, 0xff), CV_FILLED);
    }
    cvReleaseMemStorage(&storage);
    return imgOut;
}
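// The pyrDown/pyrUp round trip above is a common denoising idiom in the old
// C API: cvPyrDown needs a destination of exactly half the (even) source
// size, which is why width and height are masked with & -2 first. A minimal
// standalone sketch of the idiom (helper name is illustrative; assumes an
// 8-bit, 1-channel image with even dimensions):
static void PyramidBlur(IplImage* img)
{
    IplImage* half = cvCreateImage(cvSize(img->width / 2, img->height / 2),
                                   IPL_DEPTH_8U, 1);
    cvPyrDown(img, half); // Gaussian blur + drop every other row/column
    cvPyrUp(half, img);   // upsample back; fine detail and noise are gone
    cvReleaseImage(&half);
}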
IplImage* BouyObject::ShapeMask(const IplImage * imgIn) const
{
    IplImage * hsv = cvCloneImage(imgIn);
    //cvEqualizeHist( imgIn, hsv);
    IplImage * chan0 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * chan1 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * chan2 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    cvCvtColor(imgIn, hsv, CV_BGR2YCrCb);
    cvSplit(hsv, chan0, chan1, chan2, NULL);
    CvScalar white = cvRealScalar(255);
    //imgOut = SegmentationMask(imgIn);
    // Work on the third (Cb) channel. (The original also allocated an unused
    // chan3 and leaked a first imgOut allocation; both are dropped here.)
    IplImage * imgOut = cvCloneImage(chan2);
    // invert black and white
    cvAbsDiffS(imgOut, imgOut, white);
    // cvShowImage("hue", chan0);
    // cvShowImage("sat", chan1);
    // cvShowImage("val", chan2);
    // cvShowImage("inv", imgOut);
    //cvWaitKey(0);
    cvReleaseImage(&hsv);
    cvReleaseImage(&chan0);
    cvReleaseImage(&chan1);
    cvReleaseImage(&chan2);

    CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2);
    IplImage* imgSmallCopy = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 1);
    cvPyrDown(imgOut, imgSmallCopy);
    cvPyrUp(imgSmallCopy, imgOut);
    cvSmooth(imgOut, imgOut, CV_GAUSSIAN, 5);
    cvReleaseImage(&imgSmallCopy);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* results = cvHoughCircles(imgOut, storage, CV_HOUGH_GRADIENT, 2, imgOut->width/10, 100, 100);
    cvZero(imgOut);
    for (int i = 0; i < results->total; i++)
    {
        float* p = (float*)cvGetSeqElem(results, i);
        CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));
        cvCircle(imgOut, pt, cvRound(p[2]), CV_RGB(0xff, 0xff, 0xff), CV_FILLED);
    }
    cvReleaseMemStorage(&storage);
    return imgOut;
}
typename image<T, D>::create_new pyramid_up(const image<T, D>& a)
{
    IplImage* src = a.ipl();
    // cvPyrUp needs a destination exactly twice the source size.
    IplImage* dst = cvCreateImage(
        image_details::cv_size(a.width() * 2, a.height() * 2),
        image_details::ipl_depth<T>(), int(a.channels()));
    cvPyrUp(src, dst);
    typename image<T, D>::create_new r(dst);
    // NOTE: these releases assume a.ipl() hands over ownership and that
    // create_new deep-copies dst; otherwise they free memory still in use.
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    return r;
}
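// A plain-C-API counterpart (sketch) that sidesteps the ownership question:
// the caller simply releases the returned image when done. The helper name
// is illustrative.
IplImage* pyrUpCopy(const IplImage* src)
{
    IplImage* dst = cvCreateImage(cvSize(src->width * 2, src->height * 2),
                                  src->depth, src->nChannels);
    cvPyrUp(src, dst);
    return dst; // release with cvReleaseImage(&dst)
}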
void ImgProducer::calcFGRAY()
{
    ///* Convert the downsampled colour image to grayscale */
    //cvCvtColor(imgPYRDOWN(), img[idMGRAY], CV_BGR2GRAY);
    /* Downsample the GRAY image */
    cvPyrDown(imgGRAY(), img[idMGRAY], CV_GAUSSIAN_5x5);
    /* Back up to the original size */
    cvPyrUp(img[idMGRAY], img[idFGRAY], CV_GAUSSIAN_5x5);
    imgOK[idMGRAY] = 1;
    imgOK[idFGRAY] = 1;
}
IplImage* BouyBaseObject::EdgeMask(const IplImage * imgIn) const
{
    IplImage * imgOut = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * gray = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    cvConvertImage(imgIn, gray);
    CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2);
    IplImage* imgSmallCopy = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 1);
    // Pyramid down/up on the grayscale copy to suppress noise before Canny.
    // (The original ran this block on the still-empty imgOut, so it had no
    // effect on the edge input; it is applied to gray here.)
    cvPyrDown(gray, imgSmallCopy);
    cvPyrUp(imgSmallCopy, gray);
    cvSmooth(gray, gray, CV_GAUSSIAN, 5);
    cvReleaseImage(&imgSmallCopy);
    cvCanny(gray, imgOut, 100, 100);
    cvDilate(imgOut, imgOut, NULL, 1);
    //cvShowImage("canny", imgOut);
    cvReleaseImage(&gray); // was leaked in the original
    return imgOut;
}
IplImage* PathObject::ChannelMask2(const IplImage * imgIn) const
{
    if (imgIn == NULL) return NULL;
    IplImage * hsv = cvCloneImage(imgIn);
    //cvEqualizeHist( imgIn, hsv);
    IplImage * chan0 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * chan1 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * chan2 = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    cvCvtColor(imgIn, hsv, CV_BGR2YCrCb);
    cvSplit(hsv, chan0, chan1, chan2, NULL);
    CvScalar white = cvRealScalar(255);
    // Work on the third (Cb) channel. (The original leaked a first imgOut
    // allocation and an unused chan3; both are dropped here.)
    IplImage * imgOut = cvCloneImage(chan2);
    // invert black and white
    cvAbsDiffS(imgOut, imgOut, white);
    // Debug views.
    cvShowImage("hue", chan0);
    cvShowImage("sat", chan1);
    cvShowImage("val", chan2);
    cvShowImage("inv", imgOut);
    cvWaitKey(0);
    cvReleaseImage(&hsv);
    cvReleaseImage(&chan0);
    cvReleaseImage(&chan1);
    cvReleaseImage(&chan2);

    CvSize imageSize = cvSize(imgOut->width & -2, imgOut->height & -2);
    IplImage* imgSmallCopy = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 1);
    cvPyrDown(imgOut, imgSmallCopy);
    cvPyrUp(imgSmallCopy, imgOut);
    cvReleaseImage(&imgSmallCopy);
    //cvThreshold(imgOut, imgOut, 200, 255, CV_THRESH_TOZERO);
    return imgOut;
}
void PlateFinder::ImageRestoration(IplImage *src)
{
    int w = src->width;
    int h = src->height;

    IplImage *mImg = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);        // image used for the morphological transform
    IplImage *src_pyrdown = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
    IplImage *tmp = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
    IplImage *thresholed = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);  // thresholded binary image
    IplImage *mini_thresh = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
    IplImage *dst = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);         // image emphasizing the plate region

    cvPyrDown(src, src_pyrdown);
    cvMorphologyEx(src_pyrdown, mImg, tmp, S2, CV_MOP_BLACKHAT);
    cvNormalize(mImg, mImg, 0, 255, CV_MINMAX);

    // Binarize mImg.
    cvThreshold(mImg, thresholed, 10 * cvAvg(mImg).val[0], 255, CV_THRESH_BINARY);
    cvZero(dst);
    cvCopy(thresholed, mini_thresh);

    // Slide a 32x16 window (four 16x8 quadrants) over the whole image.
    int cnt;
    int nonZero1, nonZero2, nonZero3, nonZero4;
    CvRect rect;
    for (int i = 0; i < mini_thresh->width - 32; i += 4)
    {
        for (int j = 0; j < mini_thresh->height - 16; j += 4)
        {
            // Count foreground pixels in each 16x8 quadrant (ROI = Region of Interest).
            rect = cvRect(i, j, 16, 8);
            cvSetImageROI(mini_thresh, rect);
            nonZero1 = cvCountNonZero(mini_thresh);
            cvResetImageROI(mini_thresh);

            rect = cvRect(i + 16, j, 16, 8);
            cvSetImageROI(mini_thresh, rect);
            nonZero2 = cvCountNonZero(mini_thresh);
            cvResetImageROI(mini_thresh);

            rect = cvRect(i, j + 8, 16, 8);
            cvSetImageROI(mini_thresh, rect);
            nonZero3 = cvCountNonZero(mini_thresh);
            cvResetImageROI(mini_thresh);

            rect = cvRect(i + 16, j + 8, 16, 8);
            cvSetImageROI(mini_thresh, rect);
            nonZero4 = cvCountNonZero(mini_thresh);
            cvResetImageROI(mini_thresh);

            cnt = 0;
            if (nonZero1 > 15) { cnt++; }
            if (nonZero2 > 15) { cnt++; }
            if (nonZero3 > 15) { cnt++; }
            if (nonZero4 > 15) { cnt++; }
            // Keep the window if at least three quadrants are dense enough.
            if (cnt > 2)
            {
                rect = cvRect(i, j, 32, 16);
                cvSetImageROI(dst, rect);
                cvSetImageROI(mini_thresh, rect);
                cvCopy(mini_thresh, dst);
                cvResetImageROI(dst);
                cvResetImageROI(mini_thresh);
            }
        }
    }

    IplImage* dst_clone = cvCloneImage(dst);

    // Close gaps and consolidate the candidate plate regions.
    cvDilate(dst, dst, NULL, 2);
    cvErode(dst, dst, NULL, 2);
    cvDilate(dst, dst, S1, 9);
    cvErode(dst, dst, S1, 10);
    cvDilate(dst, dst);

    /*cvShowImage("Source", src);
    cvShowImage("mImg", mImg);
    cvShowImage("mini_thresh", mini_thresh);
    cvShowImage("dst_clone", dst_clone);
    cvShowImage("dst", dst);*/

    cvPyrUp(dst, src);

    cvReleaseImage(&mini_thresh);
    cvReleaseImage(&mImg);
    cvReleaseImage(&tmp);
    cvReleaseImage(&dst);
    cvReleaseImage(&src_pyrdown);
    cvReleaseImage(&thresholed);
    cvReleaseImage(&dst_clone);
}
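// S1 and S2 above are IplConvKernel structuring elements defined elsewhere in
// PlateFinder; a plausible setup sketch (the kernel sizes are assumptions,
// not taken from the original source):
IplConvKernel* S1 = cvCreateStructuringElementEx(3, 1, 1, 0, CV_SHAPE_RECT, NULL);
IplConvKernel* S2 = cvCreateStructuringElementEx(9, 3, 4, 1, CV_SHAPE_RECT, NULL);
// ... use with cvMorphologyEx / cvDilate / cvErode as above, then:
cvReleaseStructuringElement(&S1);
cvReleaseStructuringElement(&S2);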
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                       CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation,
                // relatively large area (to filter out noisy contours),
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( result->total == 4 &&
                    cvContourArea(result,CV_WHOLE_SEQ,0) > 500 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;
                    for( i = 0; i < 5; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem( result, i ),
                                (CvPoint*)cvGetSeqElem( result, i-2 ),
                                (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }
                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quadrangle
                    // vertices to resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares,
                                       (CvPoint*)cvGetSeqElem( result, i ));
                }
                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );
    return squares;
}
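// Sketch of driving findSquares4, in the spirit of the OpenCV squares.c
// sample ("thresh" is the global Canny threshold the function reads; the
// file name is illustrative):
CvMemStorage* storage = cvCreateMemStorage(0);
IplImage* img = cvLoadImage("squares.png", 1);
if (img)
{
    CvSeq* squares = findSquares4(img, storage);
    // every 4 consecutive points in "squares" form one quadrangle
    for (int i = 0; i < squares->total; i += 4)
    {
        CvPoint pt[4];
        for (int j = 0; j < 4; j++)
            pt[j] = *(CvPoint*)cvGetSeqElem(squares, i + j);
        CvPoint* poly = pt;
        int npts = 4;
        cvPolyLine(img, &poly, &npts, 1, 1, CV_RGB(0, 255, 0), 3, CV_AA, 0);
    }
    cvClearMemStorage(storage); // recycle everything the call allocated
    cvReleaseImage(&img);
}
cvReleaseMemStorage(&storage);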
CV_IMPL void
cvPyrMeanShiftFiltering( const CvArr* srcarr, CvArr* dstarr,
                         double sp0, double sr, int max_level,
                         CvTermCriteria termcrit )
{
    const int cn = 3;
    const int MAX_LEVELS = 8;
    CvMat* src_pyramid[MAX_LEVELS+1];
    CvMat* dst_pyramid[MAX_LEVELS+1];
    CvMat* mask0 = 0;
    int i, j, level;
    //uchar* submask = 0;

#define cdiff(ofs0) (tab[c0-dptr[ofs0]+255] + \
    tab[c1-dptr[(ofs0)+1]+255] + tab[c2-dptr[(ofs0)+2]+255] >= isr22)

    memset( src_pyramid, 0, sizeof(src_pyramid) );
    memset( dst_pyramid, 0, sizeof(dst_pyramid) );

    CV_FUNCNAME( "cvPyrMeanShiftFiltering" );

    __BEGIN__;

    double sr2 = sr * sr;
    int isr2 = cvRound(sr2), isr22 = MAX(isr2,16);
    int tab[768];
    CvMat sstub0, *src0;
    CvMat dstub0, *dst0;

    CV_CALL( src0 = cvGetMat( srcarr, &sstub0 ));
    CV_CALL( dst0 = cvGetMat( dstarr, &dstub0 ));

    if( CV_MAT_TYPE(src0->type) != CV_8UC3 )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" );

    if( !CV_ARE_TYPES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedFormats, "The input and output images must have the same type" );

    if( !CV_ARE_SIZES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input and output images must have the same size" );

    if( (unsigned)max_level > (unsigned)MAX_LEVELS )
        CV_ERROR( CV_StsOutOfRange, "The number of pyramid levels is too large or negative" );

    if( !(termcrit.type & CV_TERMCRIT_ITER) )
        termcrit.max_iter = 5;
    termcrit.max_iter = MAX(termcrit.max_iter,1);
    termcrit.max_iter = MIN(termcrit.max_iter,100);
    if( !(termcrit.type & CV_TERMCRIT_EPS) )
        termcrit.epsilon = 1.f;
    termcrit.epsilon = MAX(termcrit.epsilon, 0.f);

    for( i = 0; i < 768; i++ )
        tab[i] = (i - 255)*(i - 255);

    // 1. construct pyramid
    src_pyramid[0] = src0;
    dst_pyramid[0] = dst0;
    for( level = 1; level <= max_level; level++ )
    {
        CV_CALL( src_pyramid[level] = cvCreateMat( (src_pyramid[level-1]->rows+1)/2,
                        (src_pyramid[level-1]->cols+1)/2, src_pyramid[level-1]->type ));
        CV_CALL( dst_pyramid[level] = cvCreateMat( src_pyramid[level]->rows,
                        src_pyramid[level]->cols, src_pyramid[level]->type ));
        CV_CALL( cvPyrDown( src_pyramid[level-1], src_pyramid[level] ));
        //CV_CALL( cvResize( src_pyramid[level-1], src_pyramid[level], CV_INTER_AREA ));
    }

    CV_CALL( mask0 = cvCreateMat( src0->rows, src0->cols, CV_8UC1 ));
    //CV_CALL( submask = (uchar*)cvAlloc( (sp+2)*(sp+2) ));

    // 2. apply meanshift, starting from the pyramid top (i.e. the smallest layer)
    for( level = max_level; level >= 0; level-- )
    {
        CvMat* src = src_pyramid[level];
        CvSize size = cvGetMatSize(src);
        uchar* sptr = src->data.ptr;
        int sstep = src->step;
        uchar* mask = 0;
        int mstep = 0;
        uchar* dptr;
        int dstep;
        float sp = (float)(sp0 / (1 << level));
        sp = MAX( sp, 1 );

        if( level < max_level )
        {
            CvSize size1 = cvGetMatSize(dst_pyramid[level+1]);
            CvMat m = cvMat( size.height, size.width, CV_8UC1, mask0->data.ptr );
            dstep = dst_pyramid[level+1]->step;
            dptr = dst_pyramid[level+1]->data.ptr + dstep + cn;
            mstep = m.step;
            mask = m.data.ptr + mstep;
            //cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC );
            cvPyrUp( dst_pyramid[level+1], dst_pyramid[level] );
            cvZero( &m );

            for( i = 1; i < size1.height-1; i++, dptr += dstep - (size1.width-2)*3, mask += mstep*2 )
            {
                for( j = 1; j < size1.width-1; j++, dptr += cn )
                {
                    int c0 = dptr[0], c1 = dptr[1], c2 = dptr[2];
                    mask[j*2 - 1] = cdiff(-3) || cdiff(3) || cdiff(-dstep-3) || cdiff(-dstep) ||
                        cdiff(-dstep+3) || cdiff(dstep-3) || cdiff(dstep) || cdiff(dstep+3);
                }
            }

            cvDilate( &m, &m, 0, 1 );
            mask = m.data.ptr;
        }

        dptr = dst_pyramid[level]->data.ptr;
        dstep = dst_pyramid[level]->step;

        for( i = 0; i < size.height; i++, sptr += sstep - size.width*3,
                                          dptr += dstep - size.width*3,
                                          mask += mstep )
        {
            for( j = 0; j < size.width; j++, sptr += 3, dptr += 3 )
            {
                int x0 = j, y0 = i, x1, y1, iter;
                int c0, c1, c2;

                if( mask && !mask[j] )
                    continue;

                c0 = sptr[0], c1 = sptr[1], c2 = sptr[2];

                // iterate meanshift procedure
                for( iter = 0; iter < termcrit.max_iter; iter++ )
                {
                    uchar* ptr;
                    int x, y, count = 0;
                    int minx, miny, maxx, maxy;
                    int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
                    double icount;
                    int stop_flag;

                    //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
                    minx = cvRound(x0 - sp); minx = MAX(minx, 0);
                    miny = cvRound(y0 - sp); miny = MAX(miny, 0);
                    maxx = cvRound(x0 + sp); maxx = MIN(maxx, size.width-1);
                    maxy = cvRound(y0 + sp); maxy = MIN(maxy, size.height-1);
                    ptr = sptr + (miny - i)*sstep + (minx - j)*3;

                    for( y = miny; y <= maxy; y++, ptr += sstep - (maxx-minx+1)*3 )
                    {
                        int row_count = 0;
                        x = minx;
                        for( ; x + 3 <= maxx; x += 4, ptr += 12 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            { s0 += t0; s1 += t1; s2 += t2; sx += x; row_count++; }
                            t0 = ptr[3], t1 = ptr[4], t2 = ptr[5];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            { s0 += t0; s1 += t1; s2 += t2; sx += x+1; row_count++; }
                            t0 = ptr[6], t1 = ptr[7], t2 = ptr[8];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            { s0 += t0; s1 += t1; s2 += t2; sx += x+2; row_count++; }
                            t0 = ptr[9], t1 = ptr[10], t2 = ptr[11];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            { s0 += t0; s1 += t1; s2 += t2; sx += x+3; row_count++; }
                        }
                        for( ; x <= maxx; x++, ptr += 3 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            { s0 += t0; s1 += t1; s2 += t2; sx += x; row_count++; }
                        }
                        count += row_count;
                        sy += y*row_count;
                    }

                    if( count == 0 )
                        break;

                    icount = 1./count;
                    x1 = cvRound(sx*icount);
                    y1 = cvRound(sy*icount);
                    s0 = cvRound(s0*icount);
                    s1 = cvRound(s1*icount);
                    s2 = cvRound(s2*icount);

                    stop_flag = (x0 == x1 && y0 == y1) || abs(x1-x0) + abs(y1-y0) +
                        tab[s0 - c0 + 255] + tab[s1 - c1 + 255] +
                        tab[s2 - c2 + 255] <= termcrit.epsilon;

                    x0 = x1; y0 = y1;
                    c0 = s0; c1 = s1; c2 = s2;

                    if( stop_flag )
                        break;
                }

                dptr[0] = (uchar)c0;
                dptr[1] = (uchar)c1;
                dptr[2] = (uchar)c2;
            }
        }
    }

    __END__;

    for( i = 1; i <= MAX_LEVELS; i++ )
    {
        cvReleaseMat( &src_pyramid[i] );
        cvReleaseMat( &dst_pyramid[i] );
    }

    cvReleaseMat( &mask0 );
}
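// Calling the filter (sketch): 8-bit, 3-channel input and output, spatial
// radius 20, colour radius 40, a 2-level pyramid; the file name is
// illustrative.
IplImage* src = cvLoadImage("input.jpg", 1);
if (src)
{
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
    cvPyrMeanShiftFiltering(src, dst, 20, 40, 2,
            cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 5, 1));
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
}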
IplImage* BouyObject::GetMask(const IplImage * imgIn, IplImage * debugOut) const
{
    if(imgIn == NULL) return NULL;
    CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2);
    IplImage* imgSmallCopy = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 3);
    IplImage* smallDebug = cvCreateImage(cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 3);
    IplImage* mask = NULL;
    IplImage* result = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    // Segment at half resolution, then scale the mask back up.
    cvPyrDown(imgIn, imgSmallCopy);
    if(debugOut)
    {
        cvPyrDown(debugOut, smallDebug);
    }
    mask = SegmentationMask2(imgSmallCopy, smallDebug);
    cvPyrUp(mask, result);
    if(debugOut)
    {
        cvPyrUp(smallDebug, debugOut);
    }
    cvReleaseImage(&smallDebug);
    cvReleaseImage(&mask);
    cvReleaseImage(&imgSmallCopy);
    return result;

    // Earlier multi-mask implementation, kept for reference:
    // if(imgIn == NULL) return NULL;
    // CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2 );
    // IplImage* imgSmallCopy = cvCreateImage( cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 3 );
    // cvPyrDown( imgIn, imgSmallCopy);
    // IplImage* colormask = NULL;
    // IplImage* gvcolormask = NULL;
    // //IplImage* shapemask = ShapeMask(imgIn);
    // IplImage* segmentationmask = NULL;
    // IplImage* segmentationmask2 = NULL;
    // IplImage* histogrammask = NULL;
    // //IplImage* edgemask = EdgeMask(imgIn);
    // IplImage* templatemask = NULL;
    // IplImage* channelmask = NULL;
    //// if(colormask == NULL || shapemask == NULL ||
    ////    segmentationmask == NULL || histogrammask == NULL ||
    ////    edgemask == NULL ) return NULL;
    // //cvShowImage("colormask", colormask);
    // //cvShowImage("channelmask", channelmask);
    // IplImage * imgOut = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    // IplImage * imgOutSmall = cvCreateImage(cvGetSize(imgSmallCopy),IPL_DEPTH_8U, 1);
    // IplImage * threshold = cvCreateImage(cvGetSize(imgSmallCopy),IPL_DEPTH_8U, 1);
    // cvZero(imgOut);
    // if(mEnableHist)
    // {
    //     histogrammask = HistogramMask(imgSmallCopy);
    // }
    // if(mEnableColor)
    // {
    //     colormask = ColorMask(imgSmallCopy);
    // }
    // if(mEnableSegment)
    // {
    //     segmentationmask = SegmentationMask(imgSmallCopy);
    // }
    // if(mEnableSegment2)
    // {
    //     segmentationmask2 = SegmentationMask2(imgSmallCopy,debugOut);
    // }
    // if(mEnableGVColor)
    // {
    //     gvcolormask = GVColorMask(imgSmallCopy);
    // }
    // int count = 1;
    // if(VisionUtils::CombineMasks(imgOutSmall,histogrammask,imgOutSmall,count,mHistWeight))
    // {
    //     count++;
    // }
    // if(VisionUtils::CombineMasks(imgOutSmall,colormask,imgOutSmall,count,mColorWeight))
    // {
    //     count++;
    // }
    // if(VisionUtils::CombineMasks(imgOutSmall,segmentationmask,imgOutSmall,count,mSegmentWeight))
    // {
    //     count++;
    // }
    // if(VisionUtils::CombineMasks(imgOutSmall,segmentationmask2,imgOutSmall,count,mSegmentWeight2))
    // {
    //     count++;
    // }
    // if(VisionUtils::CombineMasks(imgOutSmall,gvcolormask,imgOutSmall,count,mGVColorWeight))
    // {
    //     count++;
    // }
    // for(unsigned int i = 0; i < mMaskOptions.size(); i++)
    // {
    //     if(mMaskOptions[i].mEnabledFlag)
    //     {
    //         channelmask = cvCreateImage(cvGetSize(imgSmallCopy),IPL_DEPTH_8U, 1);
    //         if(VisionUtils::ConvertAndGetSingleColorChannel(imgSmallCopy,channelmask,mMaskOptions[i].mCvColorConversionName, mMaskOptions[i].mChannelIndex))
    //         {
    //             if(mMaskOptions[i].mInvertFlag)
    //             {
    //                 VisionUtils::Invert(channelmask,channelmask);
    //             }
    //             if(VisionUtils::CombineMasks(imgOutSmall,channelmask,imgOutSmall,count,mMaskOptions[i].mWeight))
    //             {
    //                 count++;
    //             }
    //         }
    //         if(mDebug)
    //         {
    //             cvShowImage("channelmask", channelmask);
    //             //cvWaitKey(0);
    //         }
    //         cvReleaseImage(&channelmask);
    //     }
    // }
    // //VisionUtils::CombineMasks(imgOut,edgemask,imgOut,2,1);
    // //VisionUtils::CombineMasks(imgOut,histogrammask,imgOut,2,1);
    // //cvNormalize(imgOut,imgOut,255,0,CV_MINMAX);
    // if(mDebug)
    // {
    //     cvShowImage("combined", imgOut);
    // }
    // //if(debugOut) cvConvertImage(imgOut,debugOut);
    // cvThreshold(imgOutSmall,threshold,mMainThreshold,255,CV_THRESH_BINARY );
    // std::list<CvBox2D> blobList;
    // blobList = Zebulon::Vision::VisionUtils::GetBlobBoxes(threshold,0,mMinNoiseSizePercent);
    // for(std::list<CvBox2D>::iterator it = blobList.begin(); it != blobList.end(); it++)
    // {
    //     CvPoint2D32f boxCorners32[4];
    //     CvPoint boxCorners[4];
    //     cvBoxPoints(*it,boxCorners32);
    //     for(int i = 0; i < 4; i++)
    //     {
    //         boxCorners[i] = cvPointFrom32f(boxCorners32[i]);
    //     }
    //     cvFillConvexPoly(threshold,boxCorners,4,cvScalar(0,0,0),4);
    //     //Zebulon::Vision::VisionUtils::DrawSquare(imgOut,*it);
    // }
    // if(debugOut) cvSet(debugOut,mNearColor,threshold);
    // //shapemask = FindCircles(imgOut);
    // IplImage * tempImage = TemplateMask(imgOutSmall, threshold, mBouyTemplate);
    // cvPyrUp( tempImage, imgOut);
    // //cvConvertImage(tempImage,imgOut);
    // //VisionUtils::CombineMasks(imgOut,templatemask,imgOut);
    // if(mDebug)
    // {
    //     cvShowImage("clean", threshold);
    //     cvShowImage("final", imgOut);
    //     cvShowImage("color", colormask);
    //     cvShowImage("hist", histogrammask);
    //     cvShowImage("segment", segmentationmask);
    //     cvShowImage("template", templatemask);
    //     cvShowImage("gvcolor", gvcolormask);
    // }
    // cvReleaseImage(&colormask);
    // cvReleaseImage(&segmentationmask);
    // cvReleaseImage(&segmentationmask2);
    // cvReleaseImage(&histogrammask);
    // cvReleaseImage(&gvcolormask);
    // cvReleaseImage(&channelmask);
    // cvReleaseImage(&templatemask);
    // cvReleaseImage(&threshold);
    // cvReleaseImage(&tempImage);
    // return imgOut;
}
// Parameters:
//   img - input video frame
//   dst - detection result
void Invade::update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    double timestamp = clock() / 100.; // get current time in seconds (timestamp)
    CvSize size = cvSize(img->width, img->height); // get current frame size
    int i, idx1, idx2;
    IplImage* silh;
    IplImage* pyr = cvCreateImage(cvSize((size.width & -2) / 2, (size.height & -2) / 2), 8, 1);
    CvMemStorage *stor;
    CvSeq *cont;

    /* initialize the buffers on the first call or when the frame size changes */
    if (!mhi || mhi->width != size.width || mhi->height != size.height)
    {
        if (buf == 0) // allocate the ring buffer if it does not exist yet
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset(buf, 0, N*sizeof(buf[0]));
        }
        for (i = 0; i < N; i++)
        {
            cvReleaseImage(&buf[i]);
            buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
            cvZero(buf[i]); // clear buffer frame at the beginning
        }
        cvReleaseImage(&mhi);
        mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
        cvZero(mhi); // clear MHI at the beginning
    } // end of if(mhi)

    /* convert the current frame to grayscale into the last buffer slot */
    cvCvtColor(img, buf[last], CV_BGR2GRAY);

    /* set the frame indices */
    idx1 = last;
    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    // frame differencing
    silh = buf[idx2]; // silh aliases buf[idx2]
    cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames

    // binarize the difference image
    cvThreshold(silh, silh, 50, 255, CV_THRESH_BINARY);

    // drop stale motion and update the motion history image
    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI
    cvConvert(mhi, dst); // copy mhi into dst

    // median filter to remove small noise
    cvSmooth(dst, dst, CV_MEDIAN, 3, 0, 0, 0);

    cvPyrDown(dst, pyr, CV_GAUSSIAN_5x5); // downsample (quarter area) to remove noise
    cvDilate(pyr, pyr, 0, 1);             // dilate to close holes in the targets
    cvPyrUp(pyr, dst, CV_GAUSSIAN_5x5);   // upsample back to the original size

    // the block below finds the contours
    // create dynamic structure and sequence
    stor = cvCreateMemStorage(0);
    cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), stor);

    // find all contours
    cvFindContours(dst, stor, &cont, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    // draw boxes straight from each contour's bounding rectangle
    for (; cont; cont = cont->h_next)
    {
        CvRect r = ((CvContour*)cont)->rect;
        if (r.height * r.width > CONTOUR_MAX_AERA) // discard small rectangles
        {
            cvRectangle(img, cvPoint(r.x, r.y),
                        cvPoint(r.x + r.width, r.y + r.height),
                        CV_RGB(255, 0, 0), 1, CV_AA, 0);
        }
    }

    // free memory
    cvReleaseMemStorage(&stor);
    cvReleaseImage(&pyr);
}
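// A typical driver loop for update_mhi (sketch; the Invade instance, camera
// index and window name are assumptions). dst must be 8-bit single channel
// because update_mhi pyramid-filters it with an 8-bit buffer.
CvCapture* cap = cvCaptureFromCAM(0);
IplImage* motion = 0;
Invade invade;
for (;;)
{
    IplImage* frame = cvQueryFrame(cap); // owned by the capture; do not release
    if (!frame) break;
    if (!motion)
        motion = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    invade.update_mhi(frame, motion, 30);
    cvShowImage("Invade", frame); // boxes are drawn into the input frame
    if (cvWaitKey(10) >= 0) break;
}
cvReleaseImage(&motion);
cvReleaseCapture(&cap);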
/**************************************
 * Definition: Finds squares in an image with the given minimum size
 *             (Taken from the API and modified slightly)
 *             Doesn't require exactly 4 sides, convexity, or near-90-degree
 *             angles either ('findBlobs')
 *
 * Parameters: the image to find squares in and the minimum area for a square
 *
 * Returns:    a squares_t linked list
 **************************************/
squares_t* Camera::findSquares(IplImage *img, int areaThreshold)
{
    CvSeq* contours;
    CvMemStorage *storage;
    int i, j, area;
    CvPoint ul, lr, pt, centroid;
    CvSize sz = cvSize(img->width, img->height);
    IplImage * canny = cvCreateImage(sz, 8, 1);
    squares_t *sq_head, *sq, *sq_last;
    CvSeqReader reader;

    // Create storage
    storage = cvCreateMemStorage(0);

    // Pyramid image for blurring the result
    IplImage* pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);

    CvSeq* result;
    double s, t;

    // Create an empty sequence that will contain the square's vertices
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

    // Select the maximum ROI in the image with the width and height divisible by 2
    cvSetImageROI(img, cvRect(0, 0, sz.width, sz.height));

    // Down- and up-scale the image to reduce noise
    cvPyrDown(img, pyr, CV_GAUSSIAN_5x5);
    cvPyrUp(pyr, img, CV_GAUSSIAN_5x5);

    // Apply the Canny edge detector with the lower threshold at 0 (which forces edge merging)
    cvCanny(img, canny, 0, 50, 3);

    // Dilate the Canny output to remove potential holes between edge segments
    cvDilate(canny, canny, 0, 2);

    // Find the contours and store them all as a list
    // was CV_RETR_EXTERNAL
    cvFindContours(canny, storage, &contours, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    // Test each contour to find squares
    while (contours)
    {
        // Approximate a contour with accuracy proportional to the contour perimeter
        result = cvApproxPoly(contours, sizeof(CvContour), storage,
                              CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.1, 0);
        // Note: the absolute value of the area is used because the area may be
        // positive or negative, in accordance with the contour orientation
        if (result->total >= 4 &&
            fabs(cvContourArea(result, CV_WHOLE_SEQ, 0)) > areaThreshold)
        {
            s = 0;
            for (i = 0; i < 5; i++)
            {
                // Find the minimum angle between joint edges (maximum of cosine)
                if (i >= 2)
                {
                    t = fabs(ri_angle((CvPoint*)cvGetSeqElem(result, i),
                                      (CvPoint*)cvGetSeqElem(result, i-2),
                                      (CvPoint*)cvGetSeqElem(result, i-1)));
                    s = s > t ? s : t;
                }
            }
            for (i = 0; i < 4; i++)
            {
                cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
            }
        }
        // Get the next contour
        contours = contours->h_next;
    }

    // Initialize a reader of the sequence
    cvStartReadSeq(squares, &reader, 0);
    sq_head = NULL; sq_last = NULL; sq = NULL;

    // We now have a list of square contours; find the centroids and areas
    for (i = 0; i < squares->total; i += 4)
    {
        // Find the upper-left and lower-right coordinates
        ul.x = 1000; ul.y = 1000;
        lr.x = 0; lr.y = 0;
        for (j = 0; j < 4; j++)
        {
            CV_READ_SEQ_ELEM(pt, reader);
            // Upper left
            if (pt.x < ul.x) ul.x = pt.x;
            if (pt.y < ul.y) ul.y = pt.y;
            // Lower right
            if (pt.x > lr.x) lr.x = pt.x;
            if (pt.y > lr.y) lr.y = pt.y;
        }

        // Find the centroid
        centroid.x = ((lr.x - ul.x) / 2) + ul.x;
        centroid.y = ((lr.y - ul.y) / 2) + ul.y;

        // Find the area
        area = (lr.x - ul.x) * (lr.y - ul.y);

        // Add a node to the linked list and fill in the data
        sq = new squares_t;
        sq->area = area;
        sq->center.x = centroid.x;
        sq->center.y = centroid.y;
        sq->next = NULL;
        if (sq_last == NULL)
            sq_head = sq;
        else
            sq_last->next = sq;
        sq_last = sq;
    }

    // Release the temporary images and data
    cvReleaseImage(&canny);
    cvReleaseImage(&pyr);
    cvReleaseMemStorage(&storage);
    return sq_head;
}
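// The caller owns the returned list; a matching cleanup sketch (the helper
// name is illustrative; the nodes above were allocated with new):
void freeSquares(squares_t* sq)
{
    while (sq)
    {
        squares_t* next = sq->next;
        delete sq;
        sq = next;
    }
}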
IplImage* imgPyrUp(IplImage* img)
{
    assert(img->width % 2 == 0 && img->height % 2 == 0);
    IplImage* out = cvCreateImage(cvSize(img->width * 2, img->height * 2),
                                  img->depth, img->nChannels);
    // CV_GAUSSIAN_5x5 is the OpenCV constant; the original passed the
    // IPL-era IPL_GAUSSIAN_5x5, which requires the Intel IPL headers.
    cvPyrUp(img, out, CV_GAUSSIAN_5x5);
    return out;
}
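// A matching down-scaler (sketch) so the function above can be used for the
// blur-by-pyramid round trip seen elsewhere in this collection:
IplImage* imgPyrDown(IplImage* img)
{
    assert(img->width % 2 == 0 && img->height % 2 == 0);
    IplImage* out = cvCreateImage(cvSize(img->width / 2, img->height / 2),
                                  img->depth, img->nChannels);
    cvPyrDown(img, out, CV_GAUSSIAN_5x5);
    return out;
}
// Usage: IplImage* half = imgPyrDown(img); IplImage* back = imgPyrUp(half);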
void ExtraFront(IplImage* frame, IplImage* front_bin, IplImage* background)
{
    // Store the grayscale background in matrix form; convert the initial background into it
    CvMat* background_grayMat = cvCreateMat(background->height, background->width, CV_32FC1);
    cvConvert(background, background_grayMat);

    // Matrix for the background update that is blended with the original background
    CvMat* background_renewMat = cvCreateMat(background->height, background->width, CV_32FC1);

    // Current frame in grayscale
    IplImage* frame_gray = NULL;

    // Median-filtered grayscale frame and its matrix form
    IplImage* frame_median = NULL;
    CvMat* frame_medianMat = NULL;

    // Foreground image and matrix
    IplImage* front = NULL;
    CvMat* frontMat = NULL;

    // Binarized foreground matrix
    CvMat* front_binMat = NULL;

    if (!frame) return;

    // Convert the current frame to grayscale
    if (frame_gray == NULL)
        frame_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    cvCvtColor(frame, frame_gray, CV_BGR2GRAY);

    // Median-filter the current frame
    if (frame_median == NULL)
    {
        frame_median = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        frame_medianMat = cvCreateMat(frame->height, frame->width, CV_32FC1);
    }
    CvSize size = cvSize(frame->width, frame->height); // get current frame size
    cvSmooth(frame_gray, frame_median, CV_MEDIAN); // default 3x3 kernel
    /*float k[9] = {0,-1,0,-1,5,-1,0,-1,0};
    CvMat km = cvMat(3,3,CV_32FC1,k);
    cvFilter2D(frame_median,frame_median,&km);*/

    IplImage* pyr = cvCreateImage(cvSize((size.width & -2)/2, (size.height & -2)/2), 8, 1);
    cvPyrDown(frame_median, pyr, CV_GAUSSIAN_5x5); // downsample (quarter area) to remove noise
    //cvDilate( pyr, pyr, 0, 1 ); // dilate to close holes in the targets
    cvPyrUp(pyr, frame_median, CV_GAUSSIAN_5x5); // upsample back to the original size
    cvConvert(frame_median, frame_medianMat);

    // Background subtraction to obtain the foreground
    if (front == NULL)
    {
        front = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        frontMat = cvCreateMat(frame->height, frame->width, CV_32FC1);
    }
    cvAbsDiff(frame_medianMat, background_grayMat, frontMat);
    cvConvert(frontMat, front);

    // Binarize the foreground with a modified Otsu threshold
    if (front_binMat == NULL)
    {
        front_binMat = cvCreateMat(frame->height, frame->width, CV_32FC1);
    }
    int threshold = Otsu(front);
    //printf("\n*threshold:%d*\n", threshold);
    cvThreshold(front, front_bin, threshold, 255, CV_THRESH_BINARY);

    // Morphological opening on the binarized foreground
    cvErode(front_bin, front_bin); // erode, 1 iteration
    cvSmooth(front_bin, front_bin, CV_MEDIAN, 3, 0, 0, 0);
    cvConvert(front_bin, front_binMat);

    cvReleaseMat(&background_grayMat);
    cvReleaseMat(&background_renewMat);
    cvReleaseImage(&frame_gray);
    cvReleaseImage(&frame_median);
    cvReleaseMat(&frame_medianMat);
    cvReleaseImage(&front);
    cvReleaseMat(&frontMat);
    cvReleaseMat(&front_binMat);
    cvReleaseImage(&pyr); // was leaked in the original
}
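// Otsu above is a helper defined elsewhere ("modified Otsu" per the original
// comment). A standard Otsu threshold over an 8-bit, 1-channel IplImage might
// look like this sketch (not the project's exact version):
int Otsu(IplImage* img)
{
    int hist[256] = {0};
    for (int y = 0; y < img->height; y++)
    {
        uchar* row = (uchar*)(img->imageData + y * img->widthStep);
        for (int x = 0; x < img->width; x++)
            hist[row[x]]++;
    }
    int total = img->width * img->height;
    double sum = 0;
    for (int i = 0; i < 256; i++) sum += i * (double)hist[i];
    double sumB = 0, maxVar = 0;
    int wB = 0, best = 0;
    for (int t = 0; t < 256; t++)
    {
        wB += hist[t];           // background weight
        if (wB == 0) continue;
        int wF = total - wB;     // foreground weight
        if (wF == 0) break;
        sumB += t * (double)hist[t];
        double mB = sumB / wB;                 // background mean
        double mF = (sum - sumB) / wF;         // foreground mean
        double varBetween = (double)wB * wF * (mB - mF) * (mB - mF);
        if (varBetween > maxVar) { maxVar = varBetween; best = t; }
    }
    return best; // threshold maximizing between-class variance
}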
/** Returns a CvSeq (an OpenCV sequence) of Tetris pieces detected in an image.
    Based on the OpenCV example of identifying a square, modified to detect
    L-shaped Tetris pieces. Effectiveness depends on the edge-detection
    thresholds and on the camera being positioned orthogonal to the Tetris piece. */
CvSeq* Camera::findTetris( IplImage* img, CvMemStorage* storage )
{
    thresh = 50;
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    /// Copy of image so that the detection is non-destructive
    IplImage* timg = cvCloneImage( img );
    /// Gray scale needed
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    /// Smaller version to do scaling
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    // create empty sequence that will contain points -
    /// 6 points per tetris piece (the vertices)
    CvSeq* tetrisPieces = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum region of interest (ROI) in the image
    // with the width and height divisible by 2, i.e. the biggest
    // usable size of the object
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    // (the round trip through the pyramid acts as a low-pass filter)
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    /// find pieces in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        /// extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        /// try several threshold levels
        for( l = 0; l < N; l++ )
        {
            /// hack: use Canny instead of zero threshold level.
            /// Canny helps to catch tetrisPieces with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 50, 120, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                       CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                /* Tetris pieces have 6 vertices; the area bounds filter out
                 * "noisy" contours.
                 * Note: absolute value of an area is used because
                 * area may be positive or negative - in accordance with the
                 * contour orientation */
                if( result->total == 6 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 )
                {
                    s = 0;
                    for( i = 0; i < 7; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem( result, i ),
                                (CvPoint*)cvGetSeqElem( result, i-2 ),
                                (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }
                    // if cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 6; i++ )
                            cvSeqPush( tetrisPieces,
                                       (CvPoint*)cvGetSeqElem( result, i ));
                }
                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );
    return tetrisPieces;
}
/* Called for every frame grabbed from the camera: performs moving-target
   detection and draws the bounding box. Returns true when a group is
   complete, false otherwise. Called by the UI. */
PREPROCESS_API bool PreProcessFrame(Frame frame, Frame &lastFrame)
{
    Frame tempFrame;
    tempFrame = prevFrame;
    currImg = frame.image;
    CvSize imgSize = cvSize(currImg->width, currImg->height);
    IplImage *grayImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);   // grayscale of the current frame
    IplImage *gxImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);     // X-direction gradient of the current frame
    IplImage *gyImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);     // Y-direction gradient of the current frame
    IplImage *diffImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);   // difference image of the current frame
    IplImage *diffImg_2 = cvCreateImage(imgSize, IPL_DEPTH_8U, 1); // difference image of the previous frame
    IplImage *pyr = cvCreateImage(cvSize((imgSize.width & -2)/2, (imgSize.height & -2)/2), 8, 1); // temporary image for erosion-based denoising
    int height, width; // image height and width
    char Kx[9] = {1, 0, -1, 2, 0, -2, 1, 0, -1}; // X-direction kernel for the X gradient
    char Ky[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1}; // Y-direction kernel for the Y gradient
    CvMat KX, KY;
    KX = cvMat(3, 3, CV_8S, Kx); // build the kernel
    KY = cvMat(3, 3, CV_8S, Ky); // build the kernel

    cvCvtColor(currImg, grayImg, CV_BGR2GRAY);
    cvSmooth(grayImg, grayImg, CV_GAUSSIAN, 7, 7);    // smooth
    cvFilter2D(grayImg, gxImg, &KX, cvPoint(-1, -1)); // X-direction gradient
    cvFilter2D(grayImg, gyImg, &KY, cvPoint(-1, -1)); // Y-direction gradient
    cvAdd(gxImg, gyImg, grayImg, NULL);               // combined gradient image
    cvReleaseImage(&gxImg);
    cvReleaseImage(&gyImg);
    height = grayImg->height;
    width = grayImg->width;
    bool alarm = false; // whether a moving target is inside the alarm region
    CvRect rect;        // bounding rectangle

    if (!firstFrmRec) // first frame
    {
        firstFrmRec = true;
        lastGrayImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
        lastDiffImg = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
        cvCopy(grayImg, lastGrayImg, NULL); // use the first frame as the background
        xRightAlarm = currImg->width - 100;
        yBottomAlarm = currImg->height - 100;
        yTopAlarm = currImg->height - 200;
    }
    else
    {
        cvAbsDiff(grayImg, lastGrayImg, diffImg); // difference image of the current frame
        cvCopy(grayImg, lastGrayImg, NULL);       // current gradient image becomes the next background
        cvThreshold(diffImg, diffImg, 15, 255, CV_THRESH_BINARY); // binarize the difference image
        if (secondFrmRec) // from the third frame on
        {
            // AND the two difference masks to get an "accurate" target for the previous frame
            cvAnd(diffImg, lastDiffImg, diffImg_2);
            cvPyrDown(diffImg_2, pyr, 7); // downsample
            cvErode(pyr, pyr, 0, 1);      // erode to remove small noise
            cvPyrUp(pyr, diffImg_2, 7);
            cvReleaseImage(&pyr);
            if (drawAlarmArea)
                cvRectangle(tempFrame.image, cvPoint(xLeftAlarm, yTopAlarm),
                            cvPoint(xRightAlarm, yBottomAlarm), CV_RGB(0, 0, 255), 3, CV_AA, 0);
            alarm = AlarmArea(xLeftAlarm, yTopAlarm, xRightAlarm, yBottomAlarm, diffImg_2);
        }
        cvCopy(diffImg, lastDiffImg, NULL); // keep the binarized difference image for the next frame
        cvReleaseImage(&diffImg);
        minLeftX = 3000; minLeftY = 3000;
        maxRightX = 0; maxRightY = 0;
        if (alarm) // a moving target was detected somewhere in the image
        {
            FindRectX(diffImg_2, 0, height);
            FindRectY(diffImg_2, minLeftX, maxRightX);
        }
        if (!secondFrmRec) // mark that the second frame has been received
        {
            secondFrmRec = true;
        }
    }

    if (maxRightX * maxRightY) // a target was found: draw the box and group
    {
        // clamp the bottom-right corner to the image
        maxRightX = maxRightX > 1 ? maxRightX : 2;
        maxRightY = maxRightY > 1 ? maxRightY : 2;
        maxRightX = maxRightX < (width + 1) ? maxRightX : width;
        maxRightY = maxRightY < (height + 1) ? maxRightY : height;
        // clamp the top-left corner to the image
        minLeftX = minLeftX > 0 ? minLeftX : 1;
        minLeftY = minLeftY > 0 ? minLeftY : 1;
        minLeftX = minLeftX < maxRightX ? minLeftX : (maxRightX - 1);
        minLeftY = minLeftY < maxRightY ? minLeftY : (maxRightY - 1);
        if (drawRect)
            cvRectangle(tempFrame.image, cvPoint(minLeftX, minLeftY),
                        cvPoint(maxRightX, maxRightY), CV_RGB(255, 0, 0), 3, CV_AA, 0);
        //outobj << minLeftX << " " << minLeftY << " " << maxRightX-minLeftX << " " << maxRightY-minLeftY << endl;
        rect = cvRect(minLeftX, minLeftY, maxRightX - minLeftX, maxRightY - minLeftY);
        tempFrame.searchRect = rect;
        signelCount++;
        cvReleaseImage(&grayImg);
        cvReleaseImage(&diffImg_2);
        prevFrame = frame;
        lastFrame = tempFrame;
        if (signelCount == groupCount) // groupCount consecutive single-person frames end the group
        {
            signelCount = 0;
            return true;
        }
        else
        {
            return false;
        }

        // NOTE: everything below is unreachable - the branch above always returns.
        //if((minLeftY < 360) && ((maxRightX-minLeftX) < 420)) // box is single-person sized
        //{
        //    signelCount++;
        //    cvReleaseImage(&grayImg);
        //    cvReleaseImage(&diffImg_2);
        //    prevFrame = frame;
        //    *lastFrame = tempFrame;
        //    if(signelCount == groupCount) // after groupCount single-person frames the group ends
        //    {
        //        signelCount = 0;
        //        return true;
        //    }
        //    else
        //    {
        //        return false;
        //    }
        //}
        //else // multiple people: each frame is its own group
        //{
        //    return false;
        //}
        if ((minLeftY < 360) && ((maxRightX - minLeftX) < 420)) // box is single-person sized
        {
            signelCount++;
            cvReleaseImage(&grayImg);
            cvReleaseImage(&diffImg_2);
            prevFrame = frame;
            lastFrame = tempFrame;
            if (signelCount == groupCount) // groupCount consecutive single-person frames end the group
            {
                signelCount = 0;
                return true;
            }
            else
            {
                return false;
            }
        }
        else // multiple people detected: each frame is its own group
        {
            signelCount = 0;
            cvReleaseImage(&grayImg);
            cvReleaseImage(&diffImg_2);
            prevFrame = frame;
            lastFrame = tempFrame;
            return true;
        }
    }
    else // nothing detected in the current frame
    {
        cvReleaseImage(&grayImg);
        cvReleaseImage(&diffImg_2);
        prevFrame = frame;
        lastFrame = tempFrame;
        if (signelCount > 0) // previous frame had a single person, current frame has none
        {
            signelCount = 0;
            return true;
        }
        else
        {
            return false;
        }
    }
}
int main(int argc, char** argv)
{
    setlocale(LC_CTYPE, "jpn");
    char* filename = argc >= 2 ? argv[1] : (char*)"card_ex2.JPG";
    //CvSeq* lines = 0;
    char _filename[100];
    if ((img0 = cvLoadImage(filename, -1)) == 0)
        return 0;

    // card variables
    CvRect rect;
    double area;

    // one instance per card part
    eachParts face;
    eachParts number;
    eachParts name;

    // print the hot-key help (the messages are in Japanese: quit, reset the
    // image, detect the student ID card, save photo/number/name, save all,
    // write the HTML file)
    printf("Hot Keys: \n"
           "\tESC - プログラムを終了します.\n"
           "\tr - 画像を初期化します.\n"
           "\tf - 学生証を認識します.\n"
           "\t1, 2, 3 - それぞれのキーで写真、番号、名前を個別に保存します(fを押した後のみ)\n"
           "\tA - All param are saved and written(fを押したあとのみ)\n"
           "\th - write to HTML file.\n");

    // create windows ("学生証スキャナー" is the main "student ID scanner" window)
    cvNamedWindow("binary1_frame", 1);
    cvNamedWindow("binary2_scan", 1);
    cvNamedWindow("学生証スキャナー", 1);
    cvMoveWindow("学生証スキャナー", 0, 0);
    cvMoveWindow("binary1_frame", 640, 0);
    cvMoveWindow("binary2_scan", 0, 480);

    // working copies
    img = cvCloneImage(img0);
    tmp = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);

    // binarization buffers
    gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    gray2 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);

    // initial display
    cvShowImage("学生証スキャナー", img);
    //cvShowImage("gray", gray);

    // create the trackbar and binarize the scan image
    cvCreateTrackbar("Threshold", "binary2_scan", &level, 255, on_change);
    binarization(gray2, 1);

    // binarize the frame image
    //cvCreateTrackbar( "Threshold", "binary1_frame", &level2, 255, on_change);
    cvCreateTrackbar("Threshold1", "binary1_frame", &thr1, 255, on_change2);
    cvCreateTrackbar("Threshold2", "binary1_frame", &thr2, 255, on_change2);
    binarization_canny(gray, thr1, thr2);
    cvShowImage("binary1_frame", gray);
    cvShowImage("binary2_scan", gray2);

    // set callback function for mouse operations
    cvSetMouseCallback("学生証スキャナー", on_mouse, 0);

    // key handling
    for (;;)
    {
        int c = cvWaitKey(0);
        if (c == 27)
            break;
        if (c == 'r') // reset the image
        {
            cvZero(tmp);
            cvCopy(img0, img);
            binarization_canny(gray, thr1, thr2);
            cvShowImage("学生証スキャナー", img);
        }
        if (c == 'f') // detect the card outline
        {
            // noise reduction via the pyramid round trip
            tmp2 = cvCreateImage(cvSize(img->width / 2, img->height / 2), img->depth, img->nChannels);
            cvPyrDown(img, tmp2, CV_GAUSSIAN_5x5);
            cvPyrUp(tmp2, img, CV_GAUSSIAN_5x5);
            CvMemStorage* storages = cvCreateMemStorage(0); // memory storage
            CvSeq *contours = 0;                            // sequence
            //lines = cvHoughLines2(gray, storages, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 80, 30, 10);
            int x = cvFindContours(gray, storages, &contours, sizeof(CvContour),
                                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
            //cvErode(gray, gray);
            //cvDilate(gray, gray);
            /*for(int i = 0; i < lines->total; i++ ){
                CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
                cvLine(img, line[0], line[1], CV_RGB(255,0,0), 3, 8 );
            }*/
            //cvDrawContours(img, contours, CV_RGB (255, 0, 0), CV_RGB (0, 255, 0), 1,1, CV_AA);
            rect = cvBoundingRect(contours, 0);
            area = cvContourArea(contours);
            std::cout << "矩形面積 = " << area << std::endl; // rectangle area
            if (area < 150000.0)
            {
                // read error: adjust the parameters or restart
                std::cout << "読み取りエラー...パラメータを調整するか再起動してください" << std::endl;
            }
            cvRectangle(img, cvPoint(rect.x, rect.y),
                        cvPoint(rect.x + rect.width, rect.y + rect.height),
                        CV_RGB(255, 0, 0), 2);
            cvShowImage("binary1_frame", gray);
            cvShowImage("学生証スキャナー", img);
            cvReleaseMemStorage(&storages);
            cvCopy(img0, img);
        }
        /*
        // recognition (tentative): crop the mouse selection and OCR it with tesseract
        if(c == 'g'){
            cvSetImageROI(gray2, cvRect(min(pt.x, prev_pt.x), min(pt.y, prev_pt.y),
                          abs(pt.x - prev_pt.x), abs(pt.y - prev_pt.y)));
            cvSaveImage("test.tiff", gray2);
            // run the command (result is saved to tmp.txt)
            system("tesseract test.tiff tmp -l jpn");
            std::ifstream ifs("tmp.txt");
            std::string str;
            std::getline(ifs, str);
            std::cout << "「" << str << "」…と、検出されました。" << std::endl;
            cvResetImageROI(gray2);
        }
        */
        if (c == 'l') // load a new image (prompts for a file name)
        {
            count++;
            std::cout << "新たな画像ファイルの名前を入力してください。" << std::endl;
            scanf("%s", _filename);
            if ((img0 = cvLoadImage(_filename, 1)) == 0)
                return 0;
            img = cvCloneImage(img0);
            binarization(gray2, 1);
            binarization_canny(gray, thr1, thr2);
            cvShowImage("学生証スキャナー", img);
            cvShowImage("binary1_frame", gray);
            cvShowImage("binary2_scan", gray2);
        }
        if (c == '1') // save the face photo
        {
            face.set(0.75, 0.21, 0.2, 0.5, rect, (char*)"face.jpg");
            if (face.makeRect() == 0)
                std::cout << "画像保存エラー!" << std::endl;       // save error
            else
                std::cout << "顔写真が保存されました。" << std::endl; // photo saved
        }
        if (c == '2') // save the department and student number
        {
            number.set(0.22, 0.4, 0.40, 0.1, rect, (char*)"number.tiff");
            if (number.makeRect_ocr() == 0)
                std::cout << "画像保存エラー!" << std::endl;
            else
                std::cout << "所属と学生証番号が保存されました。" << std::endl;
        }
        if (c == '3') // save the name
        {
            name.set(0.19, 0.58, 0.54, 0.13, rect, (char*)"name.tiff");
            if (name.makeRect_ocr() == 0)
                std::cout << "画像保存エラー!" << std::endl;
            else
                std::cout << "名前が保存されました。" << std::endl;
        }
        if (c == 'a') // all in one: photo, number, and name
        {
            face.set(0.75, 0.21, 0.2, 0.5, rect, (char*)"face.jpg");
            if (face.makeRect() == 0)
                std::cout << "画像保存エラー!" << std::endl;
            else
                std::cout << "顔写真が保存されました。" << std::endl;
            number.set(0.22, 0.4, 0.40, 0.1, rect, (char*)"number.tiff");
            if (number.makeRect_ocr() == 0)
                std::cout << "画像保存エラー!" << std::endl;
            else
                std::cout << "所属と学生証番号が保存されました。" << std::endl;
            name.set(0.19, 0.58, 0.54, 0.13, rect, (char*)"name.tiff");
            if (name.makeRect_ocr() == 0)
                std::cout << "画像保存エラー!" << std::endl;
            else
                std::cout << "名前が保存されました。" << std::endl;
        }
        if (c == 'h') // write the results to an HTML table
        {
            std::ofstream ofs("card.htm");
            ofs << "<!DOCTYPE html><html><head><meta charset=\"utf-8\" /><title>学生証スキャナー</title></head><body><div style=\"margin:0px;padding:0px;\" align=\"center\"><table width=\"98%\" style=\"border-collapse: collapse;border:3px solid #FFFF66;background-color:#FFFFFF;color:#000000;text-align:left;\"><tbody><tr><th style=\"border:3px solid #FFFF66;background-color:#FFB533;color:#339900;text-align:center;\">番号</th><th style=\"border:3px solid #FFFF66;background-color:#FFB533;color:#339900;text-align:center;\">名前</th><th style=\"border:3px solid #FFFF66;background-color:#FFB533;color:#339900;text-align:center;\">顔写真</th></tr>";
            for (int j = 0; j < count; j++)
            {
                char temp[50];
                sprintf(temp, "pic/00%dface.jpg", j + 1);
                ofs << "<tr><td style=\"border:3px solid #FFFF66;text-align:center;\">" << save.str0[j]
                    << "</td><td style=\"border:3px solid #FFFF66;text-align:center;\">" << save.str1[j]
                    << "</td><td style=\"border:3px solid #FFFF66;text-align:center;\"><img src = \"" << temp
                    << "\"></td></tr>";
            }
            ofs << "</tbody></table><div style=\"width:98%;\" align=\"right\"><a href=\"http://sunafukey.fc2web.com/htmltag/tablelist.html\" title=\"HTML表テンプレート\" style=\"color:#C0C0C0;font-size:50%;text-decoration:none;\">HTML表</a></div></div></body></html>" << std::endl;
            system("google-chrome file:///home/denjo/opencv/card.htm");
        }
    }

    // release image buffers
    cvReleaseImage(&img);
    cvReleaseImage(&img0);
    cvReleaseImage(&tmp);
    cvReleaseImage(&tmp2);
    cvReleaseImage(&gray);
    cvReleaseImage(&gray2);
    return 0;
}