/* Draws a single OXFD feature point on an image.
   Parameters: img - image pointer; feat - feature point to draw; color - drawing color. */
static void draw_oxfd_feature(IplImage* img, struct feature* feat, CvScalar color)
{
    double m[4] = { feat->a, feat->b, feat->b, feat->c };
    double v[4] = { 0 };   /* eigenvector data */
    double e[2] = { 0 };   /* eigenvalue data */
    CvMat M, V, E;
    double alpha, l1, l2;

    /* compute the axes and orientation of the ellipse */
    cvInitMatHeader(&M, 2, 2, CV_64FC1, m, CV_AUTOSTEP);   /* shape matrix */
    cvInitMatHeader(&V, 2, 2, CV_64FC1, v, CV_AUTOSTEP);   /* matrix formed by two 2x1 eigenvectors */
    cvInitMatHeader(&E, 2, 1, CV_64FC1, e, CV_AUTOSTEP);   /* eigenvalues */
    cvEigenVV(&M, &V, &E, DBL_EPSILON, 0, 0);              /* compute eigenvalues and eigenvectors */
    l1 = 1 / sqrt(e[1]);
    l2 = 1 / sqrt(e[0]);
    alpha = -atan2(v[1], v[0]);
    alpha *= 180 / CV_PI;

    /* draw the ellipse and a cross marker */
    cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(l2, l1), alpha, 0, 360, CV_RGB(0, 0, 0), 3, 8, 0);
    cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(l2, l1), alpha, 0, 360, color, 1, 8, 0);
    cvLine(img, cvPoint(feat->x + 2, feat->y), cvPoint(feat->x - 2, feat->y), color, 1, 8, 0);
    cvLine(img, cvPoint(feat->x, feat->y + 2), cvPoint(feat->x, feat->y - 2), color, 1, 8, 0);
}
/* Draws a single Oxford-type feature @param img image on which to draw @param feat feature to be drawn @param color color in which to draw */ void draw_oxfd_feature( IplImage* img, struct feature* feat, CvScalar color ) { double m[4] = { feat->a, feat->b, feat->b, feat->c }; double v[4] = { 0 }; double e[2] = { 0 }; CvMat M, V, E; double alpha, l1, l2; /* compute axes and orientation of ellipse surrounding affine region */ cvInitMatHeader( &M, 2, 2, CV_64FC1, m, CV_AUTOSTEP ); cvInitMatHeader( &V, 2, 2, CV_64FC1, v, CV_AUTOSTEP ); cvInitMatHeader( &E, 2, 1, CV_64FC1, e, CV_AUTOSTEP ); #if CV_MAJOR_VERSION==1 cvEigenVV( &M, &V, &E, DBL_EPSILON ); #else cvEigenVV( &M, &V, &E, DBL_EPSILON, -1,-1 ); #endif l1 = 1 / sqrt( e[1] ); l2 = 1 / sqrt( e[0] ); alpha = -atan2( v[1], v[0] ); alpha *= 180 / CV_PI; cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( l2, l1 ), alpha, 0, 360, CV_RGB(0,0,0), 3, 8, 0 ); cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( l2, l1 ), alpha, 0, 360, color, 1, 8, 0 ); cvLine( img, cvPoint( feat->x+2, feat->y ), cvPoint( feat->x-2, feat->y ), color, 1, 8, 0 ); cvLine( img, cvPoint( feat->x, feat->y+2 ), cvPoint( feat->x, feat->y-2 ), color, 1, 8, 0 ); }
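A minimal usage sketch for the function above (not part of the original listings): it assumes a `struct feature` array `feat` of length `n` has already been filled in elsewhere (e.g. by an Oxford-format feature reader) and simply draws each feature in turn.

/* Hypothetical driver for draw_oxfd_feature(); `feat` and `n` are assumed inputs. */
void draw_oxfd_features( IplImage* img, struct feature* feat, int n )
{
    int i;
    CvScalar color = CV_RGB( 255, 255, 255 );
    for( i = 0; i < n; i++ )
        draw_oxfd_feature( img, feat + i, color );
}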
// Draws the minimum-area bounding ellipse of the contour `contour` on the image srcImg.
CvBox2D DrawMinAreaEllipse(IplImage *srcImg, CvSeq *contour, CvScalar color/*=CV_RGB(255,0,0)*/)
{
    int count = contour->total;   // number of points in the contour
    CvPoint center;
    CvSize size;
    CvBox2D box;
    if( count < 6 )
        return box;

    CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
    CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
    cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
    cvConvert( &points_i, points_f );

    // ellipse fitting
    box = cvFitEllipse2( points_f );
    cout << "Fitted ellipse parameters: angle=" << box.angle
         << ", center=(" << box.center.x << "," << box.center.y << ")"
         << ", size(w,h)=(" << box.size.width << "," << box.size.height << ")" << endl;

    // read back the ellipse parameters
    center = cvPointFrom32f(box.center);
    size.width = cvRound(box.size.width*0.5) + 1;
    size.height = cvRound(box.size.height*0.5) + 1;

    // draw the ellipse
    cvEllipse(srcImg, center, size, -box.angle, 0, 360, color, 1, CV_AA, 0);

    cvReleaseMat(&points_f);
    return box;
}
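A sketch of how DrawMinAreaEllipse might be driven (illustrative only: `srcImg` and the 8-bit single-channel binary image `bin` do not appear in the original snippet and are assumed here; note cvFindContours modifies its input image).

// Hypothetical caller: fit and draw an ellipse for every external contour of `bin`.
void DrawAllContourEllipses(IplImage* srcImg, IplImage* bin)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = NULL;
    cvFindContours(bin, storage, &contours, sizeof(CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    for (CvSeq* c = contours; c != NULL; c = c->h_next)
        DrawMinAreaEllipse(srcImg, c, CV_RGB(255, 0, 0));
    cvReleaseMemStorage(&storage);
}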
void MarkOut(IplImage *img, CvRect *rc, CvRect *parentRc, double scale, char shape,
             CvScalar color/* = CV_RGB(255, 0, 0)*/, int thickness/* = 1*/)
{
    // If parentRc is given, parentRc is in img coordinates and rc is relative to parentRc.
    int px = (parentRc ? parentRc->x : 0), py = (parentRc ? parentRc->y : 0);
    if(shape == 'r')   // draw a rectangle
    {
        cvRectangle(img,
            cvPoint(cvRound((px + rc->x)*scale), cvRound((py + rc->y)*scale)),
            cvPoint(cvRound((px + rc->x + rc->width)*scale), cvRound((py + rc->y + rc->height)*scale)),
            color, thickness, 8, 0);
    }
    else if (shape == 'e')   // draw an ellipse
    {
        CvPoint center;
        CvSize axes;
        center.x = cvRound((px + rc->x + rc->width*0.5)*scale);
        center.y = cvRound((py + rc->y + rc->height*0.5)*scale);
        axes.width = cvRound(rc->width*0.5*scale);
        axes.height = cvRound(rc->height*0.5*scale);
        //printf("w:%d h:%d\n", axes.width, axes.height);
        cvEllipse(img, center, axes, 0, 0, 360, color, thickness);
    }
    else if (shape == 'l')   // draw a line
    {
        cvLine(img,
            cvPoint(cvRound((px + rc->x)*scale), cvRound((py + rc->y + rc->height/2)*scale)),
            cvPoint(cvRound((px + rc->x + rc->width)*scale), cvRound((py + rc->y + rc->height/2)*scale)),
            color, thickness);
    }
}
void Ellipse::draw(Image& img, double intensity, int thickness, int line_type) const { CV_FUNCNAME( "Ellipse::draw" ); __BEGIN__; int shift = 0; double fixpoint_factor = static_cast<double>(1 << shift); CvPoint center = cvPoint( cvRound(this->params_.center.x * fixpoint_factor), cvRound(this->params_.center.y * fixpoint_factor) ); CvSize axes = cvSize(cvRound(this->params_.size.width * fixpoint_factor * 0.5) , cvRound(this->params_.size.height * fixpoint_factor * 0.5) ); double angle = static_cast<double>( - this->params_.angle ) * 180 / CV_PI; CV_CALL( cvEllipse(img.get_ipl_image(), center, axes, angle, 0, 360, cvRealScalar(intensity), thickness, line_type) ); __END__; __ISL_CHECK_ERROR__; }
// -------------------------------------------------------------------------- void BlobModel::UpdateHeatMap(IplImage* motionmap) { // for each head candidate draw a circle on the floor; add it to the heatmap image // NOTE: due to scaling, circle becomes an ellipse double BLOB_RADIUS = 50; // blob projection on the floor average radius in centimeters bool oneclose = false; for (int i=0;i<blob.GetCount();i++) { CvSeq* heads = doc->blobmodel.blob[i]->heads; for (int j=0;j<heads->total;j++) { BlobRay* br = (BlobRay*)cvGetSeqElem(heads, j); CvPoint3D32f head, foot; doc->cameramodel.coordsImage2RealSameXY_Feet2Floor(cvPointTo32f(br->p1), cvPointTo32f(br->p2), &head, &foot); // ignore short candidates if (head.z < doc->bodymodel.m_minHeight) continue; // ignore repeating artifact closeby candidates if (oneclose) continue; if (d(foot) < BLOB_RADIUS*2) oneclose = true; CvPoint c = doc->floormodel.coordsReal2Floor(foot); CvSize axes = doc->floormodel.sizeReal2Floor(cvSize2D32f(BLOB_RADIUS, BLOB_RADIUS)); cvZero(motionmaptemp); cvEllipse(motionmaptemp, c, axes, 0, 0, 360, cvScalar(1), CV_FILLED); cvAcc(motionmaptemp, motionmap); } } }
// Function that draws an ellipse
void draw_ellipse(IplImage* img)
{
    if(g_ellipse_center.x == -1)
        return;
    CvScalar value = getRandColor();
    cvEllipse( img, g_ellipse_center, g_ellipse_axes, 0, 0, 360, value, 2 );
}
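The snippet above relies on globals (`g_ellipse_center`, `g_ellipse_axes`) and a `getRandColor()` helper defined elsewhere; a plausible stand-in for the helper, purely for illustration (assumes <stdlib.h> for rand), could be:

/* Illustrative stand-in only; the original getRandColor() is not shown in this listing. */
static CvScalar getRandColor(void)
{
    return CV_RGB(rand() % 256, rand() % 256, rand() % 256);
}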
// Trackbar callback function: finds contours, draws them,
// and approximates each one by an ellipse.
void process_image(int h)
{
    CvMemStorage* storage;
    CvSeq* contour;

    // Create dynamic structure and sequence.
    storage = cvCreateMemStorage(0);
    contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), storage);

    // Threshold the source image. This is needed for cvFindContours().
    cvThreshold( image03, image02, slider_pos, 255, CV_THRESH_BINARY );

    // Find all contours.
    cvFindContours( image02, storage, &contour, sizeof(CvContour),
                    CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

    // Clear images. IPL use.
    cvZero(image02);
    cvZero(image04);

    // This loop draws all contours and approximates each one by an ellipse.
    for(; contour; contour = contour->h_next)
    {
        int count = contour->total;   // number of points in the contour
        CvPoint center;
        CvSize size;
        CvBox2D box;

        // The number of points must be at least 6 (for cvFitEllipse_32f).
        if( count < 6 )
            continue;

        CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
        CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
        cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
        cvConvert( &points_i, points_f );

        // Fit an ellipse to the current contour.
        box = cvFitEllipse2( points_f );

        // Draw the current contour.
        cvDrawContours(image04, contour, CV_RGB(255,255,255), CV_RGB(255,255,255), 0, 1, 8, cvPoint(0,0));

        // Convert ellipse data from float to integer representation.
        center = cvPointFrom32f(box.center);
        size.width = cvRound(box.size.width*0.5);
        size.height = cvRound(box.size.height*0.5);

        // Draw the ellipse.
        cvEllipse(image04, center, size, -box.angle, 0, 360, CV_RGB(0,0,255), 1, CV_AA, 0);

        cvReleaseMat(&points_f);
    }

    // Show image. HighGUI use.
    cvShowImage( "Result", image04 );
}
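The callback above follows the classic OpenCV fit-ellipse demo; a minimal, hypothetical wiring for it might look like the sketch below (the globals `image02`, `image03`, `image04` and `slider_pos` are assumed to be created and loaded elsewhere).

/* Hypothetical main() wiring for process_image(); the globals are assumed to exist. */
int main(void)
{
    cvNamedWindow("Result", 1);
    cvCreateTrackbar("Threshold", "Result", &slider_pos, 255, process_image);
    process_image(slider_pos);   /* draw once with the initial threshold */
    cvWaitKey(0);
    cvDestroyWindow("Result");
    return 0;
}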
void cvEllipseAA( CvArr* img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, double color, int scale) { cvEllipse( img, center, axes, angle, start_angle, end_angle, cvColorToScalar(color, cvGetElemType(img)), 1, CV_AA, scale ); }
void moBlobTrackerModule::applyFilter(IplImage *src) { IplImage* fg_map = NULL; assert( src != NULL ); CvSize size = cvGetSize(src); if ( src->nChannels != 1 ) { this->setError("BlobTracker input image must be a single channel binary image."); this->stop(); return; } this->tracker->Process(src, fg_map); cvSet(this->output_buffer, CV_RGB(0,0,0)); this->clearBlobs(); for ( int i = this->tracker->GetBlobNum(); i > 0; i-- ) { CvBlob* pB = this->tracker->GetBlob(i-1); int minsize = this->property("min_size").asInteger(); int maxsize = this->property("max_size").asInteger(); // Assume circular blobs if (pB->w < minsize || maxsize < pB->w || pB->h < minsize || maxsize < pB->h) continue; // draw the blob on output image if ( this->output->getObserverCount() > 0 ) { CvPoint p = cvPoint(cvRound(pB->x*256),cvRound(pB->y*256)); CvSize s = cvSize(MAX(1,cvRound(CV_BLOB_RX(pB)*256)), MAX(1,cvRound(CV_BLOB_RY(pB)*256))); int c = cvRound(255*this->tracker->GetState(CV_BLOB_ID(pB))); cvEllipse(this->output_buffer, p, s, 0, 0, 360, CV_RGB(c,255-c,0), cvRound(1+(3*0)/255), CV_AA, 8); } LOGM(MO_DEBUG, "Blob: id="<< pB->ID <<" pos=" << pB->x \ << "," << pB->y << "size=" << pB->w << "," << pB->h); // add the blob in data moDataGenericContainer *touch = new moDataGenericContainer(); touch->properties["type"] = new moProperty("blob"); touch->properties["id"] = new moProperty(pB->ID); touch->properties["x"] = new moProperty(pB->x / size.width); touch->properties["y"] = new moProperty(pB->y / size.height); touch->properties["w"] = new moProperty(pB->w); touch->properties["h"] = new moProperty(pB->h); this->blobs.push_back(touch); }; this->output_data->push(&this->blobs); }
/*================================================================
  Name     : DrawFundamental
  Argument : IplImage* img : IplImage structure of the image to draw on
  Return   : no return
  About    : After drawing the x and y axes,
             draws circular arcs from -45 to 225 (deg)
  Author   : Ryodo Tanaka
=================================================================*/
void DrawFundamental(IplImage* img)
{
    int i;

    // origin coordinates
    center_pt.x = (int)(img -> width / 2);
    center_pt.y = (int)(img -> height / 2);

    // horizontal axis: la_pt1 -> la_pt2
    la_pt1.x = 0;                    la_pt1.y = center_pt.y;
    la_pt2.x = (int)(img -> width);  la_pt2.y = center_pt.y;
    cvLine(img,la_pt1,la_pt2,CV_RGB(250,250,250),2,8,0);

    // vertical axis: lo_pt1 -> lo_pt2
    lo_pt1.x = center_pt.x;  lo_pt1.y = 0;
    lo_pt2.x = center_pt.x;  lo_pt2.y = (int)(img -> height);
    cvLine(img,lo_pt1,lo_pt2,CV_RGB(250,250,250),2,8,0);

    // diagonal axes (+-30 deg lines): center_pt -> slant_pt1 and slant_pt2
    slant_pt1.x = 0;                    slant_pt1.y = (int)(img -> height);
    slant_pt2.x = (int)(img -> width);  slant_pt2.y = (int)(img -> height);
    cvLine(img,center_pt,slant_pt1,CV_RGB(250,250,250),2,8,0);
    cvLine(img,center_pt,slant_pt2,CV_RGB(250,250,250),2,8,0);

    /* // lines at the specified display angles: center_pt -> limit_pt1 and limit_pt2 */
    /* limit_pt1.x = center_pt.x + (int)((img -> width/2)*cos(120*PI/180)); */
    /* limit_pt1.y = center_pt.y - (int)((img -> height/2)*sin(120*PI/180)); */
    /* limit_pt2.x = center_pt.x + (int)((img -> width/2)*cos(60*PI/180)); */
    /* limit_pt2.y = center_pt.y - (int)((img -> height/2)*sin(60*PI/180)); */
    /* cvLine(img,center_pt,limit_pt1,CV_RGB(255,0,0),1,8,0); */
    /* cvLine(img,center_pt,limit_pt2,CV_RGB(255,0,0),1,8,0); */

    // draw circles from FIRSTRANGE to LASTRANGE (deg) every 40 pix (= 1 m)
    for(i=0; i<5; i++){
        ellipse.width  = (i+1)*(img->width/2)/5;
        ellipse.height = (i+1)*(img->height/2)/5;
        cvEllipse(img,center_pt,ellipse,0,-225,45,CV_RGB(250,250,250),1,8,0);
    }
}
Point2f EyeTracker::detectPupil() { /* * Uses cvHoughCircles to find circles on gray eye image. * Sets prev_center with last circle. */ doHoughTransform(); /* Sets featurePoints */ noktadanAcil(); CvPoint2D32f p32[NPOINTS]; CvBox2D box; for(size_t i = 0; i < featurePoints.size(); ++i) { p32[i] = cvPoint2D32f( featurePoints[i].x, featurePoints[i].y ); } cvFitEllipse(p32, NPOINTS, &box); cvEllipse(grayEyeImagePts, cvPoint((int)box.center.x, (int)box.center.y), cvSize((int)(box.size.width/2), (int)(box.size.height/2)), box.angle, 0, 360, cvScalar(WHITE, 0, 0, 0), 3); cout << "x=" << (int)box.center.x << "-y=" << (int) box.center.y << endl; if(box.size.width < 1) { firstDetect = false; } else { /* Adds point to centerList, sets aver_center to average of centerList */ addToList(cvPoint( cvRound(box.center.x), cvRound(box.center.y) ) ); prev_center.x = (int)box.center.x; prev_center.y = (int)box.center.y; } return box.center; }
void draw_ellipses(IplImage *img, int min_cnt, int max_cnt) { int i, val, cnt; CvPoint center; CvSize axes; double angle; CvScalar color; cnt = random_range(min_cnt, max_cnt); for (i = 0; i < cnt; i++) { val = random_range(BACKGROUND_COLOR - 60, BACKGROUND_COLOR - 20); color = cvScalarAll(val); center.x = random_range(50, IMAGE_WIDTH - 50); center.y = random_range(50, IMAGE_HEIGHT - 50); axes.width = random_range(5, 40); axes.height = random_range(5, 40); angle = random_range(0, 180); cvEllipse(img, center, axes, angle, 0.0, 360.0, color, 1, CV_AA, 0); } }
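`random_range` is not included in the listing above; a simple stand-in consistent with how it is used there (an inclusive integer range, assuming <stdlib.h>) could be:

/* Illustrative stand-in; the original random_range() is defined elsewhere. */
static int random_range(int lo, int hi)
{
    return lo + rand() % (hi - lo + 1);
}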
void DrawImage(IplImage* pImg) { if (pImg == NULL) return; cvZero(pImg); for (int i = 0; i < 6; i++) { int dx = (i % 2) * 250 - 30; int dy = (i / 2) * 150; CvScalar white = cvRealScalar(255); CvScalar black = cvRealScalar(0); if (i == 0) { for (int j = 0; j <= 10; j++) { double angle = (j + 5)*CV_PI / 21; cvLine(pImg, cvPoint(cvRound(dx + 100 + j * 10 - 80 * cos(angle)), cvRound(dy + 100 - 90 * sin(angle))), cvPoint(cvRound(dx + 100 + j * 10 - 30 * cos(angle)), cvRound(dy + 100 - 30 * sin(angle))), white, 1, 8, 0); } } cvEllipse(pImg, cvPoint(dx + 150, dy + 100), cvSize(100, 70), 0, 0, 360, white, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 115, dy + 70), cvSize(30, 20), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 185, dy + 70), cvSize(30, 20), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 115, dy + 70), cvSize(15, 15), 0, 0, 360, white, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 185, dy + 70), cvSize(15, 15), 0, 0, 360, white, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 115, dy + 70), cvSize(5, 5), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 185, dy + 70), cvSize(5, 5), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 150, dy + 100), cvSize(10, 5), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 150, dy + 150), cvSize(40, 10), 0, 0, 360, black, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 27, dy + 100), cvSize(20, 35), 0, 0, 360, white, -1, 8, 0); cvEllipse(pImg, cvPoint(dx + 273, dy + 100), cvSize(20, 35), 0, 0, 360, white, -1, 8, 0); } }
void CueContrastKernel::obtainInput() { CVImage* cvgrayimg = cvGrayImageIn.getBuffer(); if(!cvgrayimg) { std::cerr << getName() << "::execute()::ERROR::cvGrayImageIn is NULL!\n"; return; } if(!mp_cvimg1) { CvSize imgsize = cvSize(cvgrayimg->width, cvgrayimg->height); mp_cvimg1 = new CVImage(imgsize, CV_8UC1, 0); mp_cvimg2 = new CVImage(imgsize, CV_8UC1, 0); mp_cvminimg = new CVImage(imgsize, CV_8UC1, 0); mp_cvmaximg = new CVImage(imgsize, CV_8UC1, 0); mdp_cvimg[0] = mp_cvimg1; mdp_cvimg[1] = mp_cvimg2; } Ipp8u* srcData = (Ipp8u*)(cvgrayimg->ipl->imageData); int width = cvgrayimg->width; int height = cvgrayimg->height; for(unsigned int i = 0;i<m_dimData;++i){ int scale = m_contrastscale[i]; IppiSize kernel1 = {scale, scale}; IppiPoint anchor1 = {(scale-1)>>2, (scale-1)>>2}; IppiSize roi1 = {width-scale+1, height-scale+1}; Ipp8u* targetData = (Ipp8u*)(mp_cvminimg->ipl->imageData); ippiFilterMin_8u_C1R(srcData, width, targetData, width, roi1, kernel1, anchor1); targetData = (Ipp8u*)(mp_cvmaximg->ipl->imageData); ippiFilterMax_8u_C1R(srcData, width, targetData, width, roi1, kernel1, anchor1); unsigned char* maxdata = (unsigned char*)(mp_cvmaximg->ipl->imageData); unsigned char* mindata = (unsigned char*)(mp_cvminimg->ipl->imageData); unsigned char* contdata = (unsigned char*)(mdp_cvimg[i]->ipl->imageData); int widthstep = mdp_cvimg[i]->ipl->widthStep; for(int y = 0; y < height; ++y) { for(int x = 0; x < width; ++x) { int pos = x + y*widthstep; float tempmin = (float)(mindata[pos]); float tempmax = (float)(maxdata[pos]); if ( (tempmin + tempmax) != 0.0 ) contdata[pos] = cvRound((tempmax-tempmin)/(tempmax+tempmin)*255.0); else contdata[pos] = 0; } } } cvImage1Out.setBuffer(mdp_cvimg[0]); cvImage1Out.out(); cvImage2Out.setBuffer(mdp_cvimg[1]); cvImage2Out.out(); /* CVImage* cvrgbimg = cvImageIn.getBuffer(); if(!cvrgbimg) { std::cerr << getName() << "::execute()::ERROR::cvImageIn is NULL!\n"; return; } if(!mp_cvimg1) { CvSize imgsize = cvSize(cvrgbimg->width, cvrgbimg->height); mp_cvimg1 = new CVImage(imgsize, CV_8UC1, 0); mp_cvimg2 = new CVImage(imgsize, CV_8UC1, 0); mp_cvimg3 = new CVImage(imgsize, CV_8UC1, 0); } if(m_colormode == USE_RGB){ cvCvtPixToPlane( cvrgbimg->ipl, mp_cvimg3->ipl, mp_cvimg2->ipl, mp_cvimg1->ipl, NULL ); } else if(m_colormode == USE_HSV || m_colormode == USE_HS){ if(!mp_cvhsvimg) { CvSize imgsize = cvSize(cvrgbimg->width, cvrgbimg->height); mp_cvhsvimg = new CVImage(imgsize, CV_8UC3, 0); } cvCvtColor( cvrgbimg->ipl, mp_cvhsvimg->ipl, CV_BGR2HSV); cvCvtPixToPlane( mp_cvhsvimg->ipl, mp_cvimg1->ipl, mp_cvimg2->ipl, mp_cvimg3->ipl, NULL ); } mdp_cvimg[0] = mp_cvimg1; mdp_cvimg[1] = mp_cvimg2; mdp_cvimg[2] = mp_cvimg3; cvImage1Out.setBuffer(mp_cvimg1); cvImage1Out.out(); cvImage2Out.setBuffer(mp_cvimg2); cvImage2Out.out(); cvImage3Out.setBuffer(mp_cvimg3); cvImage3Out.out(); */ } void CueContrastKernel::cropNResize(CvPoint2D32f* pos, CvSize* size, float hval) { if(!mp_cvmaskimg){ mp_cvmaskimg = new CVImage(*size, CV_8UC1, 0); } CvRect inputrect; inputrect.x = cvRound((float)pos->x - (float)size->width/2.0); inputrect.y = cvRound((float)pos->y - (float)size->height/2.0); inputrect.width = size->width; inputrect.height = size->height; for(unsigned int i = 0;i<m_dimData;++i) { mdpCropResize[i]->cvImageIn.setBuffer(mdp_cvimg[i]); mdpCropResize[i]->rectIn.setBuffer(&inputrect); mdpCropResize[i]->sizeIn.setBuffer(size); mdpCropResize[i]->execute(); mdp_cvcroppedimg[i] = mdpCropResize[i]->cvImageOut.getBuffer(); } CvPoint maskcenter; maskcenter.x = cvRound((float)(size->width)/2.0 - 
1.0); maskcenter.y = cvRound((float)(size->height)/2.0 - 1.0); CvSize maskaxis; maskaxis.width = cvRound((float)(size->width)/2.0 - 1.0); maskaxis.height = cvRound((float)(size->height)/2.0 - 1.0); drawEllipse(mp_cvmaskimg, &maskcenter, &maskaxis); cvTemplateOut.setBuffer(mdp_cvcroppedimg[0]); cvTemplateOut.out(); cvMaskOut.setBuffer(mp_cvmaskimg); cvMaskOut.out(); } void CueContrastKernel::drawEllipse(CVImage* maskimg, CvPoint* maskcenter, CvSize* maskaxis) { cvSetZero(maskimg->ipl); float angle = 0; float startAngle = 0; float endAngle = 360; int thickness = -1; cvEllipse( maskimg->ipl, *maskcenter, *maskaxis, angle, startAngle, endAngle, CV_RGB(255,255,255), thickness ); }
/* * Performs the face detection */ static GstFlowReturn gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf, IplImage * img) { GstFaceDetect *filter = GST_FACE_DETECT (base); if (filter->cvFaceDetect) { GstMessage *msg = NULL; GstStructure *s; GValue facelist = { 0 }; GValue facedata = { 0 }; CvSeq *faces; CvSeq *mouth = NULL, *nose = NULL, *eyes = NULL; gint i; gboolean do_display = FALSE; if (filter->display) { if (gst_buffer_is_writable (buf)) { do_display = TRUE; } else { GST_LOG_OBJECT (filter, "Buffer is not writable, not drawing faces."); } } cvCvtColor (img, filter->cvGray, CV_RGB2GRAY); cvClearMemStorage (filter->cvStorage); faces = gst_face_detect_run_detector (filter, filter->cvFaceDetect, filter->min_size_width, filter->min_size_height); msg = gst_face_detect_message_new (filter, buf); g_value_init (&facelist, GST_TYPE_LIST); for (i = 0; i < (faces ? faces->total : 0); i++) { CvRect *r = (CvRect *) cvGetSeqElem (faces, i); guint mw = filter->min_size_width / 8; guint mh = filter->min_size_height / 8; guint rnx = 0, rny = 0, rnw, rnh; guint rmx = 0, rmy = 0, rmw, rmh; guint rex = 0, rey = 0, rew, reh; gboolean have_nose, have_mouth, have_eyes; /* detect face features */ if (filter->cvNoseDetect) { rnx = r->x + r->width / 4; rny = r->y + r->height / 4; rnw = r->width / 2; rnh = r->height / 2; cvSetImageROI (filter->cvGray, cvRect (rnx, rny, rnw, rnh)); nose = gst_face_detect_run_detector (filter, filter->cvNoseDetect, mw, mh); have_nose = (nose && nose->total); cvResetImageROI (filter->cvGray); } else { have_nose = FALSE; } if (filter->cvMouthDetect) { rmx = r->x; rmy = r->y + r->height / 2; rmw = r->width; rmh = r->height / 2; cvSetImageROI (filter->cvGray, cvRect (rmx, rmy, rmw, rmh)); mouth = gst_face_detect_run_detector (filter, filter->cvMouthDetect, mw, mh); have_mouth = (mouth && mouth->total); cvResetImageROI (filter->cvGray); } else { have_mouth = FALSE; } if (filter->cvEyesDetect) { rex = r->x; rey = r->y; rew = r->width; reh = r->height / 2; cvSetImageROI (filter->cvGray, cvRect (rex, rey, rew, reh)); eyes = gst_face_detect_run_detector (filter, filter->cvEyesDetect, mw, mh); have_eyes = (eyes && eyes->total); cvResetImageROI (filter->cvGray); } else { have_eyes = FALSE; } GST_LOG_OBJECT (filter, "%2d/%2d: x,y = %4u,%4u: w.h = %4u,%4u : features(e,n,m) = %d,%d,%d", i, faces->total, r->x, r->y, r->width, r->height, have_eyes, have_nose, have_mouth); s = gst_structure_new ("face", "x", G_TYPE_UINT, r->x, "y", G_TYPE_UINT, r->y, "width", G_TYPE_UINT, r->width, "height", G_TYPE_UINT, r->height, NULL); if (have_nose) { CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0); GST_LOG_OBJECT (filter, "nose/%d: x,y = %4u,%4u: w.h = %4u,%4u", nose->total, rnx + sr->x, rny + sr->y, sr->width, sr->height); gst_structure_set (s, "nose->x", G_TYPE_UINT, rnx + sr->x, "nose->y", G_TYPE_UINT, rny + sr->y, "nose->width", G_TYPE_UINT, sr->width, "nose->height", G_TYPE_UINT, sr->height, NULL); } if (have_mouth) { CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0); GST_LOG_OBJECT (filter, "mouth/%d: x,y = %4u,%4u: w.h = %4u,%4u", mouth->total, rmx + sr->x, rmy + sr->y, sr->width, sr->height); gst_structure_set (s, "mouth->x", G_TYPE_UINT, rmx + sr->x, "mouth->y", G_TYPE_UINT, rmy + sr->y, "mouth->width", G_TYPE_UINT, sr->width, "mouth->height", G_TYPE_UINT, sr->height, NULL); } if (have_eyes) { CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0); GST_LOG_OBJECT (filter, "eyes/%d: x,y = %4u,%4u: w.h = %4u,%4u", eyes->total, rex + sr->x, rey + sr->y, sr->width, sr->height); 
gst_structure_set (s, "eyes->x", G_TYPE_UINT, rex + sr->x, "eyes->y", G_TYPE_UINT, rey + sr->y, "eyes->width", G_TYPE_UINT, sr->width, "eyes->height", G_TYPE_UINT, sr->height, NULL); } g_value_init (&facedata, GST_TYPE_STRUCTURE); g_value_take_boxed (&facedata, s); gst_value_list_append_value (&facelist, &facedata); g_value_unset (&facedata); s = NULL; if (do_display) { CvPoint center; CvSize axes; gdouble w, h; gint cb = 255 - ((i & 3) << 7); gint cg = 255 - ((i & 12) << 5); gint cr = 255 - ((i & 48) << 3); w = r->width / 2; h = r->height / 2; center.x = cvRound ((r->x + w)); center.y = cvRound ((r->y + h)); axes.width = w; axes.height = h * 1.25; /* tweak for face form */ cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb), 3, 8, 0); if (have_nose) { CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0); w = sr->width / 2; h = sr->height / 2; center.x = cvRound ((rnx + sr->x + w)); center.y = cvRound ((rny + sr->y + h)); axes.width = w; axes.height = h * 1.25; /* tweak for nose form */ cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb), 1, 8, 0); } if (have_mouth) { CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0); w = sr->width / 2; h = sr->height / 2; center.x = cvRound ((rmx + sr->x + w)); center.y = cvRound ((rmy + sr->y + h)); axes.width = w * 1.5; /* tweak for mouth form */ axes.height = h; cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb), 1, 8, 0); } if (have_eyes) { CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0); w = sr->width / 2; h = sr->height / 2; center.x = cvRound ((rex + sr->x + w)); center.y = cvRound ((rey + sr->y + h)); axes.width = w * 1.5; /* tweak for eyes form */ axes.height = h; cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb), 1, 8, 0); } } } gst_structure_set_value ((GstStructure *) gst_message_get_structure (msg), "faces", &facelist); g_value_unset (&facelist); gst_element_post_message (GST_ELEMENT (filter), msg); } return GST_FLOW_OK; }
void Figure::draw(IplImage* image)
{
    int ca, co, r;
    float cos60=0.5, sin60=0.8660;
    float cos72=0.3090, sin72=0.9510;
    int xm=getHalfOf('x');
    int ym=getHalfOf('y');
    //r=(lp.x-fp.x)/2;
    r = lp.x-fp.x>=0 ? (lp.x-fp.x)/2 : (fp.x-lp.x)/2;
    switch(drawtype)
    {
    case 1:
    case 2: // line
    {
        cvLine(image,fp,lp,color,thickness,8);
        break;
    }
    case 3: // quadrilateral
    {
        cvRectangle(image,fp,lp,color,thickness,8);
        break;
    }
    case 4: // triangle
    {
        cvLine(image,cvPoint(fp.x<lp.x?fp.x:lp.x,fp.y<lp.y?lp.y:fp.y),cvPoint(lp.x>fp.x?lp.x:fp.x,lp.y>fp.y?lp.y:fp.y),color,thickness,8); // base
        cvLine(image,cvPoint(xm,fp.y<lp.y?fp.y:lp.y),cvPoint(fp.x<lp.x?fp.x:lp.x,fp.y<lp.y?lp.y:fp.y),color,thickness,8); // left side
        cvLine(image,cvPoint(xm,fp.y<lp.y?fp.y:lp.y),cvPoint(lp.x>fp.x?lp.x:fp.x,lp.y>fp.y?lp.y:fp.y),color,thickness,8); // right side
        break;
    }
    case 'c': // circle
    {
        cvCircle(image,cvPoint(xm,ym),r,color,thickness,8);
        break;
    }
    case 5: // ellipse
    {
        int firstaxis, secondaxis;
        //firstaxis=(lp.x-fp.x)/2;
        firstaxis = lp.x-fp.x>=0 ? (lp.x-fp.x)/2 : (fp.x-lp.x)/2;
        //secondaxis=(lp.y-fp.y)/2;
        secondaxis = lp.y-fp.y>=0 ? (lp.y-fp.y)/2 : (fp.y-lp.y)/2;
        cvEllipse(image,cvPoint(xm,ym),cvSize(firstaxis,secondaxis),0,180,-180,color,thickness);
        break;
    }
    case 6: // pentagon
    {
        ca=r*cos72;
        co=r*sin72;
        //CvPoint pp1= cvPoint(xm,fp.y);
        CvPoint pp1= cvPoint(xm,fp.y<lp.y?fp.y:lp.y);
        CvPoint pp2= cvPoint(xm+co,ym-ca);
        CvPoint pp3= cvPoint(xm-co,ym-ca);
        CvPoint pp4= cvPoint((xm+co)-ca,(ym-ca)+co);
        CvPoint pp5= cvPoint((xm-co)+ca,(ym-ca)+co);
        cvLine(image,pp1,pp2,color,thickness,8); // upper-right edge
        cvLine(image,pp2,pp4,color,thickness,8); // lower-right edge
        cvLine(image,pp4,pp5,color,thickness,8); // bottom edge
        cvLine(image,pp5,pp3,color,thickness,8); // lower-left edge
        cvLine(image,pp3,pp1,color,thickness,8); // upper-left edge
        break;
    }
    case 7: // hexagon
    {
        ca=r*cos60;
        co=r*sin60;
        //CvPoint ph1= cvPoint(lp.x,ym);
        CvPoint ph1= cvPoint(lp.x>fp.x?lp.x:fp.x,ym);
        CvPoint ph2= cvPoint(xm+ca,ym-co);
        CvPoint ph3= cvPoint(xm+ca,ym+co);
        //CvPoint ph4= cvPoint(fp.x,ym);
        CvPoint ph4= cvPoint(fp.x<lp.x?fp.x:lp.x,ym);
        CvPoint ph5= cvPoint(xm-ca,ym-co);
        CvPoint ph6= cvPoint(xm-ca,ym+co);
        cvLine(image,ph1,ph2,color,thickness,8); // upper-right edge
        cvLine(image,ph2,ph5,color,thickness,8); // top edge
        cvLine(image,ph5,ph4,color,thickness,8); // upper-left edge
        cvLine(image,ph4,ph6,color,thickness,8); // lower-left edge
        cvLine(image,ph6,ph3,color,thickness,8); // bottom edge
        cvLine(image,ph3,ph1,color,thickness,8); // lower-right edge
        break;
    }
    default:
        std::cout<<"\nWarning: no figure selected\n";
        drawtype = '?';
        break;
    }
}
static int drawing_test() { static int read_params = 0; static int read = 0; const int channel = 3; CvSize size = cvSize(600, 300); int i, j; int Errors = 0; if( !read_params ) { read_params = 1; trsCaseRead( &read, "/n/y", "y", "Read from file ?" ); } // Create image IplImage* image = cvCreateImage( size, IPL_DEPTH_8U, channel ); // cvLine cvZero( image ); for( i = 0; i < 100; i++ ) { CvPoint p1 = cvPoint( i - 30, i * 4 + 10 ); CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 ); cvLine( image, p1, p2, CV_RGB(178+i, 255-i, i), i % 10 ); } Errors += ProcessImage( image, "cvLine", read ); // cvLineAA cvZero( image ); for( i = 0; i < 100; i++ ) { CvPoint p1 = cvPoint( i - 30, i * 4 + 10 ); CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 ); cvLine( image, p1, p2, CV_RGB(178+i, 255-i, i), 1, CV_AA, 0 ); } //Errors += ProcessImage( image, "cvLineAA", read ); // cvRectangle cvZero( image ); for( i = 0; i < 100; i++ ) { CvPoint p1 = cvPoint( i - 30, i * 4 + 10 ); CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 ); cvRectangle( image, p1, p2, CV_RGB(178+i, 255-i, i), i % 10 ); } Errors += ProcessImage( image, "cvRectangle", read ); #if 0 named_window( "Diff", 0 ); #endif // cvCircle cvZero( image ); for( i = 0; i < 100; i++ ) { CvPoint p1 = cvPoint( i * 3, i * 2 ); CvPoint p2 = cvPoint( size.width - i * 3, size.height - i * 2 ); cvCircle( image, p1, i, CV_RGB(178+i, 255-i, i), i % 10 ); cvCircle( image, p2, i, CV_RGB(178+i, 255-i, i), i % 10 ); #if 0 show_iplimage( "Diff", image ); wait_key(0); #endif } Errors += ProcessImage( image, "cvCircle", read ); // cvCircleAA cvZero( image ); for( i = 0; i < 100; i++ ) { CvPoint p1 = cvPoint( i * 3, i * 2 ); CvPoint p2 = cvPoint( size.width - i * 3, size.height - i * 2 ); cvCircleAA( image, p1, i, RGB(i, 255 - i, 178 + i), 0 ); cvCircleAA( image, p2, i, RGB(i, 255 - i, 178 + i), 0 ); } Errors += ProcessImage( image, "cvCircleAA", read ); // cvEllipse cvZero( image ); for( i = 10; i < 100; i += 10 ) { CvPoint p1 = cvPoint( i * 6, i * 3 ); CvSize axes = cvSize( i * 3, i * 2 ); cvEllipse( image, p1, axes, 180 * i / 100, 90 * i / 100, 90 * (i - 100) / 100, CV_RGB(178+i, 255-i, i), i % 10 ); } Errors += ProcessImage( image, "cvEllipse", read ); // cvEllipseAA cvZero( image ); for( i = 10; i < 100; i += 10 ) { CvPoint p1 = cvPoint( i * 6, i * 3 ); CvSize axes = cvSize( i * 3, i * 2 ); cvEllipseAA( image, p1, axes, 180 * i / 100, 90 * i / 100, 90 * (i - 100) / 100, RGB(i, 255 - i, 178 + i), i % 10 ); } Errors += ProcessImage( image, "cvEllipseAA", read ); // cvFillConvexPoly cvZero( image ); for( j = 0; j < 5; j++ ) for( i = 0; i < 100; i += 10 ) { CvPoint p[4] = {{ j * 100 - 10, i }, { j * 100 + 10, i }, { j * 100 + 30, i * 2 }, { j * 100 + 170, i * 3 }}; cvFillConvexPoly( image, p, 4, CV_RGB(178+i, 255-i, i) ); } Errors += ProcessImage( image, "cvFillConvexPoly", read ); // cvFillPoly cvZero( image ); for( i = 0; i < 100; i += 10 ) { CvPoint p0[] = {{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}}; CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}}; CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}}; CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}}; CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}}; CvPoint* p[] = {p0, p1, p2, p3, p4}; int n[] = {4, 4, 4, 4, 4}; cvFillPoly( image, p, n, 5, CV_RGB(178+i, 255-i, i) ); } Errors += ProcessImage( image, "cvFillPoly", read ); // cvPolyLine cvZero( image ); for( i = 0; i < 100; i += 10 ) { CvPoint p0[] = 
{{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}}; CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}}; CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}}; CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}}; CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}}; CvPoint* p[] = {p0, p1, p2, p3, p4}; int n[] = {4, 4, 4, 4, 4}; cvPolyLine( image, p, n, 5, 1, CV_RGB(178+i, 255-i, i), i % 10 ); } Errors += ProcessImage( image, "cvPolyLine", read ); // cvPolyLineAA cvZero( image ); for( i = 0; i < 100; i += 10 ) { CvPoint p0[] = {{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}}; CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}}; CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}}; CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}}; CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}}; CvPoint* p[] = {p0, p1, p2, p3, p4}; int n[] = {4, 4, 4, 4, 4}; cvPolyLineAA( image, p, n, 5, 1, RGB(i, 255 - i, 178 + i), 0 ); } Errors += ProcessImage( image, "cvPolyLineAA", read ); // cvPolyLineAA cvZero( image ); for( i = 1; i < 10; i++ ) { CvFont font; cvInitFont( &font, CV_FONT_VECTOR0, (double)i / 5, (double)i / 5, (double)i / 10, i ); cvPutText( image, "privet. this is test. :)", cvPoint(0, i * 20), &font, CV_RGB(178+i, 255-i, i) ); } Errors += ProcessImage( image, "cvPutText", read ); cvReleaseImage( &image ); return Errors ? trsResult( TRS_FAIL, "errors" ) : trsResult( TRS_OK, "ok" ); }
void process_image() { int i, j; int *inliers_index; CvSize ellipse_axis; CvPoint gaze_point; static int lost_frame_num = 0; Grab_Camera_Frames(); cvZero(ellipse_image); cvSmooth(eye_image, eye_image, CV_GAUSSIAN, 5, 5); Reduce_Line_Noise(eye_image); if (save_image == 1) { printf("save image %d\n", image_no); sprintf(eye_file, "./Eye/Eye_%05d.jpg", image_no); image_no++; cvSaveImage(eye_file, eye_image); } //corneal reflection remove_corneal_reflection(eye_image, threshold_image, (int)start_point.x, (int)start_point.y, cr_window_size, (int)eye_image->height/10, corneal_reflection.x, corneal_reflection.y, corneal_reflection_r); printf("corneal reflection: (%d, %d)\n", corneal_reflection.x, corneal_reflection.y); Draw_Cross(ellipse_image, corneal_reflection.x, corneal_reflection.y, 15, 15, Yellow); //starburst pupil contour detection starburst_pupil_contour_detection((UINT8*)eye_image->imageData, eye_image->width, eye_image->height, edge_threshold, rays, min_feature_candidates); inliers_num = 0; inliers_index = pupil_fitting_inliers((UINT8*)eye_image->imageData, eye_image->width, eye_image->height, inliers_num); ellipse_axis.width = (int)pupil_param[0]; ellipse_axis.height = (int)pupil_param[1]; pupil.x = (int)pupil_param[2]; pupil.y = (int)pupil_param[3]; Draw_Cross(ellipse_image, pupil.x, pupil.y, 15, 15, Red); cvLine(eye_image, pupil, corneal_reflection, Red, 4, 8); cvLine(ellipse_image, pupil, corneal_reflection, Red, 4, 8); printf("ellipse a:%lf; b:%lf, cx:%lf, cy:%lf, theta:%lf; inliers_num:%d\n\n", pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4], inliers_num); bool is_inliers = 0; for (int i = 0; i < edge_point.size(); i++) { is_inliers = 0; for (int j = 0; j < inliers_num; j++) { if (i == inliers_index[j]) is_inliers = 1; } stuDPoint *edge = edge_point.at(i); if (is_inliers) Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 5, 5, Green); else Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 3, 3, Yellow); } free(inliers_index); if (ellipse_axis.width > 0 && ellipse_axis.height > 0) { start_point.x = pupil.x; start_point.y = pupil.y; //printf("start_point: %d,%d\n", start_point.x, start_point.y); Draw_Cross(eye_image, pupil.x, pupil.y, 10, 10, Green); cvEllipse(eye_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Red, 2); cvEllipse(ellipse_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Green, 2); diff_vector.x = pupil.x - corneal_reflection.x; diff_vector.y = pupil.y - corneal_reflection.y; if (do_map2scene) { gaze_point = homography_map_point(diff_vector); printf("gaze_point: (%d,%d)\n", gaze_point.x, gaze_point.y); Draw_Cross(scene_image, gaze_point.x, gaze_point.y, 60, 60, Red); } lost_frame_num = 0; } else { lost_frame_num++; } if (lost_frame_num > 5) { start_point.x = FRAMEW/2; start_point.y = FRAMEH/2; } Draw_Cross(ellipse_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue); Draw_Cross(eye_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue); if (save_ellipse == 1) { printf("save ellipse %d\n", ellipse_no); sprintf(ellipse_file, "./Ellipse/Ellipse_%05d.jpg", ellipse_no); ellipse_no++; cvSaveImage(ellipse_file, ellipse_image); fprintf(ellipse_log, "%.3f\t %8.2lf %8.2lf %8.2lf %8.2lf %8.2lf\n", Time_Elapsed(), pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4]); } printf("Time elapsed: %.3f\n", Time_Elapsed()); fprintf(logfile,"%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", Time_Elapsed(), pupil.x, pupil.y, corneal_reflection.x, corneal_reflection.y, diff_vector.x, diff_vector.y, 
gaze_point.x, gaze_point.y); if (view_cal_points) Show_Calibration_Points(); }
bool IrisFinderHough::Find(IplImage* image, CvRect eyeROI) { if (!ParametersValid()) return false; if (m_sizeData.SizeChanged(eyeROI)) PrepareImage(eyeROI); // some helper imgs IplImage* imgSobelH = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1); IplImage* imgSobelV = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1); // copy roi to internal image ImgLib::CopyRect(image, m_eyeImg, eyeROI, cvPoint(0, 0)); cvSobel(m_eyeImg, imgSobelH, 1, 0, 3); cvSobel(m_eyeImg, imgSobelV, 0, 1, 3); double angle; double dx, dy; double thetaRad; double xPrim, yPrim; double xsi; double max_e = 2.2; HoughAccumulator acc(m_accPrecision); acc.AddParam(0, m_eyeImg->width); // x0 acc.AddParam(0, m_eyeImg->height); // x1 acc.AddParam(m_thetaMin, m_thetaMax); // theta acc.AddParam(m_aMin, m_aMax); // a acc.AddParam(m_bMin, m_bMax); // b acc.Init(); DOUBLEVECT indices; indices.resize(5); cvSmooth(m_eyeImg, m_eyeImg); cvCanny(m_eyeImg, m_eyeImg, 250, 100); for(int y = 0; y < m_eyeImg->height; y++) { short* sh_row = (short*)(imgSobelH->imageData + y * imgSobelH->widthStep); short* sv_row = (short*)(imgSobelV->imageData + y * imgSobelV->widthStep); uchar* canny_row = (uchar *)(m_eyeImg->imageData + y * m_eyeImg->widthStep); double x0, y0; double a, b, theta=0; for (int x = 0; x < m_eyeImg->width; x++) { if (canny_row[x] == 0) continue; short dX = sh_row[x]; short dY = sv_row[x]; if ( (abs(dX) + abs(dY)) < m_minGradStrength) { cvLine(m_eyeImg, cvPoint(x,y),cvPoint(x,y),CV_RGB(0,0,0)); continue; } for (a = m_aMin; a < m_aMax; a+= (1 / m_accPrecision)) for (b = m_bMin; b < m_bMax; b+= (1 / m_accPrecision)) { double e = a / b; if (e < 1) e = b / a; if (e > max_e) continue; for (theta = m_thetaMin; theta < m_thetaMax; theta += (1 / m_accPrecision)) { angle = atan2((float)dY, (float)dX); thetaRad = 2 * CV_PI * theta / 360.0; angle -= (thetaRad + CV_PI / 2.0); xsi = tan(angle); //xsi = (float) dY / (float) dX; dx = -SignX(dX, dY) * a / sqrt(1 + (b * b) / (a * a * xsi * xsi)); dy = -SignY(dX, dY) * b / sqrt(1 + (a * a * xsi * xsi) / (b * b)); // rotate by theta xPrim = cos(thetaRad) * dx - sin(thetaRad) * dy; yPrim = sin(thetaRad) * dx + cos(thetaRad) * dy; dx = xPrim; dy = yPrim; x0 = x + dx; y0 = y + dy; indices[0] = x0; indices[1] = y0; indices[2] = theta; indices[3] = a; indices[4] = b; acc.Increment(indices); } } } } indices = acc.FindBest(); if (indices.size() > 0) { cvEllipse(image, cvPoint(indices[0] + eyeROI.x, indices[1] + eyeROI.y), cvSize(indices[3], indices[4]), -indices[2], // 90, 0, 360, CV_RGB(255, 0, 0)); m_irisCentre.x = indices[0] + eyeROI.x; m_irisCentre.y = indices[1] + eyeROI.y; return true; } return false; }
int fmaFitEllipse(void) { long lErrors = 0; CvPoint points[1000]; CvPoint2D32f fpoints[1000]; CvBox2D box; CvMemStorage* storage = cvCreateMemStorage(0); CvContour* contour; CvSize axis; IplImage* img = cvCreateImage( cvSize(200,200), IPL_DEPTH_8U, 1 ); for( int k = 0 ; k < 1000; k++ ) { iplSet( img, 0 ); CvPoint center = { 100, 100 }; double angle = atsInitRandom( 0, 360 ); axis.height = (int)atsInitRandom( 5, 50 ); axis.width = (int)atsInitRandom( 5, 50 ); cvEllipse( img, center, axis, angle, 0, 360, 255, -1 ); cvFindContours( img, storage, (CvSeq**)&contour, sizeof(CvContour) ); cvCvtSeqToArray( (CvSeq*)contour, points ); for( int i = 0; i < contour->total; i++ ) { fpoints[i].x = (float)points[i].x; fpoints[i].y = (float)points[i].y; } cvFitEllipse( fpoints, contour->total, &box ); //compare boxes if( fabs( box.center.x - center.x) > 1 || fabs( box.center.y - center.y ) > 1 ) { lErrors++; } if( ( fabs( box.size.width - (axis.width * 2 ) ) > 4 || fabs( box.size.height - (axis.height * 2) ) > 4 ) && ( fabs( box.size.height - (axis.width * 2 ) ) > 4 || fabs( box.size.width - (axis.height * 2) ) > 4 ) ) { lErrors++; //graphic /*IplImage* rgb = cvCreateImage( cvSize(200,200), IPL_DEPTH_8U, 3 ); iplSet( rgb, 0 ); cvEllipse( rgb, center, axis, angle, 0, 360, CV_RGB(255,0,0) , 1 ); int window = atsCreateWindow( "proba", cvPoint(0,0), cvSize(200,200) ); cvEllipse( rgb, center, cvSize( box.size.width/2, box.size.height/2) , -box.angle, 0, 360, CV_RGB(0,255,0) , 1 ); //draw center cvEllipse( rgb, center, cvSize( 0, 0) , 0, 0, 360, CV_RGB(255,255,255) , -1 ); atsDisplayImage( rgb, window, cvPoint(0,0), cvSize(200,200) ); getch(); atsDestroyWindow( window ); //one more cvFitEllipse( fpoints, contour->total, &box ); */ } } cvReleaseMemStorage( &storage ); if( !lErrors) return trsResult(TRS_OK, "No errors"); else return trsResult(TRS_FAIL, "Fixed %d errors", lErrors); }
virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) { CvSeq* cnts; CvSeq* cnt; int i; //CvMat* pMC = NULL; if(m_BlobList.GetBlobNum() <= 0 ) return; /* Clear blob list for new blobs: */ m_BlobListNew.Clear(); assert(m_pMem); cvClearMemStorage(m_pMem); assert(pImgFG); { /* One contour - one blob: */ IplImage* pBin = cvCloneImage(pImgFG); assert(pBin); cvThreshold(pBin,pBin,128,255,CV_THRESH_BINARY); cvFindContours(pBin, m_pMem, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL); /* Process each contour: */ for(cnt = cnts; cnt; cnt=cnt->h_next) { CvBlob NewBlob; /* Image moments: */ double M00,X,Y,XX,YY; CvMoments m; CvRect r = ((CvContour*)cnt)->rect; CvMat mat; if(r.height < 3 || r.width < 3) continue; cvMoments( cvGetSubRect(pImgFG,&mat,r), &m, 0 ); M00 = cvGetSpatialMoment( &m, 0, 0 ); if(M00 <= 0 ) continue; X = cvGetSpatialMoment( &m, 1, 0 )/M00; Y = cvGetSpatialMoment( &m, 0, 1 )/M00; XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X; YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y; NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY))); m_BlobListNew.AddBlob(&NewBlob); } /* Next contour. */ cvReleaseImage(&pBin); } for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Predict new blob position. */ CvBlob* pB = NULL; DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1); /* Update predictor. */ pBT->pPredictor->Update(&(pBT->blob)); pB = pBT->pPredictor->Predict(); if(pB) { pBT->BlobPredict = pB[0]; } pBT->BlobPrev = pBT->blob; } /* Predict new blob position. */ if(m_BlobList.GetBlobNum()>0 && m_BlobListNew.GetBlobNum()>0) { /* Resolve new blob to old: */ int i,j; int NOld = m_BlobList.GetBlobNum(); int NNew = m_BlobListNew.GetBlobNum(); for(i=0; i<NOld; i++) { /* Set 0 collision and clear all hyp: */ DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i); pF->Collision = 0; pF->pBlobHyp->Clear(); } /* Set 0 collision. */ /* Create correspondence records: */ for(j=0; j<NNew; ++j) { CvBlob* pB1 = m_BlobListNew.GetBlob(j); DefBlobTrackerCR* pFLast = NULL; for(i=0; i<NOld; i++) { /* Check intersection: */ int Intersection = 0; DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i); CvBlob* pB2 = &(pF->BlobPredict); if( fabs(pB1->x-pB2->x)<0.5*(pB1->w+pB2->w) && fabs(pB1->y-pB2->y)<0.5*(pB1->h+pB2->h) ) Intersection = 1; if(Intersection) { if(pFLast) { pF->Collision = pFLast->Collision = 1; } pFLast = pF; pF->pBlobHyp->AddBlob(pB1); } } /* Check intersection. */ } /* Check next new blob. */ } /* Resolve new blob to old. */ for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Track each blob. */ CvBlob* pB = m_BlobList.GetBlob(i-1); DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)pB; int BlobID = CV_BLOB_ID(pB); //CvBlob* pBBest = NULL; //double DistBest = -1; int j; if(pBT->pResolver) { pBT->pResolver->SetCollision(pBT->Collision); } if(pBT->Collision) { /* Tracking in collision: */ if(pBT->pResolver) { pB[0] = pBT->pResolver->Process(&(pBT->BlobPredict),pImg, pImgFG)[0]; } } /* Tracking in collision. 
*/ else { /* Non-collision tracking: */ CvBlob NewCC = pBT->BlobPredict; if(pBT->pBlobHyp->GetBlobNum()==1) { /* One blob to one CC: */ NewCC = pBT->pBlobHyp->GetBlob(0)[0]; } else { /* One blob several CC: */ CvBlob* pBBest = NULL; double DistBest = -1; double CMax = 0; for(j=pBT->pBlobHyp->GetBlobNum();j>0;--j) { /* Find best CC: */ CvBlob* pBNew = pBT->pBlobHyp->GetBlob(j-1); if(pBT->pResolver) { /* Choose CC by confidence: */ // double dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew)); // double dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew)); double C = pBT->pResolver->GetConfidence(pBNew,pImg, pImgFG); if(C > CMax || pBBest == NULL) { CMax = C; pBBest = pBNew; } } else { /* Choose CC by distance: */ double dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew)); double dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew)); double Dist = sqrt(dx*dx+dy*dy); if(Dist < DistBest || pBBest == NULL) { DistBest = Dist; pBBest = pBNew; } } } /* Find best CC. */ if(pBBest) NewCC = pBBest[0]; } /* One blob several CC. */ pB->x = NewCC.x; pB->y = NewCC.y; pB->w = (m_AlphaSize)*NewCC.w+(1-m_AlphaSize)*pB->w; pB->h = (m_AlphaSize)*NewCC.h+(1-m_AlphaSize)*pB->h; pBT->pResolver->SkipProcess(&(pBT->BlobPredict),pImg, pImgFG); } /* Non-collision tracking. */ pBT->pResolver->Update(pB, pImg, pImgFG); CV_BLOB_ID(pB)=BlobID; } /* Track next blob. */ if(m_Wnd) { IplImage* pI = cvCloneImage(pImg); int i; for(i=m_BlobListNew.GetBlobNum(); i>0; --i) { /* Draw each new CC: */ CvBlob* pB = m_BlobListNew.GetBlob(i-1); CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB)); CvSize s = cvSize(MAX(1,x), MAX(1,y)); //int c = 255; cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(255,255,0), 1 ); } for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Draw each new CC: */ DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1); CvBlob* pB = &(pF->BlobPredict); CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB)); CvSize s = cvSize(MAX(1,x), MAX(1,y)); cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(0,0,255), 1 ); pB = &(pF->blob); p = cvPointFrom32f(CV_BLOB_CENTER(pB)); x = cvRound(CV_BLOB_RX(pB)); y = cvRound(CV_BLOB_RY(pB)); s = cvSize(MAX(1,x), MAX(1,y)); cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(0,255,0), 1 ); } //cvNamedWindow("CCwithCR",0); //cvShowImage("CCwithCR",pI); cvReleaseImage(&pI); } } /* Process. */
int main(int argc, char* argv[]) { int i, j; CvMemStorage* storage = cvCreateMemStorage(0); IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 ); IplImage* img32f = cvCreateImage( cvSize(w,w), IPL_DEPTH_32F, 1 ); IplImage* img32s = cvCreateImage( cvSize(w,w), IPL_DEPTH_32S, 1 ); IplImage* img3 = cvCreateImage( cvSize(w,w), 8, 3 ); (void)argc; (void)argv; help(); cvZero( img ); for( i=0; i < 6; i++ ) { int dx = (i%2)*250 - 30; int dy = (i/2)*150; CvScalar white = cvRealScalar(255); CvScalar black = cvRealScalar(0); if( i == 0 ) { for( j = 0; j <= 10; j++ ) { double angle = (j+5)*CV_PI/21; cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)), cvRound(dy+100-90*sin(angle))), cvPoint(cvRound(dx+100+j*10-30*cos(angle)), cvRound(dy+100-30*sin(angle))), white, 3, 8, 0); } } cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); } cvNamedWindow( "image", 1 ); cvShowImage( "image", img ); cvConvert( img, img32f ); findCComp( img32f ); cvConvert( img32f, img32s ); cvFindContours( img32s, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); //cvFindContours( img, storage, &contours, sizeof(CvContour), // CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); { const char* attrs[] = {"recursive", "1", 0}; cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0)); contours = (CvSeq*)cvLoad("contours.xml", storage, 0, 0); } // comment this out if you do not want approximation contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 ); cvNamedWindow( "contours", 1 ); cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar ); { CvRNG rng = cvRNG(-1); CvSeq* tcontours = contours; cvCvtColor( img, img3, CV_GRAY2BGR ); while( tcontours->h_next ) tcontours = tcontours->h_next; for( ; tcontours != 0; tcontours = tcontours->h_prev ) { CvScalar color; color.val[0] = cvRandInt(&rng) % 256; color.val[1] = cvRandInt(&rng) % 256; color.val[2] = cvRandInt(&rng) % 256; color.val[3] = cvRandInt(&rng) % 256; cvDrawContours(img3, tcontours, color, color, 0, -1, 8, cvPoint(0,0)); if( tcontours->v_next ) { color.val[0] = cvRandInt(&rng) % 256; color.val[1] = cvRandInt(&rng) % 256; color.val[2] = cvRandInt(&rng) % 256; color.val[3] = cvRandInt(&rng) % 256; cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0)); } } } cvShowImage( "colored", img3 ); on_trackbar(0); cvWaitKey(0); cvReleaseMemStorage( &storage ); cvReleaseImage( &img ); cvReleaseImage( &img32f ); cvReleaseImage( &img32s ); cvReleaseImage( &img3 ); return 0; }
int main( void )
{
    IplImage* img=cvLoadImage("obraz1.png");
    if(!img) return 0;
    IplImage* hsv=cvCreateImage( cvGetSize(img), 8, 3 );    // image for the HSV color space
    IplImage* binary=cvCreateImage( cvGetSize(img), 8, 1 ); // the color-based mask goes here
    cvCvtColor(img,hsv,CV_BGR2HSV); // convert to HSV - the color mask is built from this

    // fast access to the image pixels
    uchar* hsv_ptr = (uchar *)hsv->imageData;
    uchar* binary_ptr = (uchar *)binary->imageData;
    int hsv_step = hsv->widthStep;
    int binary_step = binary->widthStep;
    for (int i = 0; i < hsv->height; i++)
        for (int j = 0; j < hsv->width; j++)
        {
            // read the individual components
            int h=hsv_ptr[i*hsv_step+j*3+0];
            int s=hsv_ptr[i*hsv_step+j*3+1];
            int v=hsv_ptr[i*hsv_step+j*3+2];
            // build a binary mask for the color red
            if(v>=40 && (h>160 || h<20) && s>150) // brightness > 40, red hue, saturation > 150
                binary_ptr[i*binary_step+j] = 255;
            else
                binary_ptr[i*binary_step+j] = 0;
        }
    cvReleaseImage(&hsv); // the HSV image is no longer needed!

    cvNamedWindow( "Binarny", 1);
    cvShowImage("Binarny",binary);

    blobs.BlobAnalysis(binary, 0, 0, binary->width, binary->height, 0, 10);
    blobs.BlobExclude(BLOBCIRCULARITY,1.1,5);
    blobs.BlobExclude(BLOBCIRCULARITY,.1,.9);
    printf("Found %d blobs",blobs.BlobCount);
    blobs.PrintRegionDataArray(1);

    // mark the detected blobs
    for(int i=1;i<=blobs.BlobCount;i++)
    {
        cvRectangle(img,cvPoint(blobs.RegionData[i][BLOBMINX],blobs.RegionData[i][BLOBMINY]),cvPoint(blobs.RegionData[i][BLOBMAXX],blobs.RegionData[i][BLOBMAXY]),CV_RGB(255,0,0));
        cvEllipse(img,cvPoint(blobs.RegionData[i][BLOBSUMX],blobs.RegionData[i][BLOBSUMY]),cvSize(5,5),360, 0, 360, CV_RGB(255,255,255));
    }
    cvNamedWindow( "Podglad", 1 );
    cvShowImage("Podglad",img);
    cvWaitKey(0);
    cvDestroyAllWindows();
    cvReleaseImage(&binary);
    cvReleaseImage(&img);
    return 0;
}
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage) { /* Create contours: */ IplImage* pIB = NULL; CvSeq* cnt = NULL; CvSeq* cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage ); CvSeq* clasters = NULL; int claster_cur, claster_num; pIB = cvCloneImage(pFG); cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY); cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL); cvReleaseImage(&pIB); /* Create cnt_list. */ /* Process each contour: */ for(; cnt; cnt=cnt->h_next) { cvSeqPush( cnt_list, &cnt); } claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL ); for(claster_cur=0; claster_cur<claster_num; ++claster_cur) { int cnt_cur; CvBlob NewBlob; double M00,X,Y,XX,YY; /* image moments */ CvMoments m; CvRect rect_res = cvRect(-1,-1,-1,-1); CvMat mat; for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur) { CvRect rect; CvSeq* cnt; int k = *(int*)cvGetSeqElem( clasters, cnt_cur ); if(k!=claster_cur) continue; cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur ); rect = ((CvContour*)cnt)->rect; if(rect_res.height<0) { rect_res = rect; } else { /* Unite rects: */ int x0,x1,y0,y1; x0 = MIN(rect_res.x,rect.x); y0 = MIN(rect_res.y,rect.y); x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width); y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height); rect_res.x = x0; rect_res.y = y0; rect_res.width = x1-x0; rect_res.height = y1-y0; } } if(rect_res.height < 1 || rect_res.width < 1) { X = 0; Y = 0; XX = 0; YY = 0; } else { cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 ); M00 = cvGetSpatialMoment( &m, 0, 0 ); if(M00 <= 0 ) continue; X = cvGetSpatialMoment( &m, 1, 0 )/M00; Y = cvGetSpatialMoment( &m, 0, 1 )/M00; XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X; YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y; } NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY))); pBlobs->AddBlob(&NewBlob); } /* Next cluster. */ #if 0 { // Debug info: IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3); cvZero(pI); for(claster_cur=0; claster_cur<claster_num; ++claster_cur) { int cnt_cur; CvScalar color = CV_RGB(rand()%256,rand()%256,rand()%256); for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur) { CvSeq* cnt; int k = *(int*)cvGetSeqElem( clasters, cnt_cur ); if(k!=claster_cur) continue; cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur ); cvDrawContours( pI, cnt, color, color, 0, 1, 8); } CvBlob* pB = pBlobs->GetBlob(claster_cur); int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB)); cvEllipse( pI, cvPointFrom32f(CV_BLOB_CENTER(pB)), cvSize(MAX(1,x), MAX(1,y)), 0, 0, 360, color, 1 ); } cvNamedWindow( "Clusters", 0); cvShowImage( "Clusters",pI ); cvReleaseImage(&pI); } /* Debug info. */ #endif } /* cvFindBlobsByCCClasters */
int main( int argc, char** argv ) { int i, j; CvMemStorage* storage = cvCreateMemStorage(0); IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 ); cvZero( img ); for( i=0; i < 6; i++ ) { int dx = (i%2)*250 - 30; int dy = (i/2)*150; CvScalar white = cvRealScalar(255); CvScalar black = cvRealScalar(0); if( i == 0 ) { for( j = 0; j <= 10; j++ ) { double angle = (j+5)*CV_PI/21; cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)), cvRound(dy+100-90*sin(angle))), cvPoint(cvRound(dx+100+j*10-30*cos(angle)), cvRound(dy+100-30*sin(angle))), white, 1, 8, 0); } } cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); } cvNamedWindow( "image", 1 ); cvShowImage( "image", img ); cvFindContours( img, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); // comment this out if you do not want approximation contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 ); cvNamedWindow( "contours", 1 ); cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar ); on_trackbar(0); cvWaitKey(0); cvReleaseMemStorage( &storage ); cvReleaseImage( &img ); return 0; }
virtual void Process(IplImage* pImg, IplImage* /*pFG*/)
{
    int i;
    double MinTv = pImg->width / 1440.0; /* minimal threshold for speed difference */
    double MinTv2 = MinTv * MinTv;

    for (i = m_Tracks.GetBlobNum(); i > 0; --i)
    {
        DefTrackForDist* pF = (DefTrackForDist*)m_Tracks.GetBlob(i - 1);
        pF->state = 0;

        if (pF->LastFrame == m_Frame || pF->LastFrame + 1 == m_Frame)
        {   /* Process one blob trajectory: */
            int NumEq = 0;
            int it;

            for (it = m_TrackDataBase.GetBlobNum(); it > 0; --it)
            {   /* Check template: */
                DefTrackForDist* pFT = (DefTrackForDist*)m_TrackDataBase.GetBlob(it - 1);
                int Num = pF->pTrack->GetPointNum();
                int NumT = pFT->pTrack->GetPointNum();
                int* pPairIdx = (int*)ReallocTempData(sizeof(int) * 2 * (Num + NumT) + sizeof(DefMatch) * Num * NumT);
                void* pTmpData = pPairIdx + 2 * (Num + NumT);
                int PairNum = 0;
                int k;
                int Equal = 1;
                int UseVel = 0;
                int UsePos = 0;

                if (i == it) { continue; }

                /* Match track: */
                PairNum = cvTrackMatch(pF->pTrack, m_TraceLen, pFT->pTrack, pPairIdx, pTmpData);
                Equal = MAX(1, cvRound(PairNum * 0.1));
                UseVel = 3 * pF->pTrack->GetPointNum() > m_TraceLen;
                UsePos = 10 * pF->pTrack->GetPointNum() > m_TraceLen;

                {   /* Check continuity: */
                    float D;
                    int DI = pPairIdx[0*2+0] - pPairIdx[(PairNum-1)*2+0];
                    int DIt = pPairIdx[0*2+1] - pPairIdx[(PairNum-1)*2+1];

                    if (UseVel && DI != 0)
                    {
                        D = (float)(DI - DIt) / (float)DI;
                        if (fabs(D) > m_VelThreshold) { Equal = 0; }
                        if (fabs(D) > m_VelThreshold * 0.5) { Equal /= 2; }
                    }
                }   /* Check continuity. */

                for (k = 0; Equal > 0 && k < PairNum; ++k)
                {   /* Compare with threshold: */
                    int j = pPairIdx[k*2+0];
                    int jt = pPairIdx[k*2+1];
                    DefTrackPoint* pB = pF->pTrack->GetPoint(j);
                    DefTrackPoint* pBT = pFT->pTrack->GetPoint(jt);
                    double dx = pB->x - pBT->x;
                    double dy = pB->y - pBT->y;
                    double dvx = pB->vx - pBT->vx;
                    double dvy = pB->vy - pBT->vy;
                    //double dv = pB->v - pBT->v;
                    double D = dx * dx + dy * dy;
                    double Td = pBT->r * m_PosThreshold;
                    double dv2 = dvx * dvx + dvy * dvy;
                    double Tv2 = (pBT->vx * pBT->vx + pBT->vy * pBT->vy) * m_VelThreshold * m_VelThreshold;
                    double Tvm = pBT->v * m_VelThreshold;

                    if (Tv2 < MinTv2) { Tv2 = MinTv2; }
                    if (Tvm < MinTv) { Tvm = MinTv; }

                    /* Check trajectory position: */
                    if (UsePos && D > Td * Td)
                    {
                        Equal--;
                    }
                    else
                    /* Check trajectory velocity. */
                    /* Don't consider the trajectory tail, because it is unstable for velocity computation. */
                    if (UseVel && j > 5 && jt > 5 && dv2 > Tv2)
                    {
                        Equal--;
                    }
                }   /* Compare with threshold. */

                if (Equal > 0)
                {
                    NumEq++;
                    pFT->close++;
                }
            }   /* Next template. */

            {   /* Calculate state: */
                float T = m_TrackDataBase.GetBlobNum() * m_AbnormalThreshold; /* calc threshold */

                if (T > 0)
                {
                    pF->state = (T - NumEq) / (T * 0.2f) + 0.5f;
                }
                if (pF->state < 0) { pF->state = 0; }
                if (pF->state > 1) { pF->state = 1; }

                /*if(0)if(pF->state>0)
                {   // if abnormal blob
                    printf("Abnormal blob(%d) %d < %f, state=%f\n", CV_BLOB_ID(pF), NumEq, T, pF->state);
                }*/
            }   /* Calculate state. */
        }   /* Process one blob trajectory. */
        else
        {   /* Move track to the track data base: */
            m_TrackDataBase.AddBlob((CvBlob*)pF);
            m_Tracks.DelBlob(i - 1);
        }
    }   /* Next blob. */

    if (m_Wnd)
    {   /* Debug output: */
        int i;

        if (m_pDebugImg == NULL)
        {
            m_pDebugImg = cvCloneImage(pImg);
        }
        else
        {
            cvCopy(pImg, m_pDebugImg);
        }

        for (i = m_TrackDataBase.GetBlobNum(); i > 0; --i)
        {   /* Draw all elements in the track data base: */
            int j;
            DefTrackForDist* pF = (DefTrackForDist*)m_TrackDataBase.GetBlob(i - 1);
            CvScalar color = CV_RGB(0, 0, 0);

            if (!pF->close) { continue; }

            if (pF->close)   /* note: always true here, the else branch is unreachable */
            {
                color = CV_RGB(0, 0, 255);
            }
            else
            {
                color = CV_RGB(0, 0, 128);
            }

            for (j = pF->pTrack->GetPointNum(); j > 0; j--)
            {
                DefTrackPoint* pB = pF->pTrack->GetPoint(j - 1);
                int r = 0; //MAX(cvRound(pB->r),1);
                cvCircle(m_pDebugImg, cvPoint(cvRound(pB->x), cvRound(pB->y)), r, color);
            }
            pF->close = 0;
        }   /* Draw all elements in the track data base. */

        for (i = m_Tracks.GetBlobNum(); i > 0; --i)
        {   /* Draw all elements for all trajectories: */
            DefTrackForDist* pF = (DefTrackForDist*)m_Tracks.GetBlob(i - 1);
            int j;
            int c = cvRound(pF->state * 255);
            CvScalar color = CV_RGB(c, 255 - c, 0);
            CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pF));
            int x = cvRound(CV_BLOB_RX(pF)), y = cvRound(CV_BLOB_RY(pF));
            CvSize s = cvSize(MAX(1, x), MAX(1, y));

            /* thickness expression evaluates to 1; the (0*c) term is a leftover */
            cvEllipse(m_pDebugImg, p, s, 0, 0, 360,
                      CV_RGB(c, 255 - c, 0), cvRound(1 + (0 * c) / 255));

            for (j = pF->pTrack->GetPointNum(); j > 0; j--)
            {
                DefTrackPoint* pB = pF->pTrack->GetPoint(j - 1);
                if (pF->pTrack->GetPointNum() - j > m_TraceLen) { break; }
                cvCircle(m_pDebugImg, cvPoint(cvRound(pB->x), cvRound(pB->y)), 0, color);
            }
            pF->close = 0;
        }   /* Draw all elements for all trajectories. */

        //cvNamedWindow("Tracks", 0);
        //cvShowImage("Tracks", m_pDebugImg);
    }   /* Debug output. */

#if 0
    if (m_pDebugImg && m_pDebugAVIName)
    {
        if (m_pDebugAVI == NULL)
        {   /* Create AVI file for writing: */
            m_pDebugAVI = cvCreateVideoWriter(
                m_pDebugAVIName,
                CV_FOURCC('x', 'v', 'i', 'd'),
                25,
                cvSize(m_pDebugImg->width, m_pDebugImg->height));

            if (m_pDebugAVI == NULL)
            {
                printf("WARNING!!! Can not create AVI file %s for writing\n", m_pDebugAVIName);
            }
        }   /* Create AVI file for writing. */

        if (m_pDebugAVI)
        {
            cvWriteFrame(m_pDebugAVI, m_pDebugImg);
        }
    }   /* Write debug window to AVI file. */
#endif

    m_Frame++;
};
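The abnormality score that Process() assigns to each live track is simply a rescaled, clamped count of how many database templates the track matched. The helper below restates that mapping in isolation; the name AbnormalState and its standalone signature are illustrative and not part of the original class.

/* Illustrative restatement of the state computation in Process() above; the
   function name and parameters are hypothetical. T = DatabaseSize * AbnormalThreshold,
   and the state rises from 0 to 1 as the number of matching templates (NumEq)
   drops below that threshold. */
static float AbnormalState(int NumEq, int DatabaseSize, float AbnormalThreshold)
{
    float T = DatabaseSize * AbnormalThreshold;
    float state = 0.f;

    if (T > 0)
        state = (T - NumEq) / (T * 0.2f) + 0.5f;

    if (state < 0) state = 0;
    if (state > 1) state = 1;
    return state;
}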
/* Glue function that wires all the tracking modules together.
   Use it as a template and adapt it to your own pipeline. */
int RunBlobTrackingAuto2323(CvCapture* pCap, CvBlobTrackerAuto* pTracker,
                            char* fgavi_name, char* btavi_name)
{
    int OneFrameProcess = 0;
    int key;
    int FrameNum = 0;
    CvVideoWriter* pFGAvi = NULL;
    CvVideoWriter* pBTAvi = NULL;

    /* Main loop:
       With OneFrameProcess == 0 the loop calls cvWaitKey(1): wait up to 1 ms, return the
       key code if a key was pressed, otherwise -1. With OneFrameProcess == 1 it calls
       cvWaitKey(0), which blocks until a key is pressed (single-frame stepping). */
    for (FrameNum = 0;
         pCap && (key = cvWaitKey(OneFrameProcess ? 0 : 1)) != 27;  /* ESC ends the program */
         FrameNum++)
    {   /* Main loop: the whole program lives inside this loop; when it exits, the program ends. */
        IplImage* pImg = NULL;
        IplImage* pMask = NULL;

        if (key != -1)
        {
            OneFrameProcess = 1;
            if (key == 'r') OneFrameProcess = 0;
        }

        pImg = cvQueryFrame(pCap);   /* grab the next video frame */
        if (pImg == NULL) break;

        /* Process: */
        pTracker->Process(pImg, pMask);   /* this call runs the whole detection/tracking pipeline */

        if (fgavi_name)                 /* a file name was given for saving the foreground */
        if (pTracker->GetFGMask())      /* if a foreground mask exists, save it and draw the blobs */
        {   /* Debug FG: */
            IplImage* pFG = pTracker->GetFGMask();   /* foreground mask */
            CvSize S = cvSize(pFG->width, pFG->height);
            static IplImage* pI = NULL;

            if (pI == NULL) pI = cvCreateImage(S, pFG->depth, 3);
            cvCvtColor(pFG, pI, CV_GRAY2BGR);

            if (fgavi_name)   /* save the foreground to an AVI file */
            {   /* Save fg to avi file: */
                if (pFGAvi == NULL)
                {
                    pFGAvi = cvCreateVideoWriter(
                        fgavi_name,
                        CV_FOURCC('x', 'v', 'i', 'd'),
                        25,
                        S);
                }
                cvWriteFrame(pFGAvi, pI);   /* write one frame */
            }

            /* Draw an ellipse for each detected blob: */
            if (pTracker->GetBlobNum() > 0)   /* the tracker found at least one blob */
            {   /* Draw detected blobs: */
                int i;

                for (i = pTracker->GetBlobNum(); i > 0; i--)
                {
                    CvBlob* pB = pTracker->GetBlob(i - 1);          /* blob i-1 */
                    CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); /* blob center */
                    /* CV_BLOB_CENTER is a simple cast-based macro:
                       #define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y) */
                    CvSize s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB))),
                                      MAX(1, cvRound(CV_BLOB_RY(pB))));
                    /* CV_BLOB_RX / CV_BLOB_RY give the blob's half width and half height */
                    int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));

                    cvEllipse(pI,   /* draw the blob ellipse on the FG image */
                              p, s, 0, 0, 360,
                              CV_RGB(c, 255 - c, 0),
                              cvRound(1 + (3 * c) / 255));
                }   /* Next blob. */
            }

            cvNamedWindow("FG", 0);
            cvShowImage("FG", pI);
        }   /* Debug FG: save the foreground and draw the blobs. */

        /* On the original frame: write the blob id next to each detected blob. */
        /* Draw debug info: */
        if (pImg)   /* the original frame */
        {   /* Draw all information about the test sequence: */
            char str[1024];
            int line_type = CV_AA;   // Change it to 8 to see non-antialiased graphics.
            CvFont font;
            int i;
            IplImage* pI = cvCloneImage(pImg);

            cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type);

            for (i = pTracker->GetBlobNum(); i > 0; i--)
            {
                CvSize TextSize;
                CvBlob* pB = pTracker->GetBlob(i - 1);
                CvPoint p = cvPoint(cvRound(pB->x * 256), cvRound(pB->y * 256));
                CvSize s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB) * 256)),
                                  MAX(1, cvRound(CV_BLOB_RY(pB) * 256)));
                int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));

                /* Draw the blob on the original image; coordinates are in 1/256 pixel
                   because the shift parameter of cvEllipse is 8: */
                cvEllipse(pI,
                          p, s, 0, 0, 360,
                          CV_RGB(c, 255 - c, 0),
                          cvRound(1 + (3 * 0) / 255), CV_AA, 8);

                /* Write the blob id next to the blob: */
                p.x >>= 8;
                p.y >>= 8;
                s.width >>= 8;
                s.height >>= 8;
                sprintf(str, "%03d", CV_BLOB_ID(pB));
                cvGetTextSize(str, &font, &TextSize, NULL);
                p.y -= s.height;
                cvPutText(pI, str, p, &font, CV_RGB(0, 255, 255));

                {
                    const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));

                    if (pS)
                    {
                        char* pStr = MY_STRDUP(pS);   /* MY_STRDUP is defined elsewhere, e.g. as strdup/_strdup */
                        char* pStrFree = pStr;

                        while (pStr && strlen(pStr) > 0)
                        {
                            char* str_next = strchr(pStr, '\n');

                            if (str_next)
                            {
                                str_next[0] = 0;
                                str_next++;
                            }
                            p.y += TextSize.height + 1;
                            cvPutText(pI, pStr, p, &font, CV_RGB(0, 255, 255));
                            pStr = str_next;
                        }
                        free(pStrFree);
                    }
                }
            }   /* Next blob. */

            cvNamedWindow("Tracking", 0);
            cvShowImage("Tracking", pI);

            if (btavi_name && pI)   /* save the annotated frame if an output name was passed, e.g. btavi_name = "1.avi" */
            {   /* Save to avi file: */
                CvSize S = cvSize(pI->width, pI->height);

                if (pBTAvi == NULL)
                {
                    pBTAvi = cvCreateVideoWriter(
                        btavi_name,
                        CV_FOURCC('x', 'v', 'i', 'd'),
                        25,
                        S);
                }
                cvWriteFrame(pBTAvi, pI);
            }

            cvReleaseImage(&pI);
        }   /* Draw all information about the test sequence. */
    }   /* Main loop. */

    if (pFGAvi) cvReleaseVideoWriter(&pFGAvi);
    if (pBTAvi) cvReleaseVideoWriter(&pBTAvi);

    return 0;
}   /* RunBlobTrackingAuto */
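For context, a minimal driver for RunBlobTrackingAuto2323() could look like the sketch below. It assumes OpenCV's legacy blobtrack API (cvCreateBlobTrackerAuto1() and CvBlobTrackerAutoParam1, whose header location varies by OpenCV version); the zeroed parameter struct is only a placeholder, and in practice you would plug in concrete foreground-detection, blob-detection, tracking and analysis modules.

/* Hedged usage sketch (not from the original source): open a video, build a legacy
   auto-tracker and hand both to RunBlobTrackingAuto2323(). */
#include <string.h>
#include <opencv/cvaux.h>     /* legacy blobtrack declarations; location varies by OpenCV version */
#include <opencv/highgui.h>

int main(int argc, char** argv)
{
    CvCapture* pCap = cvCreateFileCapture(argc > 1 ? argv[1] : "test.avi");
    if (!pCap) return -1;

    /* Placeholder parameters: fill in the desired FG detector, blob detector,
       tracker, post-processing and analysis modules before real use. */
    CvBlobTrackerAutoParam1 param;
    memset(&param, 0, sizeof(param));

    CvBlobTrackerAuto* pTracker = cvCreateBlobTrackerAuto1(&param);
    if (!pTracker) { cvReleaseCapture(&pCap); return -1; }

    RunBlobTrackingAuto2323(pCap, pTracker, NULL /* fgavi_name */, NULL /* btavi_name */);

    pTracker->Release();      /* CvVSModule-style cleanup (assumed here) */
    cvReleaseCapture(&pCap);
    return 0;
}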