void Process::findHulls() { if (hullsStorage) cvReleaseMemStorage(&hullsStorage); hullsStorage = cvCreateMemStorage(0); bool isFirst = true; CvSeq *curHulls = NULL; hullsSeq = NULL; for(CvSeq* seq = contoursSeq; seq != 0; seq = seq->h_next){ if (! (seq->flags & CV_SEQ_FLAG_HOLE)) { if (isFirst) { isFirst = false; hullsSeq = cvConvexHull2(seq, hullsStorage, CV_CLOCKWISE, 1); curHulls = hullsSeq; } else { curHulls->h_next = cvConvexHull2(seq, hullsStorage, CV_CLOCKWISE, 1); curHulls = curHulls->h_next; } } } }
// Always return a new Mat of indices CvMat * cvConvexHull2_Shadow( const CvArr * points, int orientation, int return_points){ CvMat * hull; CvMat * points_mat=(CvMat *) points; CvSeq * points_seq=(CvSeq *) points; int npoints, type; CV_FUNCNAME("cvConvexHull2"); __BEGIN__; if(CV_IS_MAT(points_mat)){ npoints = MAX(points_mat->rows, points_mat->cols); type = return_points ? points_mat->type : CV_32S; } else if(CV_IS_SEQ(points_seq)){ npoints = points_seq->total; type = return_points ? CV_SEQ_ELTYPE(points_seq) : 1; } else{ CV_ERROR(CV_StsBadArg, "points must be a CvSeq or CvMat"); } CV_CALL( hull=cvCreateMat(1,npoints,type) ); CV_CALL( cvConvexHull2(points, hull, orientation, return_points) ); __END__; return hull; }
void bContourFinder::findConvexHulls(){ CvMemStorage *stor = cvCreateMemStorage(); CvSeq * ptseq = cvCreateSeq( CV_SEQ_KIND_CURVE|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), stor ); CvSeq * hull; CvPoint pt; this->convexBlobs.clear(); for(int i = 0 ; i < this->blobs.size(); i++){ this->convexBlobs.push_back(ofxCvBlob()); this->convexBlobs[i] = this->blobs[i]; this->convexBlobs[i].pts.clear(); // get blob i for(int j = 0; j < this->blobs[i].pts.size(); j++){ // fill in blob points pt.x = this->blobs[i].pts[j].x; pt.y = this->blobs[i].pts[j].y; cvSeqPush( ptseq, &pt); } hull = cvConvexHull2(ptseq, 0, CV_CLOCKWISE, 0); // get the points for the blob: CvPoint pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, hull->total - 1 ); for( int j=0; j < hull->total; j++ ) { pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, j ); convexBlobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) ); } convexBlobs[i].nPts = convexBlobs[i].pts.size(); } cvClearMemStorage( stor ); }
/* compute_ContourTree: Compute the contours of the filtered camera image
 * and draw each contour with its convex hull.
 *
 * @img_8uc1: an 8-bit single channel image.
 *
 * Returns a newly allocated 3-channel RGB image (caller releases it) showing
 * the drawn contours/hulls, or NULL when no contour was found.
 *
 * Fixes over the previous version:
 *  - with a CvSeq input and return_points == 0, cvConvexHull2 returns a
 *    sequence of CvPoint* (pointers into the contour), so each element needs
 *    a double dereference; the old single dereference (the "//??" lines)
 *    read pointer bytes as coordinates;
 *  - img_edge and the contour storage are now released, and everything is
 *    released on the empty-contour early return (all previously leaked).
 */
IplImage * compute_ContourTree(IplImage *img_8uc1)
{
    IplImage *img_edge = cvCreateImage(cvGetSize(img_8uc1), 8, 1);
    IplImage *img_8uc3 = cvCreateImage(cvGetSize(img_8uc1), 8, 3);
    CvMemStorage *storage = cvCreateMemStorage(); /* storage for contour creation */

    cvThreshold(img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY);

    CvSeq *c, *first_contour = NULL;
    int numContours = cvFindContours(img_edge, storage, &first_contour,
                                     sizeof(CvContour), CV_RETR_LIST);
    if(numContours == 0){
        /* BUGFIX: release everything on the empty path. */
        cvReleaseImage(&img_edge);
        cvReleaseImage(&img_8uc3);
        cvReleaseMemStorage(&storage);
        return NULL;
    }

    for(c = first_contour; c != NULL; c = c->h_next){
        cvCvtColor(img_8uc1, img_8uc3, CV_GRAY2BGR);
        cvDrawContours(img_8uc3, c, CVX_RED, CVX_BLUE, 1, 1, 8); /* note: define CVX_* if not including ocv.h */

        CvSeq *hull = cvConvexHull2(c, 0, CV_CLOCKWISE, 0);
        /* hull holds CvPoint* elements -> double dereference */
        CvPoint pt0 = **CV_GET_SEQ_ELEM(CvPoint*, hull, hull->total - 1);
        for(int i = 0; i < hull->total; ++i){
            CvPoint pt = **CV_GET_SEQ_ELEM(CvPoint*, hull, i);
            cvLine(img_8uc3, pt0, pt, CV_RGB( 0, 255, 0 ));
            pt0 = pt;
        }
        cvShowImage("CONVEX WINDOW", img_8uc3);
    }

    /* contours are no longer needed once drawn */
    cvReleaseImage(&img_edge);
    cvReleaseMemStorage(&storage);
    return img_8uc3;
}
void find_convex_hull(struct ctx *ctx) { CvSeq *defects; CvConvexityDefect *defect_array; int i; int x = 0, y = 0; int dist = 0; ctx->hull = NULL; if (!ctx->contour) return; ctx->hull = cvConvexHull2(ctx->contour, ctx->hull_st, CV_CLOCKWISE, 0); if (ctx->hull) { /* Get convexity defects of contour w.r.t. the convex hull */ defects = cvConvexityDefects(ctx->contour, ctx->hull, ctx->defects_st); if (defects && defects->total) { defect_array = calloc(defects->total, sizeof(CvConvexityDefect)); cvCvtSeqToArray(defects, defect_array, CV_WHOLE_SEQ); /* Average depth points to get hand center */ for (i = 0; i < defects->total && i < NUM_DEFECTS; i++) { x += defect_array[i].depth_point->x; y += defect_array[i].depth_point->y; ctx->defects[i] = cvPoint(defect_array[i].depth_point->x, defect_array[i].depth_point->y); } x /= defects->total; y /= defects->total; ctx->num_defects = defects->total; ctx->hand_center = cvPoint(x, y); /* Compute hand radius as mean of distances of defects' depth point to hand center */ for (i = 0; i < defects->total; i++) { int d = (x - defect_array[i].depth_point->x) * (x - defect_array[i].depth_point->x) + (y - defect_array[i].depth_point->y) * (y - defect_array[i].depth_point->y); dist += sqrt(d); } ctx->hand_radius = dist / defects->total; free(defect_array); } } }
/**
 * GetConvexHull
 * Computes the convex hull polygon of the blob.
 * @param dst receives the hull sequence on success.
 * @return true when the blob has edges and the hull was computed,
 *         false otherwise (dst is left untouched).
 * Original author: Ricard Borràs, 25-05-2005.
 */
bool CBlob::GetConvexHull( CvSeq **dst ) const
{
    const bool hasEdges = ( edges != NULL ) && ( edges->total > 0 );
    if( !hasEdges )
        return false;

    *dst = cvConvexHull2( edges, 0, CV_CLOCKWISE, 0 );
    return true;
}
/**
 * CBlobGetHullPerimeter
 * Computes the convex hull perimeter of the blob.
 * @return the perimeter of the blob's convex hull, or the blob's own
 *         perimeter when its edges are unavailable.
 * Original author: Ricard Borràs, 25-05-2005.
 */
double CBlobGetHullPerimeter::operator()(const CBlob &blob) const
{
    CvSeq *edgeSeq = blob.Edges();
    if( edgeSeq == NULL || edgeSeq->total <= 0 )
        return blob.Perimeter();   // fall back to the plain perimeter

    // return_points == 1 -> the hull is a polygon of actual points
    CvSeq *hullPoly = cvConvexHull2( edgeSeq, 0, CV_CLOCKWISE, 1 );
    return fabs(cvArcLength(hullPoly, CV_WHOLE_SEQ, 1));
}
/* Calculates exact convex hull of a 2d point set (legacy wrapper around
   cvConvexHull2). The unnamed CvRect* parameter is ignored and exists only
   for source compatibility with the old API.
   @points    input points; @num_points their count.
   @hull      output buffer receiving 32-bit point indices.
   @hullsize  receives the number of hull vertices. */
void cvConvexHull( CvPoint* points, int num_points, CvRect*, int orientation, int* hull, int* hullsize )
{
    CvMat pointMat = cvMat( 1, num_points, CV_32SC2, points );
    CvMat hullMat  = cvMat( 1, num_points, CV_32SC1, hull );

    cvConvexHull2( &pointMat, &hullMat, orientation, 0 );

    // cvConvexHull2 shrinks the output header's column count to the hull size
    *hullsize = hullMat.cols;
}
/**
 * CBlobGetHullArea
 * Computes the convex hull area of the blob.
 * @return the area enclosed by the blob's convex hull, or the blob's own
 *         area when its edges are unavailable.
 *
 * BUGFIX: the fallback used to return blob.Perimeter() — the wrong
 * magnitude for an area functor (copy/paste from CBlobGetHullPerimeter);
 * it now returns the blob's area.
 */
double CBlobGetHullArea::operator()(const CBlob &blob) const
{
    if(blob.Edges() != NULL && blob.Edges()->total > 0)
    {
        CvSeq *hull = cvConvexHull2( blob.Edges(), 0, CV_CLOCKWISE, 1 );
        return fabs(cvContourArea(hull));
    }
    return blob.Area();
}
/* Finds connected components in `mask`, discards small blobs, smooths the
 * remaining ones (polygonal approximation or convex hull) and repaints
 * them into the mask as filled white regions.
 *
 * @mask        8-bit foreground mask, cleaned up and overwritten in place.
 * @poly1_hull0 non-zero: cvApproxPoly smoothing; zero: convex hull.
 * @perimScale  blobs with perimeter < (width+height)/perimScale are dropped.
 * @mem_storage optional storage reused for contours (created when NULL).
 * @contours    NOTE(review): passed by value, so the contour list assigned
 *              at the end is never visible to the caller; kept as-is to
 *              preserve the signature.
 */
static void find_connected_components (IplImage * mask, int poly1_hull0, float perimScale, CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);

  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    /* BUGFIX: the threshold q below is perimeter-based, but the old code
     * measured the contour AREA, so the filter cut the wrong blobs.
     * Measure the perimeter, as cvSegmentFGMask does. */
    double len = cvContourPerimeter (c);

    /* calculate perimeter len threshold: */
    double q = (mask->height + mask->width) / perimScale;

    /* Get rid of blob if its perimeter is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if its large enough */
      CvSeq *c_new;

      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new = cvApproxPoly (c, sizeof (CvContour), mem_storage,
            CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }

      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours (&scanner);

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);

  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8,
        cvPoint (0, 0));
}
/**
 * GetConvexHull
 * Computes the convex hull of the blob's external contour.
 * @return the hull as a point list (counter-clockwise, actual points),
 *         or NULL when the contour points are unavailable.
 * Original author: Ricard Borràs, 25-05-2005.
 */
t_PointList CBlob::GetConvexHull()
{
    CvSeq *hull = NULL;
    CvSeq *contourPoints = m_externalContour.GetContourPoints();

    if( contourPoints )
        hull = cvConvexHull2( contourPoints, m_storage,
                              CV_COUNTER_CLOCKWISE, 1 );

    return hull;
}
// Draws the convex hull of the global `contours` sequence onto `frame`,
// then collects deep convexity defect points (depth > 10) as candidate
// palm points into the global `palm` sequence.
// NOTE(review): operates entirely on globals declared elsewhere in this
// file (hull, contours, pt, pt0, p, frame, defect, defectstorage, palm).
void getconvexhull()
{
    // return_points == 0 on a CvSeq -> hull elements are CvPoint*,
    // hence the double dereference below
    hull = cvConvexHull2( contours, 0, CV_CLOCKWISE, 0 );
    // start from the last hull vertex so the drawn polygon closes
    pt0 = **CV_GET_SEQ_ELEM( CvPoint*, hull, hull->total - 1 );
    for(int i = 0; i < hull->total; i++ )
    {
        pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, i );
        //printf("%d,%d\n",pt.x,pt.y);
        cvLine( frame, pt0, pt, CV_RGB( 128, 128, 128 ),2,8,0);
        pt0 = pt;
    }
    // compute convexity defects of the contour w.r.t. its hull
    // (original comment here was mojibake and untranslatable)
    defect = cvConvexityDefects(contours,hull,defectstorage);
    for(int i=0;i<defect->total;i++)
    {
        CvConvexityDefect* d=(CvConvexityDefect*)cvGetSeqElem(defect,i);
        // if(d->depth < 50)
        // {
        //  p.x = d->start->x;
        //  p.y = d->start->y;
        //  cvCircle(frame,p,5,CV_RGB(255,255,255),-1,CV_AA,0);
        //  p.x = d->end->x;
        //  p.y = d->end->y;
        //  cvCircle(frame,p,5,CV_RGB(255,255,255),-1,CV_AA,0);
        // }
        // deep defects (depth > 10) are treated as palm candidates
        if(d->depth > 10)
        {
            p.x = d->depth_point->x;
            p.y = d->depth_point->y;
            cvCircle(frame,p,5,CV_RGB(255,255,0),-1,CV_AA,0);
            cvSeqPush(palm,&p);
        }
    }
    //if(palm->total>1)
    //{
    //  cvMinEnclosingCircle(palm,&mincirclecenter,&radius);
    //  cvRound(radius);
    //  mincirclecenter2.x = cvRound(mincirclecenter.x);
    //  mincirclecenter2.y = cvRound(mincirclecenter.y);
    //  cvCircle(frame,mincirclecenter2,cvRound(radius),CV_RGB(255,128,255),4,8,0);
    //  cvCircle(frame,mincirclecenter2,10,CV_RGB(255,128,255),4,8,0);
    //  palmcenter = cvMinAreaRect2(palm,0);
    //  center.x = cvRound(palmcenter.center.x);
    //  center.y = cvRound(palmcenter.center.y);
    //  cvEllipseBox(frame,palmcenter,CV_RGB(128,128,255),2,CV_AA,0);
    //  cvCircle(frame,center,10,CV_RGB(128,128,255),-1,8,0);
    //}
}
/* Segments a foreground mask into cleaned-up connected regions.
 *
 * The mask is morphologically opened/closed, small contours (perimeter
 * below (rows+cols)/perimScale) are discarded, the survivors are smoothed
 * (polygonal approximation when poly1Hull0, convex hull otherwise) and
 * repainted into the mask as filled regions.
 *
 * Returns the contour list ONLY when the caller supplied `storage` (the
 * contours live there); with storage == NULL a temporary storage is used
 * and released, so NULL is returned — the result is then just the
 * repainted mask. */
CV_IMPL CvSeq* cvSegmentFGMask( CvArr* _mask, int poly1Hull0, float perimScale, CvMemStorage* storage, CvPoint offset )
{
    CvMat mstub, *mask = cvGetMat( _mask, &mstub );
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;

    // clean up raw mask
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );

    // find contours around only bigger regions
    scanner = cvStartFindContours( mask, tempStorage, sizeof(CvContour),
        CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );

    while( (c = cvFindNextContour( scanner )) != 0 )
    {
        double len = cvContourPerimeter( c );
        double q = (mask->rows + mask->cols)/perimScale; // calculate perimeter len threshold
        if( len < q ) //Get rid of blob if it's perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else //Smooth it's edges if it's large enough
        {
            CvSeq* newC;
            if( poly1Hull0 ) //Polygonal approximation of the segmentation
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 );
            else //Convex Hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // paint the found regions back into the image
    cvZero( mask );
    for( c=contours; c != 0; c = c->h_next )
        cvDrawContours( mask, c, cvScalarAll(255), cvScalarAll(0), -1,
            CV_FILLED, 8, cvPoint(-offset.x,-offset.y));

    // with a temporary storage the contours die here, so return NULL
    if( tempStorage != storage )
    {
        cvReleaseMemStorage( &tempStorage );
        contours = 0;
    }

    return contours;
}
/* Segments the hand from a depth frame and returns the convexity defects
 * of its outer contour, or NULL when no hand / no contour was found.
 *
 * The defects sequence lives in the contour storage (cvConvexityDefects is
 * called with a NULL storage, which falls back to the contour's storage),
 * so that storage must stay alive for the caller.
 * NOTE(review): g_storage/hull_storage backing the returned sequence still
 * leak — the caller has no handle to release them.
 *
 * BUGFIXES vs. the previous version:
 *  - the scratch `image` was leaked on every call;
 *  - both mem storages were leaked on the contour-less path.
 */
static CvSeq * get_defects (guint16* depth, guint width, guint height, guint start_x, guint start_y, guint start_z)
{
  IplImage *img;
  IplImage *image = NULL;
  CvSeq *points = NULL;
  CvSeq *contours = NULL;
  CvMemStorage *g_storage, *hull_storage;

  img = segment_hand (depth, width, height, start_x, start_y, start_z);
  if (img == NULL) {
    return NULL;
  }

  /* NOTE(review): `img` is owned by segment_hand's machinery — confirm
   * before freeing it here. */
  image = cvCreateImage(cvGetSize(img), 8, 1);
  cvCopy(img, image, 0);
  cvSmooth(image, img, CV_MEDIAN, 7, 0, 0, 0);
  cvThreshold(img, image, 150, 255, CV_THRESH_OTSU);

  g_storage = cvCreateMemStorage (0);
  cvFindContours (image, g_storage, &contours, sizeof(CvContour),
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

  /* BUGFIX: the scratch image is no longer needed once contours exist. */
  cvReleaseImage (&image);

  hull_storage = cvCreateMemStorage(0);
  if (contours) {
    /* return_points == 0: hull of pointers, as cvConvexityDefects expects */
    points = cvConvexHull2(contours, hull_storage, CV_CLOCKWISE, 0);
    return cvConvexityDefects (contours, points, NULL);
  }

  /* BUGFIX: nothing to return, so the storages can be released. */
  cvReleaseMemStorage (&hull_storage);
  cvReleaseMemStorage (&g_storage);
  return NULL;
}
/* This method will calculate the convex hull of source landmarks and populate the pointsInsideHull vector with these points coordinates */ void PAW::populatePointsInsideHull(){ //calc scrLandmarks convex hull CvPoint* pointsHull = (CvPoint*)malloc( nLandmarks * sizeof(pointsHull[0])); int* hull = (int*)malloc( nLandmarks * sizeof(hull[0])); CvMat pointMat = cvMat( 1, nLandmarks, CV_32SC2, pointsHull ); CvMat hullMat = cvMat( 1, nLandmarks, CV_32SC1, hull ); for(int i = 0; i < nLandmarks; i++ ) { pointsHull[i] = cvPoint(srcLandmarks.at<int>(i,0),srcLandmarks.at<int>(i,1)); } cvConvexHull2( &pointMat, &hullMat, CV_CLOCKWISE, 0 ); int hullcount = hullMat.cols; CvPoint* pointsHullFinal = (CvPoint*)malloc( hullcount * sizeof(pointsHullFinal[0])); for(int i = 0; i < hullcount; i++ ){ int ptIndex = hull[i]; CvPoint pt = cvPoint( srcLandmarks.at<int>(ptIndex,0), srcLandmarks.at<int>(ptIndex,1)); pointsHullFinal[i] = pt; } CvMat hullMatPoints = cvMat( 1, hullcount, CV_32SC2, pointsHullFinal); //check if point belongs for (int j=0;j<baseImageHeight;j++){ for(int i=0;i< baseImageWidth;i++){ double distance = cvPointPolygonTest(&hullMatPoints,cvPoint2D32f(i,j),1); if(distance >=0){ pointsInsideHull.push_back(cvPoint(i,j)); } } } }
/* createConvexHull: Create a Convex Hull for a sequence of contours.
 * Convex Hull allows for classification of hand state.
 *
 * @ctx: context from compute_ContourTree (must be non-NULL).
 *
 * Returns a newly allocated W x H 3-channel RGB image (caller releases it)
 * with the hull outline drawn in green.
 *
 * BUGFIX: with a CvSeq input and return_points == 0, cvConvexHull2 returns
 * a sequence of CvPoint* (pointers into the contour), so each element needs
 * a double dereference; the old single dereference (the "//??" line) read
 * the pointer bytes as coordinates.
 */
IplImage *createConvexHull(Convexctx_t *ctx)
{
    assert(ctx);

    CvSeq *hull = cvConvexHull2(ctx->contour, 0, CV_CLOCKWISE, 0);

    IplImage *img = cvCreateImage(cvSize(W,H), 8, 3);
    cvZero(img);

    /* start from the last hull vertex so the drawn polygon closes */
    CvPoint pt0 = **CV_GET_SEQ_ELEM(CvPoint*, hull, hull->total - 1);
    for(int i = 0; i < hull->total; ++i){
        CvPoint pt = **CV_GET_SEQ_ELEM(CvPoint*, hull, i);
        cvLine( img, pt0, pt, CV_RGB( 0, 255, 0 ));
        pt0 = pt;
    }
    return img;
}
/* Computes the minimum-area (rotated) bounding rectangle of a 2D point set.
 *
 * @array   CvSeq of points / pointers to points, or a 1xN / Nx1 CvMat.
 * @storage optional storage; a child storage is created from it (or a
 *          temporary one when neither it nor the sequence provides one).
 *
 * The point set is first reduced to its convex hull, a hull of pointers is
 * flattened into a plain point sequence, then the rotating-calipers
 * algorithm is applied. Degenerate inputs (n <= 2) are handled explicitly.
 *
 * BUGFIX: in the pointer-hull flattening loop the CvSeqReader was never
 * advanced, so the first hull point was copied ptseq->total times; the
 * missing CV_NEXT_SEQ_ELEM (present in upstream OpenCV) is restored.
 */
CV_IMPL CvBox2D
cvMinAreaRect2( const CvArr* array, CvMemStorage* storage )
{
    CvMemStorage* temp_storage = 0;
    CvBox2D box;
    CvPoint2D32f* points = 0;

    CV_FUNCNAME( "cvMinAreaRect2" );

    memset(&box, 0, sizeof(box));

    __BEGIN__;

    int i, n;
    CvSeqReader reader;
    CvContour contour_header;
    CvSeqBlock block;
    CvSeq* ptseq = (CvSeq*)array;
    CvPoint2D32f out[3];

    if( CV_IS_SEQ(ptseq) )
    {
        if( !CV_IS_SEQ_POINT_SET(ptseq) &&
            (CV_SEQ_KIND(ptseq) != CV_SEQ_KIND_CURVE ||
             !CV_IS_SEQ_CONVEX(ptseq) ||
             CV_SEQ_ELTYPE(ptseq) != CV_SEQ_ELTYPE_PPOINT ))
            CV_ERROR( CV_StsUnsupportedFormat,
                "Input sequence must consist of 2d points or pointers to 2d points" );
        if( !storage )
            storage = ptseq->storage;
    }
    else
    {
        CV_CALL( ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block ));
    }

    if( storage )
    {
        CV_CALL( temp_storage = cvCreateChildMemStorage( storage ));
    }
    else
    {
        CV_CALL( temp_storage = cvCreateMemStorage(1 << 10));
    }

    if( !CV_IS_SEQ_CONVEX( ptseq ))
    {
        /* Not convex yet: replace the input with its convex hull. */
        CV_CALL( ptseq = cvConvexHull2( ptseq, temp_storage, CV_CLOCKWISE, 1 ));
    }
    else if( !CV_IS_SEQ_POINT_SET( ptseq ))
    {
        /* Convex hull of pointers: copy the pointed-to points into a flat
           point sequence. */
        CvSeqWriter writer;

        if( !CV_IS_SEQ(ptseq->v_prev) || !CV_IS_SEQ_POINT_SET(ptseq->v_prev))
            CV_ERROR( CV_StsBadArg,
                "Convex hull must have valid pointer to point sequence stored in v_prev" );
        cvStartReadSeq( ptseq, &reader );
        cvStartWriteSeq( CV_SEQ_KIND_CURVE|CV_SEQ_FLAG_CONVEX|CV_SEQ_ELTYPE(ptseq->v_prev),
                         sizeof(CvContour), CV_ELEM_SIZE(ptseq->v_prev->flags),
                         temp_storage, &writer );

        for( i = 0; i < ptseq->total; i++ )
        {
            CvPoint pt = **(CvPoint**)(reader.ptr);
            CV_WRITE_SEQ_ELEM( pt, writer );
            /* BUGFIX: advance to the next pointer element. */
            CV_NEXT_SEQ_ELEM( ptseq->elem_size, reader );
        }

        ptseq = cvEndWriteSeq( &writer );
    }

    n = ptseq->total;
    CV_CALL( points = (CvPoint2D32f*)cvAlloc( n*sizeof(points[0]) ));
    cvStartReadSeq( ptseq, &reader );

    /* Convert integer points to float; float points are copied as-is. */
    if( CV_SEQ_ELTYPE( ptseq ) == CV_32SC2 )
    {
        for( i = 0; i < n; i++ )
        {
            CvPoint pt;
            CV_READ_SEQ_ELEM( pt, reader );
            points[i].x = (float)pt.x;
            points[i].y = (float)pt.y;
        }
    }
    else
    {
        for( i = 0; i < n; i++ )
        {
            CV_READ_SEQ_ELEM( points[i], reader );
        }
    }

    if( n > 2 )
    {
        icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out );
        box.center.x = out[0].x + (out[1].x + out[2].x)*0.5f;
        box.center.y = out[0].y + (out[1].y + out[2].y)*0.5f;
        box.size.height = (float)sqrt((double)out[1].x*out[1].x + (double)out[1].y*out[1].y);
        box.size.width = (float)sqrt((double)out[2].x*out[2].x + (double)out[2].y*out[2].y);
        box.angle = (float)atan2( -(double)out[1].y, (double)out[1].x );
    }
    else if( n == 2 )
    {
        box.center.x = (points[0].x + points[1].x)*0.5f;
        box.center.y = (points[0].y + points[1].y)*0.5f;
        double dx = points[1].x - points[0].x;
        double dy = points[1].y - points[0].y;
        box.size.height = (float)sqrt(dx*dx + dy*dy);
        box.size.width = 0;
        box.angle = (float)atan2( -dy, dx );
    }
    else
    {
        if( n == 1 )
            box.center = points[0];
    }

    box.angle = (float)(box.angle*180/CV_PI);

    __END__;

    cvReleaseMemStorage( &temp_storage );
    cvFree( &points );

    return box;
}
ReturnType HandsMotionTracking::onExecute() { // 영상을 Inport로부터 취득 opros_any *pData = ImageIn.pop(); RawImage result; //아웃 데이터 std::vector<PositionDataType> data; if(pData != NULL){ // 포트로 부터 이미지 취득 RawImage Image = ImageIn.getContent(*pData); RawImageData *RawImage = Image.getImage(); // 현재영상의 크기를 취득 m_in_width = RawImage->getWidth(); m_in_height = RawImage->getHeight(); // 메모리 한번 해제해주고 if(m_image_buff != NULL) cvReleaseImage(&m_image_buff); if(m_image_dest != NULL) cvReleaseImage(&m_image_dest); if(m_image_dest2 != NULL) cvReleaseImage(&m_image_dest2); if(m_image_th != NULL) cvReleaseImage(&m_image_th); if(m_image_th2 != NULL) cvReleaseImage(&m_image_th2); // 이미지용 메모리 할당 m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);//원본 이미지 m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3); m_image_dest2 = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3); m_image_th = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);//영역 추출 이미지 m_image_th2 = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);//영역 추출 이미지 if(!video_flag) { std::string cpath = getProperty("opros.component.dir"); std::string file = getProperty("VideoFile"); if (file == "") file = "sample.avi"; std::string path = cpath + file; m_video = NULL; m_video = cvCreateFileCapture(path.c_str()); //비디오 video_flag = true;// 비디오가 계속 새로 재생됨을 방지 } // 영상에 대한 정보를 확보!memcpy memcpy(m_image_buff->imageData, RawImage->getData(), RawImage->getSize()); // 출력용 cvCopy(m_image_buff, m_image_dest, 0); // 색상 분리용 이미지 IplImage* m_image_YCrCb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 3); IplImage* m_Y = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1); IplImage* m_Cr = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1); IplImage* m_Cb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1); cvCvtColor(m_image_buff, m_image_YCrCb, CV_RGB2YCrCb); //RGB - > YCrCV 변환 cvSplit(m_image_YCrCb, m_Y, m_Cr, m_Cb, NULL); //채널 분리 
//추출이 필요한 영역 픽셀 데이터 저장 변수 unsigned char m_Cr_val = 0; unsigned char m_Cb_val = 0; // 살색추출 for(int i=0;i<m_image_buff->height;i++) { for(int j=0;j<m_image_buff->width;j++) { //Cr 영역과 Cb 영역 추출 m_Cr_val = (unsigned char)m_Cr->imageData[i*m_Cr->widthStep+j]; m_Cb_val = (unsigned char)m_Cb->imageData[i*m_Cb->widthStep+j]; //살색에 해당하는 영역인지 검사 if( (77 <= m_Cr_val) && (m_Cr_val <= 127) && (133 <= m_Cb_val) && (m_Cb_val <= 173) ) { // 살색부분은 하얀색 m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0] = (unsigned char)255; m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1] = (unsigned char)255; m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2] = (unsigned char)255; } else { // 나머지는 검정색 m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0]= 0; m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1]= 0; m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2]= 0; } } } //살색 추출한 영상을 이진화 cvCvtColor(m_image_buff, m_image_th, CV_RGB2GRAY); //잡영 제거를 위한 연산 cvDilate (m_image_th, m_image_th, NULL, 2);//팽창 cvErode (m_image_th, m_image_th, NULL, 2);//침식 //변수 및 이미지 메모리 초기화 int temp_num = 0; int StartX , StartY, EndX , EndY; int nNumber = 0; m_nThreshold = 100; if( m_rec_out != NULL ) { delete m_rec_out; m_rec_out = NULL; m_nBlobs_out = _DEF_MAX_BLOBS; } else { m_rec_out = NULL; m_nBlobs_out = _DEF_MAX_BLOBS; } if( m_image_th2 != NULL ) cvReleaseImage( &m_image_th2 ); //레이블링 할 영상 따로 생성 m_image_th2 = cvCloneImage( m_image_th ); //레이블링 할 이미지의 크기 저장 int nWidth = m_image_th2->width; int nHeight = m_image_th2->height; //해당 영상 크기만큼 버프 설정 unsigned char* tmpBuf = new unsigned char [nWidth * nHeight]; for(int j=0; j<nHeight ;j++) for(int i=0; i<nWidth ;i++) //전 픽셀 순회 tmpBuf[j*nWidth+i] = (unsigned char)m_image_th2->imageData[j*m_image_th2->widthStep+i]; ////// 레이블링을 위한 포인트 초기화 m_vPoint_out = new Visited [nWidth * nHeight]; for(int nY = 0; nY < nHeight; nY++) { for(int nX = 0; nX < nWidth; nX++) { m_vPoint_out[nY * nWidth + nX].bVisitedFlag = FALSE; m_vPoint_out[nY * nWidth + 
nX].ptReturnPoint.x = nX; m_vPoint_out[nY * nWidth + nX].ptReturnPoint.y = nY; } } ////// 레이블링 수행 for(int nY = 0; nY < nHeight; nY++) { for(int nX = 0; nX < nWidth; nX++) { if(tmpBuf[nY * nWidth + nX] == 255) // Is this a new component?, 255 == Object { temp_num++; tmpBuf[nY * nWidth + nX] = temp_num; StartX = nX, StartY = nY, EndX = nX, EndY= nY; __NRFIndNeighbor(tmpBuf, nWidth, nHeight, nX, nY, &StartX, &StartY, &EndX, &EndY, m_vPoint_out); if(__Area(tmpBuf, StartX, StartY, EndX, EndY, nWidth, temp_num) < m_nThreshold) { for(int k = StartY; k <= EndY; k++) { for(int l = StartX; l <= EndX; l++) { if(tmpBuf[k * nWidth + l] == temp_num) tmpBuf[k * nWidth + l] = 0; } } --temp_num; if(temp_num > 250) temp_num = 0; } } } } // 포인트 메모리 해제 delete m_vPoint_out; //결과 보존 nNumber = temp_num; //레이블링 수만큼 렉트 생성 if( nNumber != _DEF_MAX_BLOBS ) m_rec_out = new CvRect [nNumber]; //렉트 만들기 if( nNumber != 0 ) DetectLabelingRegion(nNumber, tmpBuf, nWidth, nHeight,m_rec_out); for(int j=0; j<nHeight; j++) for(int i=0; i<nWidth ; i++) m_image_th2->imageData[j*m_image_th2->widthStep+i] = tmpBuf[j*nWidth+i]; delete tmpBuf; //레이블링 수 보존 m_nBlobs_out = nNumber; //레이블링 영역 거르기 int nMaxWidth = m_in_height * 9 / 10; // 영상 가로 전체 크기의 90% 이상인 레이블은 제거 int nMaxHeight = m_in_width * 9 / 10; // 영상 세로 전체 크기의 90% 이상인 레이블은 제거 //최소영역과 최대영역 지정- 화면 크기에 영향 받음.. _BlobSmallSizeConstraint( 5, 150, m_rec_out, &m_nBlobs_out); _BlobBigSizeConstraint(nMaxWidth, nMaxHeight,m_rec_out, &m_nBlobs_out); //앞으로 쓸 메모리 등록 storage1 = cvCreateMemStorage(0); storage2 = cvCreateMemStorage(0); //변수 초기화 CvPoint point; CvSeq* seq[10]; CvSeq* hull; CvPoint end_pt; CvPoint center; //내보낼 데이터 초기화 outData[0].x = 0, outData[0].y = 0; outData[1].x = 0, outData[1].y = 0; outData[2].x = 0, outData[2].y = 0; int num = 0; int temp_x = 0; int temp_y = 0; int rect = 0; //만일을 대비하여 준비한 시퀸스 배열의 크기를 초과하지 않도록 조절 //일단 한곳에서만 영상이 나오도록 조절.. 
if(m_nBlobs_out > 1) { m_nBlobs_out = 1; } //레이블링 영역 내의 처리 시작 for( int i=0; i < m_nBlobs_out; i++ ) { //사각형 그리기에 필요한 두점 저장 CvPoint pt1 = cvPoint( m_rec_out[i].x, m_rec_out[i].y ); CvPoint pt2 = cvPoint( pt1.x + m_rec_out[i].width,pt1.y + m_rec_out[i].height ); // 컬러값 설정 CvScalar color = cvScalar( 0, 0, 255 ); //레이블 사각형 그리기 - 확인용 //cvDrawRect( m_image_dest, pt1, pt2, color); //레이블을 관심영역으로 지정할 이미지 생성 temp_mask = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1); temp_mask2 = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1); //관심영역 지정 cvSetImageROI(m_image_th, m_rec_out[i]); //관심영역 추출 cvCopy(m_image_th, temp_mask, 0); //관심영역 해제 cvResetImageROI(m_image_th); //관심영역 내의 오브젝트 처리를 위한 시퀸스 생성 seq[i] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage1); //관심영역에서 추출한이미지의 흰색 픽셀값으로 시퀸스 생성 for(int j =0; j < temp_mask ->height ; j++) { for(int k = 0; k < temp_mask ->width; k++) { if((unsigned char)temp_mask->imageData[j*temp_mask->widthStep+k] == 255) { point.x = k; //흰색 픽셀 x좌표 저장 point.y = j; //흰색 픽셀 y좌표 저장 cvSeqPush(seq[i], &point); //시퀸스 구조체에 해당 좌표 삽입 temp_x += point.x; //좌표 누적 temp_y += point.y; //좌표 누적 num++; //픽셀 수 카운트 } } } //좌표 초기화 point.x = 0; point.y = 0; end_pt.x = 0; end_pt.y = 0; center.x = 0; center.y = 0; CvPoint dist_pt; //중심점과의 최대거리를 찾을 컨백스헐 저장 double fMaxDist = 0; //중심점과의 최대거리 저장 double fDist = 0; //거리계산에 사용 //중심점 찾기 - 픽셀의 평균값 찾기 if(num != 0) { center.x = (int)temp_x/num; //평균 좌표값 구하기 center.y = (int)temp_y/num; //평균 좌표값 구하기 } //관심영역 설정 cvSetImageROI(m_image_dest, m_rec_out[i]); /////////컨백스헐 그리기//////// if(seq[i]->total !=0) { //컨백스헐 구하기 hull = cvConvexHull2(seq[i], 0, CV_COUNTER_CLOCKWISE, 0); point = **CV_GET_SEQ_ELEM(CvPoint*, hull,hull->total-1); //구한 컨백스헐 라인으로 그리기 for(int x = 0; x < hull->total; x++) { CvPoint hull_pt = **CV_GET_SEQ_ELEM(CvPoint*, hull,x); //컨백스헐 라인 그리기 //cvLine(m_image_dest, point, hull_pt, CV_RGB(255, 255, 0 ),2, 8); point = hull_pt; //최대 거리 구하기 dist_pt = 
**CV_GET_SEQ_ELEM(CvPoint*, hull,x); fDist = sqrt((double)((center.x - dist_pt.x) * (center.x - dist_pt.x) + (center.y - dist_pt.y) * (center.y - dist_pt.y))); if(fDist > fMaxDist) { max_pt = dist_pt; fMaxDist = fDist; } } } //중심점그리기 cvCircle(m_image_dest,center,5, CV_RGB(0,0,255), 5); //내보낼 중심점 데이터 저장 outData[0].x = center.x; outData[0].y = center.y; ////////마스크 만들기/////// //중심점을 기준으로 그릴 마스크 이미지 생성 circle_mask = cvCreateImage(cvGetSize(temp_mask), 8, 1); //바탕은 검은색으로 cvSetZero(circle_mask); //흰색 원 - 손 영상과의 연산을 위해 바이너리 이미지에 그리기 int radi = (int)m_rec_out[i].height/2.9; // 원 크기 수동조절.. //흰색 원과 흰색 네모로 구성된 마스크 영상 생성을 위한 그리기 cvCircle(circle_mask, center, radi, CV_RGB(255,255,255),CV_FILLED); cvDrawRect(circle_mask, cvPoint(center.x - radi, center.y),cvPoint(center.x + radi, pt2.y), CV_RGB(255,255,255),CV_FILLED); //마스크 추출 cvSub(temp_mask, circle_mask, temp_mask, 0); ///////관심영역 레이블링 - 손가락 끝 추출////// //변수 및 이미지 메모리 초기화 int temp_num_in = 0; int StartX_in , StartY_in, EndX_in , EndY_in; int nNumber_in = 0; m_nThreshold_in = 10; if( m_rec_in != NULL ) { delete m_rec_in; m_rec_in = NULL; m_nBlobs_in = _DEF_MAX_BLOBS; } else { m_rec_in = NULL; m_nBlobs_in = _DEF_MAX_BLOBS; } if( temp_mask2 != NULL ) cvReleaseImage( &temp_mask2 ); temp_mask2 = cvCloneImage( temp_mask ); //들어온 이미지의 크기 저장 int nWidth = temp_mask2->width; int nHeight = temp_mask2->height; //영상 크기만큼 버프 설정 unsigned char* tmpBuf_in = new unsigned char [nWidth * nHeight]; for(int j=0; j<nHeight ;j++) for(int i=0; i<nWidth ;i++) //전 픽셀 순회 tmpBuf_in[j*nWidth+i] = (unsigned char)temp_mask2->imageData[j*temp_mask2->widthStep+i]; /////// 레이블링을 위한 포인트 초기화 //////// m_vPoint_in = new Visited [nWidth * nHeight]; for(int nY = 0; nY < nHeight; nY++) { for(int nX = 0; nX < nWidth; nX++) { m_vPoint_in[nY * nWidth + nX].bVisitedFlag = FALSE; m_vPoint_in[nY * nWidth + nX].ptReturnPoint.x = nX; m_vPoint_in[nY * nWidth + nX].ptReturnPoint.y = nY; } } ////레이블링 수행 for(int nY = 0; nY < nHeight; nY++) { for(int nX = 0; nX < nWidth; nX++) { 
if(tmpBuf_in[nY * nWidth + nX] == 255) // Is this a new component?, 255 == Object { temp_num_in++; tmpBuf_in[nY * nWidth + nX] = temp_num_in; StartX_in = nX, StartY_in = nY, EndX_in = nX, EndY_in= nY; __NRFIndNeighbor(tmpBuf_in, nWidth, nHeight, nX, nY, &StartX_in, &StartY_in, &EndX_in, &EndY_in,m_vPoint_in); if(__Area(tmpBuf_in, StartX_in, StartY_in, EndX_in, EndY_in, nWidth, temp_num_in) < m_nThreshold_in) { for(int k = StartY_in; k <= EndY_in; k++) { for(int l = StartX_in; l <= EndX_in; l++) { if(tmpBuf_in[k * nWidth + l] == temp_num_in) tmpBuf_in[k * nWidth + l] = 0; } } --temp_num_in; if(temp_num_in > 250) temp_num_in = 0; } } } } // 포인트 메모리 해제 delete m_vPoint_in; //레이블링 수 보존 nNumber_in = temp_num_in; if( nNumber_in != _DEF_MAX_BLOBS ) m_rec_in = new CvRect [nNumber_in]; if( nNumber_in != 0 ) DetectLabelingRegion(nNumber_in, tmpBuf_in, nWidth, nHeight,m_rec_in); for(int j=0; j<nHeight; j++) for(int i=0; i<nWidth ; i++) temp_mask2->imageData[j*temp_mask2->widthStep+i] = tmpBuf_in[j*nWidth+i]; delete tmpBuf_in; m_nBlobs_in = nNumber_in; //최소영역과 최대영역 설정 _BlobSmallSizeConstraint( 5, 5, m_rec_in, &m_nBlobs_in); _BlobBigSizeConstraint( temp_mask2->width, temp_mask2->height,m_rec_in, &m_nBlobs_in); //선언 및 초기화 CvPoint center_in; CvPoint point_in; point_in.x = 0; point_in.y = 0; center_in.x = 0; center_in.x = 0; CvSeq* seq_in[20]; //준비한 시퀸스 배열크기를 초과하지 않도록 조절 if(m_nBlobs_in > 20) { m_nBlobs_in =20; } for( int ni =0; ni < m_nBlobs_in; ni++ ) { //사각형 그리기에 필요한 두 점 저장 CvPoint pt1 = cvPoint( m_rec_in[ni].x, m_rec_in[ni].y ); CvPoint pt2 = cvPoint( pt1.x + m_rec_in[ni].width,pt1.y + m_rec_in[ni].height ); //색상값 설정 CvScalar color = cvScalar( 255,0 , 255 ); //레이블 사각형 그리기 //cvDrawRect( m_image_dest, pt1, pt2, color); //처리할 손끝 마스크 생성할 메모리 할당 in_mask = cvCreateImage(cvSize(m_rec_in[ni].width, m_rec_in[ni].height),8,1); //관심영역 설정 cvSetImageROI(temp_mask, m_rec_in[ni]); //필요한 영역 복사 cvCopy(temp_mask, in_mask, 0); //관심영역 해제 cvResetImageROI(temp_mask); //관심영역 내의 오브젝트 처리를 위한 시퀸스 생성 
seq_in[ni] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage2); //초기화 int temp_x_in = 0; int temp_y_in = 0; int num_in = 0; //관심영역에서 추출한이미지의 흰색 픽셀값으로 시퀸스 생성 for(int j =0; j < in_mask ->height ; j++) { for(int k = 0; k < in_mask ->width; k++) { if((unsigned char)in_mask->imageData[j*in_mask->widthStep+k] == 255) { point_in.x = k; //흰색 픽셀 x좌표 저장 point_in.y = j; //흰색 픽셀 y좌표 저장 cvSeqPush(seq_in[ni], &point_in); //시퀸스 구조체에 해당 좌표 삽입 temp_x_in += point_in.x; //좌표 누적 temp_y_in += point_in.y; //좌표 누적 num_in++; //픽셀 수 카운트 } } } //초기화 max_pt_in.x = 0; max_pt_in.y = 0; double fMaxDist_in = 0; double fDist_in = 0; //중심점 찾기 - 픽셀의 평균값 찾기 if(num_in != 0) { center_in.x = (int)temp_x_in/num_in + pt1.x; //평균 좌표값 구하기 center_in.y = (int)temp_y_in/num_in + pt1.y; //평균 좌표값 구하기 } //우선 끝점이 2개일때만.. if(m_nBlobs_in == 2) { //초기화 finger_pt[ni].x = NULL; finger_pt[ni].y = NULL; finger_pt[ni].x = NULL; finger_pt[ni].y = NULL; if(seq_in[ni]->total !=0) { //컨백스헐 구하기 - 윤곽선의 좌표 정보 겟 CvSeq* hull_in = cvConvexHull2(seq_in[ni], 0, CV_COUNTER_CLOCKWISE, 0); //point_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,hull_in->total-1); //구한 컨백스헐 라인으로 그리기 for(int nx = 0; nx < hull_in->total; nx++) { CvPoint hull_pt_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,nx); hull_pt_in.x = hull_pt_in.x + pt1.x; hull_pt_in.y = hull_pt_in.y + pt1.y; //중심점과 해당영역의 컨백스 헐 지점간의 거리 계산 fDist_in = sqrt((double)((center.x - hull_pt_in.x) * (center.x - hull_pt_in.x) + (center.y - hull_pt_in.y) * (center.y - hull_pt_in.y))); //거리가 먼 점 찾기 if(fDist_in > fMaxDist_in) { max_pt_in = hull_pt_in; fMaxDist_in = fDist_in; } } } //최대점 보존 finger_pt[ni].x = max_pt_in.x ; finger_pt[ni].y = max_pt_in.y ; //관심영역 해제할 경우의 값으로 보정 finger_pt[ni].x = finger_pt[ni].x + m_rec_out[i].x; finger_pt[ni].y = finger_pt[ni].y + m_rec_out[i].y; }
/*
 * Main thread for Kinect input, vision processing, and network send - everything, really.
 *
 * Pipeline per loop iteration: copy the latest Kinect RGB frame (under
 * mutex_rgb) -> dilate/erode -> binary threshold -> find contours ->
 * convex hull per large contour -> quad conversion/filtering -> sort into
 * buckets -> compute distance/angle -> send packet to the cRIO.
 *
 * @param ptr  unused pthread argument.
 * @return never returns a value; exits via pthread_exit(NULL).
 */
void *cv_threadfunc (void *ptr) {
    // Images for openCV
    IplImage* timg = cvCloneImage(rgbimg); // Image we do our processing on
    IplImage* dimg = cvCloneImage(timg);   // Image we draw on
    // Mask width/height down to even values (bitwise AND with -2 clears bit 0).
    CvSize sz = cvSize( timg->width & -2, timg->height & -2);
    IplImage* outimg = cvCreateImage(sz, 8, 3); // 8-bit, 3-channel display image

    // Mem. mgmt. Remember to clear each time we run loop.
    CvMemStorage* storage = cvCreateMemStorage(0);

    // Set region of interest to the even-sized area on both working images.
    cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));
    if (display) { cvSetImageROI(dimg, cvRect(0, 0, sz.width, sz.height)); }

    // Open network socket.
    CRRsocket = openSocket();
    if (CRRsocket < 0) pthread_exit(NULL);

    /*
     * MAIN LOOP
     */
    while (1)
    {
        // Sequence to run ApproxPoly on.
        // NOTE(review): header size is sizeof(CvSeq) here while other call
        // sites in this file use sizeof(CvContour); polyseq is also never
        // pushed to before being cleared below — looks vestigial. TODO confirm.
        CvSeq* polyseq = cvCreateSeq( CV_SEQ_KIND_CURVE | CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage );
        CvSeq* contours;  // Raw contours list
        CvSeq* hull;      // Current convex hull
        int hullcount;    // # of points in hull

        /* PULL RAW IMAGE FROM KINECT */
        // rgbimg is shared with the capture thread; guard all reads.
        pthread_mutex_lock( &mutex_rgb );
        if (display) { cvCopy(rgbimg, dimg, 0); }
        cvCopy(rgbimg, timg, 0);
        pthread_mutex_unlock( &mutex_rgb );

        /* DILATE */
        // NOTE(review): both structuring elements are created every iteration
        // and never released (cvReleaseStructuringElement) — leaks each loop.
        IplConvKernel* element = cvCreateStructuringElementEx(3, 3, 1, 1, 0);
        IplConvKernel* element2 = cvCreateStructuringElementEx(5, 5, 2, 2, 0);
        cvDilate(timg, timg, element2, 1);
        cvErode(timg, timg, element, 1);

        /* THRESHOLD*/
        cvThreshold(timg, timg, 100, 255, CV_THRESH_BINARY);

        /* OUTPUT PROCESSED OR RAW IMAGE (FindContours destroys image) */
        if (display) { cvCvtColor(dimg, outimg, CV_GRAY2BGR); }

        /* CONTOUR FINDING */
        cvFindContours(timg, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

        /* CONVEX HULL + POLYGON APPROXIMATION + CONVERT TO RECTANGLE + FILTER FOR INVALID RECTANGLES */
        // Store points to draw line between
        CvPoint* draw1;
        CvPoint* draw2;
        vector<PolyVertices> rectangleList;
        while (contours) // Run for all polygons
        {
            // List of raw rectangles
            PolyVertices fullrect;
            // Filter noise: ignore contours with area <= 600 px.
            if (fabs(cvContourArea(contours, CV_WHOLE_SEQ)) > 600)
            {
                // Get convex hull
                hull = cvConvexHull2( contours, storage, CV_CLOCKWISE, 1 );
                hullcount = hull->total;
                // Draw hull (red line)
                if (display)
                {
                    // Start from the last hull point so the polygon closes.
                    draw1 = (CvPoint*)cvGetSeqElem(hull, hullcount - 1);
                    for (int i = 0; i < hullcount; i++)
                    {
                        draw2 = (CvPoint*)cvGetSeqElem( hull, i );
                        cvLine( outimg, *draw1, *draw2, CV_RGB(255,0,0), 1, 8, 0 );
                        draw1 = draw2;
                    }
                }
                // Convert polys from convex hull to rectangles, fill list
                polyToQuad(hull, &fullrect, outimg);
                // Filter for bad rectangles: all four corners present and shape well-formed.
                if(!(fullrect.points[0] == NULL || fullrect.points[1] == NULL
                  || fullrect.points[2] == NULL || fullrect.points[3] == NULL)
                  && !fullrect.isMalformed())
                {
                    /* FILL rectangleList */
                    rectangleList.push_back(fullrect);
#ifdef DEBUG_MAIN
                    printf("RESULT: (%d,%d), (%d,%d), (%d,%d), (%d,%d)\n",
                            fullrect.points[0]->x, fullrect.points[0]->y,
                            fullrect.points[1]->x, fullrect.points[1]->y,
                            fullrect.points[2]->x, fullrect.points[2]->y,
                            fullrect.points[3]->x, fullrect.points[3]->y);
                    fflush(stdout);
#endif
                }
            }
            cvClearSeq(polyseq);
            contours = contours->h_next;
        }

        /* FILTER OVERLAPPING RECTANGLES */
        FilterInnerRects(rectangleList);

        /* SORT INTO CORRECT BUCKET */
        SortRects(rectangleList);

        /* DRAW & PROCESS MATH; FILL SEND STRUCT */
        // TODO: Might want to make the math stuff static for efficiency.
        RobotMath robot;
        TrackingData outgoing;
        memset(&outgoing, 0, sizeof(TrackingData));

        // Fill packets
        // Packet fields are unsigned 16bit integers, so we need to scale them up
        // Currently both dist and angle scaled 100x (hundredths precision)
        // NOTE:
        // Currently correct results are only calculated by using bottom basket and constant for top.
        // NOTE(review): rectangleList is indexed [0]..[3] below without a size
        // check — presumably SortRects() guarantees four slots; TODO confirm,
        // otherwise this is out-of-bounds when fewer targets are found.
        if (rectangleList[0].isValid())
        {
            outgoing.distHigh = 100 * robot.GetDistance(*(rectangleList[0].points[2]), *(rectangleList[0].points[3]), 0);
            outgoing.angleHigh = 100 * robot.GetAngle(*(rectangleList[0].points[2]), *(rectangleList[0].points[3]));
        }
//        if (rectangleList[1].isValid())
//        {
//            outgoing.distLeft = 100 * robot.GetDistance(*(rectangleList[1].points[2]), *(rectangleList[1].points[3]), 1);
//            outgoing.angleLeft = 100 * robot.GetAngle(*(rectangleList[1].points[2]), *(rectangleList[1].points[3]));
//        }
//        if (rectangleList[2].isValid())
//        {
//            outgoing.distRight = 100 * robot.GetDistance(*(rectangleList[2].points[2]), *(rectangleList[2].points[3]), 2);
//            outgoing.angleRight = 100 * robot.GetAngle(*(rectangleList[2].points[2]), *(rectangleList[2].points[3]));
//        }
        if (rectangleList[3].isValid())
        {
            outgoing.distLow = 100 * robot.GetDistance(*(rectangleList[3].points[2]), *(rectangleList[3].points[3]), 3);
            outgoing.angleLow = 100 * robot.GetAngle(*(rectangleList[3].points[2]), *(rectangleList[3].points[3]));
        }

        // Draw filtered rects (thick blue line)
        if (display)
        {
            for (int i = 0; i < 4; i++)
            {
                if (outimg && rectangleList[i].isValid())
                {
                    cvLine( outimg, *(rectangleList[i].points[3]), *(rectangleList[i].points[2]), CV_RGB(0,0,255), 2, 8, 0 );
                    cvLine( outimg, *(rectangleList[i].points[2]), *(rectangleList[i].points[0]), CV_RGB(0,0,255), 2, 8, 0 );
                    cvLine( outimg, *(rectangleList[i].points[0]), *(rectangleList[i].points[1]), CV_RGB(0,0,255), 2, 8, 0 );
                    cvLine( outimg, *(rectangleList[i].points[1]), *(rectangleList[i].points[3]), CV_RGB(0,0,255), 2, 8, 0 );
                }
            }
        }
#ifdef DEBUG_MAIN
        printf("Top distance: %d\n", outgoing.distHigh);
        printf("Top angle: %d\n\n", outgoing.angleHigh);
#endif
        // Vertical green line at x=320 marking image center.
        CvPoint cent1 = cvPoint(320, 0);
        CvPoint cent2 = cvPoint(320, 480);
        if (display) { cvLine( outimg, cent1, cent2, CV_RGB(0,255,0), 1, 8, 0 ); }

        /* SEND TO CRIO */
        sendData(&outgoing, CRRsocket);

        if( cvWaitKey( 15 )==27 )
        {
            // Empty for now.
        }

        /* DISPLAY */
        if (display) { cvShowImage (FREENECTOPENCV_WINDOW_N,outimg); }

        /* CLEANUP */
        // Clearing (not releasing) keeps the storage's memory for reuse next loop.
        cvClearMemStorage(storage);
    }
    pthread_exit(NULL);
}
/*
 * Find the largest external contour(s) in `src` and repaint the image so only
 * the pixels inside the retained (smoothed) contours are white (255), all
 * others black (0). Writes results back into `src` in place.
 *
 * Uses two contour-scanner passes: pass 1 measures the largest perimeter,
 * pass 2 discards every contour whose perimeter is strictly smaller than
 * that maximum (so only contour(s) tying the maximum survive) and smooths
 * the survivors via polygonal approximation (poly1Hull0 is hard-coded to 1,
 * so the cvConvexHull2 branch is dead here).
 *
 * NOTE(review): `temp = *src` copies only the IplImage header; the pixel
 * data pointer is shared, so `cvZero(_mask)` below appears to clear the
 * caller's image data before repainting — presumably intentional. TODO confirm.
 *
 * @param src  8-bit single-channel binary mask; modified in place.
 */
void Buoy::FC_FindBiggestContours(IplImage *src)
{
    // Reset member state used by this routine (members declared elsewhere).
    _mask=0;
    nContours=0;
    largest_length=0;
    len=0;
    dst=0;
    contours=0;
    c=0;
    newC=0;
    CvMemStorage* tempStorage = cvCreateMemStorage();
    temp=*src; // header copy only; shares pixel data with src
    // 32-bit image used as contour label canvas for cvDrawContours below.
    IplImage *src_img=cvCreateImage(cvSize(temp.width,temp.height),IPL_DEPTH_32S,1);
    // IplImage *dest=cvCreateImage(cvSize(temp.width,temp.height),IPL_DEPTH_8U,1);
    _mask=&temp;
    int poly1Hull0=1; // 1 -> cvApproxPoly smoothing; 0 -> convex hull (unused here)
    CvPoint offset;
    offset.x=0;
    offset.y=0;
    mask = cvGetMat( _mask, &mstub );
    // clean up raw mask: open removes speckle, close fills pinholes
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );
    // Pass 1: find the largest contour perimeter.
    scanner = cvStartFindContours( mask, tempStorage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
    while( (c = cvFindNextContour( scanner )) != 0 )
    {
        len = cvContourPerimeter( c );
        if(len > largest_length)
        {
            largest_length = len;
        }
    }
    contours=cvEndFindContours( &scanner );
    // Pass 2: keep only contour(s) whose perimeter reaches the maximum.
    scanner = cvStartFindContours( mask, tempStorage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
    while( (c = cvFindNextContour( scanner )) != 0 )
    {
        len = cvContourPerimeter( c );
        double q = largest_length ;
        if( len < q ) //Get rid of blob if its perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else //Smooth its edges if it's large enough
        {
            if( poly1Hull0 ) //Polygonal approximation of the segmentation
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 );
            else //Convex Hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
            R=cvBoundingRect(c,0); // bounding box of the retained contour (member)
        }
    }
    contours = cvEndFindContours( &scanner );
    // paint the found regions back into the image
    cvZero( src_img );
    cvZero( _mask ); // see NOTE(review) above: this clears src's pixel data
    for( c=contours; c != 0; c = c->h_next )
    {
        // Fill each retained contour with label value 1 in the 32-bit canvas.
        cvDrawContours( src_img, c, cvScalarAll(1), cvScalarAll(1), -1, -1, 8, cvPoint(-offset.x,-offset.y));
    }
    cvReleaseMemStorage( &tempStorage );
    // convert to 8 bit IplImage: label 1 (or -1) -> 255, everything else -> 0
    for( int i = 0; i < src_img->height; i++ )
        for( int j = 0; j < src_img->width; j++ )
        {
            int idx = CV_IMAGE_ELEM( src_img, int, i, j );
            // get reference to destination pixel at (row i, col j) in src;
            // for multi-channel images the column would need to be multiplied
            // by the number of channels
            dst = &CV_IMAGE_ELEM( src, uchar, i, j );
            if( idx == -1 || idx == 1 )
                *dst = (uchar)255;
            else if( idx <= 0 || idx > 1 )
                *dst = (uchar)0; // should not get here
            else
            {
                *dst = (uchar)0;
            }
        }
    //qDebug()<<nContours;
    cvReleaseImage(&src_img);
    // cvReleaseImage(&temp);
    //return dest;
}
/*
 * Clean up a binary foreground mask, keep only large connected components,
 * and optionally report their centers of mass and bounding boxes.
 *
 * @param mask        8-bit binary mask; cleaned and repainted in place.
 * @param poly1_hull0 non-zero -> smooth retained contours with cvApproxPoly;
 *                    zero -> replace them with their convex hull.
 * @param perimScale  divisor for (height+width) giving the minimum perimeter
 *                    a contour must have to be kept (larger -> keep more).
 * @param num         in: capacity of bbs/centers; out: number actually filled.
 *                    May be NULL, in which case contours are just redrawn.
 * @param bbs         optional output array of bounding rectangles (size >= *num).
 * @param centers     optional output array of centers of mass (size >= *num).
 *
 * NOTE: mem_storage/contours are function-static and reused across calls, so
 * this routine is not reentrant or thread-safe.
 */
void connected_Components(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
{
    static CvMemStorage* mem_storage = NULL;
    static CvSeq* contours = NULL;
    //CLEAN UP RAW MASK: open removes speckle, close fills small holes
    cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_OPEN, CVCLOSE_ITR );
    cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_CLOSE, CVCLOSE_ITR );
    //FIND CONTOURS AROUND ONLY BIGGER REGIONS
    if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
    else cvClearMemStorage(mem_storage); // reuse storage from previous call
    CvContourScanner scanner = cvStartFindContours(mask,mem_storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
    CvSeq* c;
    int numCont = 0;
    while( (c = cvFindNextContour( scanner )) != NULL )
    {
        double len = cvContourPerimeter( c );
        double q = (mask->height + mask->width) /perimScale; //calculate perimeter len threshold
        if( len < q ) //Get rid of blob if its perimeter is too small
        {
            cvSubstituteContour( scanner, NULL );
        }
        else //Smooth its edges if it's large enough
        {
            CvSeq* c_new;
            if(poly1_hull0) //Polygonal approximation of the segmentation
                c_new = cvApproxPoly(c,sizeof(CvContour),mem_storage,CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL,0);
            else //Convex Hull of the segmentation
                c_new = cvConvexHull2(c,mem_storage,CV_CLOCKWISE,1);
            cvSubstituteContour( scanner, c_new );
            numCont++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // PAINT THE FOUND REGIONS BACK INTO THE IMAGE
    cvZero( mask );
    IplImage *maskTemp;
    //CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
    if(num != NULL)
    {
        int N = *num, numFilled = 0, i=0;
        CvMoments moments;
        double M00, M01, M10;
        // Scratch image: one contour at a time for per-blob moments.
        maskTemp = cvCloneImage(mask);
        for(i=0, c=contours; c != NULL; c = c->h_next,i++ )
        {
            if(i < N) //Only process up to *num of them
            {
                cvDrawContours(maskTemp,c,CV_CVX_WHITE, CV_CVX_WHITE,-1,CV_FILLED,8);
                //Find the center of each contour
                if(centers != NULL)
                {
                    // Binary moments (last arg 1) on the single-blob scratch image.
                    cvMoments(maskTemp,&moments,1);
                    M00 = cvGetSpatialMoment(&moments,0,0);
                    M10 = cvGetSpatialMoment(&moments,1,0);
                    M01 = cvGetSpatialMoment(&moments,0,1);
                    centers[i].x = (int)(M10/M00);
                    centers[i].y = (int)(M01/M00);
                }
                //Bounding rectangles around blobs
                if(bbs != NULL)
                {
                    bbs[i] = cvBoundingRect(c);
                }
                cvZero(maskTemp); // reset scratch for the next blob
                numFilled++;
            }
            //Draw filled contours into mask (all contours, even past N)
            cvDrawContours(mask,c,CV_CVX_WHITE,CV_CVX_WHITE,-1,CV_FILLED,8); //draw to central mask
        } //end looping over contours
        *num = numFilled;
        cvReleaseImage( &maskTemp);
    }
    // ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
    else
    {
        for( c=contours; c != NULL; c = c->h_next )
        {
            cvDrawContours(mask,c,CV_CVX_WHITE, CV_CVX_BLACK,-1,CV_FILLED,8);
        }
    }
}
void connectComponent(IplImage* src, const int poly_hull0, const float perimScale, int *num, vector<CvRect> &rects, vector<CvPoint> ¢ers) { /* * Pre : "src" :is the input image * "poly_hull0" :is usually set to 1 * "perimScale" :defines how big connected component will be retained, bigger * the number, more components are retained (100) * * Post: "num" :defines how many connected component was found * "rects" :the bounding box of each connected component * "centers" :the center of each bounding box */ rects.clear(); centers.clear(); CvMemStorage* mem_storage = NULL; CvSeq* contours = NULL; // Clean up cvMorphologyEx(src, src, 0, 0, CV_MOP_OPEN, 1); cvMorphologyEx(src, src, 0, 0, CV_MOP_CLOSE, 1); // Find contours around only bigger regions mem_storage = cvCreateMemStorage(0); CvContourScanner scanner = cvStartFindContours(src, mem_storage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); CvSeq* c; int numCont = 0; while ((c = cvFindNextContour(scanner)) != NULL) { double len = cvContourPerimeter(c); // calculate perimeter len threshold double q = (double) (src->height + src->width) / perimScale; // get rid of blob if its perimeter is too small if (len < q) { cvSubstituteContour(scanner, NULL); } else { // smooth its edge if its large enough CvSeq* c_new; if (poly_hull0) { // polygonal approximation c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, 2, 0); } else { // convex hull of the segmentation c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1); } cvSubstituteContour(scanner, c_new); numCont++; } } contours = cvEndFindContours(&scanner); // Calc center of mass and/or bounding rectangles if (num != NULL) { // user wants to collect statistics int numFilled = 0, i = 0; for (i = 0, c = contours; c != NULL; c = c->h_next, i++) { if (i < *num) { // bounding retangles around blobs rects.push_back(cvBoundingRect(c)); CvPoint center = cvPoint(rects[i].x + rects[i].width / 2, rects[i].y + rects[i].height / 2); 
centers.push_back(center); numFilled++; } } *num = numFilled; } cvReleaseMemStorage(&mem_storage); }
CvSeq *find_contours( CvMat *input_image ) { CvSeq *contours = 0; CvMemStorage *storage = NULL; CvSeq *seq = 0; CvSeq *convex_contours = 0; CvSeq *polydp_contours = 0; CvMemStorage *hull_storage = NULL; float area; storage = cvCreateMemStorage(0); hull_storage = cvCreateMemStorage(0); /* ** Find the contours in the input image and store in the ** contours list structure. */ cvFindContours( input_image, storage, &contours , sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_TC89_KCOS, cvPoint(0,0) ); /* ** Now that we have found contours, zero out the original image to remove noise */ seq = contours; for ( ; seq != 0; seq = seq->h_next ) { /* ** Calculate convex hull of the contour */ convex_contours = cvConvexHull2(seq, storage, CV_CLOCKWISE, 1 ); /* ** Calculate area of the geometry */ area = fabsf(cvContourArea( convex_contours, CV_WHOLE_SEQ, 0)); /* ** Filter out only shapes that are target sized > MIN_RECT_AREA */ if ( area > tracking.min_rect_area ) { /* ** Approximate polygonal shape */ polydp_contours = cvApproxPoly(seq, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 4.0, 0); /* ** Draw poly contours on image for diagnostics */ #ifdef GRAPHICS cvDrawContours( input_image, polydp_contours, CV_RGB(255,255,255), CV_RGB(255,255,255), 0, 1, 8, cvPoint(0,0)); #endif /* ** If the shape has four corners, check and see it is a target */ if ( polydp_contours->total == 4 ) Detect_Targets( polydp_contours, input_image ); } } /* ** Clear mem storage (important!) */ cvClearMemStorage(storage); cvClearMemStorage(hull_storage); /* ** Release mem storage (important!) */ cvReleaseMemStorage(&storage); cvReleaseMemStorage(&hull_storage); return(contours); }
/*
 * Compute the minimum-area (rotated) bounding rectangle of a 2D point set.
 *
 * Accepts either a CvSeq of points (or pointers to points) or a CvMat of
 * points; reduces the set to its convex hull, then runs the rotating-calipers
 * algorithm on the hull to find the smallest enclosing rotated box.
 *
 * @param array    CvSeq* of 2D points / point pointers, or a CvMat of points.
 * @param storage  optional storage for intermediates; if NULL and input is a
 *                 sequence, the sequence's own storage is used as parent.
 * @return CvBox2D with center, size, and angle in degrees; zeroed for an
 *         empty input (n == 0), degenerate for 1 or 2 points.
 */
CV_IMPL CvBox2D cvMinAreaRect2( const CvArr* array, CvMemStorage* storage )
{
    cv::Ptr<CvMemStorage> temp_storage; // auto-released on scope exit
    CvBox2D box;
    cv::AutoBuffer<CvPoint2D32f> _points;
    CvPoint2D32f* points;

    memset(&box, 0, sizeof(box)); // n == 0 falls through to this zeroed result

    int i, n;
    CvSeqReader reader;
    CvContour contour_header;
    CvSeqBlock block;
    CvSeq* ptseq = (CvSeq*)array;
    CvPoint2D32f out[3]; // rotating-calipers output: corner + two edge vectors

    if( CV_IS_SEQ(ptseq) )
    {
        // Sequence input: must be a point set or a curve of point pointers.
        if( !CV_IS_SEQ_POINT_SET(ptseq) &&
            (CV_SEQ_KIND(ptseq) != CV_SEQ_KIND_CURVE ||
            CV_SEQ_ELTYPE(ptseq) != CV_SEQ_ELTYPE_PPOINT ))
            CV_Error( CV_StsUnsupportedFormat,
                "Input sequence must consist of 2d points or pointers to 2d points" );
        if( !storage )
            storage = ptseq->storage;
    }
    else
    {
        // Matrix input: wrap it in a sequence header without copying data.
        ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block );
    }

    if( storage )
    {
        temp_storage = cvCreateChildMemStorage( storage );
    }
    else
    {
        temp_storage = cvCreateMemStorage(1 << 10);
    }

    // The min-area rect of a point set equals that of its convex hull.
    ptseq = cvConvexHull2( ptseq, temp_storage, CV_CLOCKWISE, 1 );
    n = ptseq->total;

    _points.allocate(n);
    points = _points;
    cvStartReadSeq( ptseq, &reader );

    // Copy hull points into a flat float array for the calipers routine.
    if( CV_SEQ_ELTYPE( ptseq ) == CV_32SC2 )
    {
        for( i = 0; i < n; i++ )
        {
            CvPoint pt;
            CV_READ_SEQ_ELEM( pt, reader );
            points[i].x = (float)pt.x;
            points[i].y = (float)pt.y;
        }
    }
    else
    {
        for( i = 0; i < n; i++ )
        {
            CV_READ_SEQ_ELEM( points[i], reader );
        }
    }

    if( n > 2 )
    {
        // General case: rotating calipers over the hull.
        // out[0] = a corner; out[1], out[2] = the two edge vectors of the box.
        icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out );
        box.center.x = out[0].x + (out[1].x + out[2].x)*0.5f;
        box.center.y = out[0].y + (out[1].y + out[2].y)*0.5f;
        box.size.width = (float)sqrt((double)out[1].x*out[1].x + (double)out[1].y*out[1].y);
        box.size.height = (float)sqrt((double)out[2].x*out[2].x + (double)out[2].y*out[2].y);
        box.angle = (float)atan2( (double)out[1].y, (double)out[1].x );
    }
    else if( n == 2 )
    {
        // Degenerate case: segment -> zero-height box along the segment.
        box.center.x = (points[0].x + points[1].x)*0.5f;
        box.center.y = (points[0].y + points[1].y)*0.5f;
        double dx = points[1].x - points[0].x;
        double dy = points[1].y - points[0].y;
        box.size.width = (float)sqrt(dx*dx + dy*dy);
        box.size.height = 0;
        box.angle = (float)atan2( dy, dx );
    }
    else
    {
        // Single point: zero-size box centered on it.
        if( n == 1 )
            box.center = points[0];
    }

    // Convert angle from radians to degrees for the public CvBox2D contract.
    box.angle = (float)(box.angle*180/CV_PI);
    return box;
}
/*
 * Hand-gesture detection on a binary image.
 *
 * Finds the largest contour in img_8uc1, approximates it, computes its convex
 * hull and convexity defects, accumulates defect edge lengths, and (when
 * saveLength is set) classifies the gesture via identificaGesto(), logging
 * results to stdout and appending them to "archivo.txt". Draws defects, hull,
 * and the defect bounding box onto img_8uc3.
 *
 * @param img_8uc1  8-bit single-channel binary input; clobbered by cvFindContours.
 * @param img_8uc3  8-bit 3-channel image used for visualization output.
 *
 * NOTE(review): storage/storage1/storage2/storage3 are only released inside
 * the `areamax > 5000` branch — every call where no large contour is found
 * leaks `storage` and `storage3`.
 */
void detect(IplImage* img_8uc1,IplImage* img_8uc3) {
    // Timing: measure elapsed clock ticks for the whole detection.
    clock_t inicio, fin;
    inicio = clock();
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* first_contour = NULL;
    CvSeq* maxitem=NULL;
    // Single-character result buffer; ' ' means "no gesture identified".
    char resultado [] = " ";
    double area=0,areamax=0;
    double longitudExt = 0; // accumulated length along defect edges
    double radio = 0;       // aspect ratio of the defect bounding box
    int maxn=0;
    int Nc = cvFindContours( img_8uc1, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST );
    int n=0;
    //printf( "Contornos detectados: %d\n", Nc );
    if(Nc>0)
    {
        // Find the contour with the largest area.
        for( CvSeq* c=first_contour; c!=NULL; c=c->h_next )
        {
            area=cvContourArea(c,CV_WHOLE_SEQ );
            if(area>areamax)
            {
                areamax=area;
                maxitem=c;
                maxn=n;
            }
            n++;
        }
        CvMemStorage* storage3 = cvCreateMemStorage(0);
        // Only process contours big enough to plausibly be a hand.
        if(areamax>5000)
        {
            maxitem = cvApproxPoly( maxitem, sizeof(CvContour), storage3, CV_POLY_APPROX_DP, 10, 1 );
            CvPoint pt0;
            CvMemStorage* storage1 = cvCreateMemStorage(0);
            CvMemStorage* storage2 = cvCreateMemStorage(0);
            CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                        sizeof(CvPoint), storage1 );
            CvSeq* hull;
            CvSeq* defects;
            // Track the min/max corner over all defect points (bounding box).
            CvPoint minDefectPos;;
            minDefectPos.x = 1000000;
            minDefectPos.y = 1000000;
            CvPoint maxDefectPos;
            maxDefectPos.x = 0;
            maxDefectPos.y = 0;
            // Copy the approximated contour into a plain point sequence.
            for(int i = 0; i < maxitem->total; i++ )
            {
                CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, maxitem, i );
                pt0.x = p->x;
                pt0.y = p->y;
                cvSeqPush( ptseq, &pt0 );
            }
            // Hull as indices/pointers (return_points = 0) for cvConvexityDefects.
            hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 );
            int hullcount = hull->total;
            defects= cvConvexityDefects(ptseq,hull,storage2 );
            //printf(" Numero de defectos %d \n",defects->total);
            CvConvexityDefect* defectArray;
            int j=0;
            // This cycle marks all convexity defects of the current contours.
            longitudExt = 0;
            for(;defects;defects = defects->h_next)
            {
                int nomdef = defects->total; // defect count in this sequence
                //outlet_float( m_nomdef, nomdef );
                //printf(" defect no %d \n",nomdef);
                if(nomdef == 0)
                    continue;
                // Allocate a flat array for the defect set.
                //fprintf(stderr,"malloc\n");
                defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*nomdef);
                // Get defect set.
                //fprintf(stderr,"cvCvtSeqToArray\n");
                cvCvtSeqToArray(defects,defectArray, CV_WHOLE_SEQ);
                // Draw marks for all defects.
                for(int i=0; i<nomdef; i++)
                {
                    CvPoint startP;
                    startP.x = defectArray[i].start->x;
                    startP.y = defectArray[i].start->y;
                    CvPoint depthP;
                    depthP.x = defectArray[i].depth_point->x;
                    depthP.y = defectArray[i].depth_point->y;
                    CvPoint endP;
                    endP.x = defectArray[i].end->x;
                    endP.y = defectArray[i].end->y;
                    // Track min/max defect coordinates for the bounding box.
                    minDefectPos.x = getMin (startP.x, depthP.x, endP.x, minDefectPos.x);
                    minDefectPos.y = getMin (startP.y, depthP.y, endP.y, minDefectPos.y);
                    maxDefectPos.x = getMax (startP.x, depthP.x, endP.x, maxDefectPos.x);
                    maxDefectPos.y = getMax (startP.y, depthP.y, endP.y, maxDefectPos.y);
                    // Accumulate the length along the defect edges.
                    if (saveLength)
                    {
                        longitudExt += longBtwnPoints(startP, depthP);
                        longitudExt += longBtwnPoints(depthP, endP);
                    }
                    //printf(" defect depth for defect %d %f \n",i,defectArray[i].depth);
                    // Visualize: yellow/black edges, colored circles at the
                    // start (red), depth (dark blue), and end (green) points.
                    cvLine(img_8uc3, startP, depthP, CV_RGB(255,255,0),1, CV_AA, 0 );
                    cvCircle( img_8uc3, depthP, 5, CV_RGB(0,0,164), 2, 8,0);
                    cvCircle( img_8uc3, startP, 5, CV_RGB(255,0,0), 2, 8,0);
                    cvCircle( img_8uc3, endP, 5, CV_RGB(0,255,0), 2, 8,0);
                    cvLine(img_8uc3, depthP, endP,CV_RGB(0,0,0),1, CV_AA, 0 );
                }
                /*if (nomdef>0)
                {
                    resultado [0] = identificaGesto (longitudExt, nomdef, radio);
                    if (resultado[0] !=' ')
                        printf ("Gesto identificado (%c) \n", resultado[0]);
                }*/
                // Classify the gesture and log/append results.
                if (saveLength)
                {
                    // Aspect ratio of the defect bounding box.
                    radio = (double)maxDefectPos.x / (double)maxDefectPos.y;
                    if (nomdef>0)
                    {
                        printf ("_______________________\n");
                        resultado [0] = identificaGesto (longitudExt, nomdef, radio);
                        fin = clock();
                        fin = fin - inicio;
                        if (resultado[0] !=' ')
                            printf ("Gesto identificado (%c) \n", resultado[0]);
                        else
                            printf ("No se identifico ningun gesto\n");
                        printf("Tiempo de ejecucion: %f\nLongitud %g \nNomDef %i \nradio %g \n",(((float)fin)/CLOCKS_PER_SEC ), longitudExt, nomdef, radio);
                        // Append the feature vector to the training/log file.
                        FILE *fp;
                        fp=fopen("archivo.txt", "a");
                        if (nomdef == 6)
                            fprintf(fp, "\n>>>>>>>5<<<<<<\n%g\n%i\n%g\n",longitudExt, nomdef, radio);
                        else
                            fprintf(fp, "\n%g\n%i\n%g\n",longitudExt, nomdef, radio);
                        fclose (fp);
                    }
                    else
                        printf("No hay defectos");
                    printf ("_______________________\n");
                }
                /*
                char txt[]="0";
                txt[0]='0'+nomdef-1;
                CvFont font;
                cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
                cvPutText(img_8uc3, txt, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0));
                */
                // Draw the recognized gesture character on the image.
                // NOTE(review): `resultado` is an array, so `resultado != NULL`
                // is always true — the check is a no-op.
                CvFont font;
                cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
                if (resultado!= NULL)
                    cvPutText(img_8uc3, resultado, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0));
                j++;
                // Free memory.
                free(defectArray);
            }
            // Draw the convex hull (green) by connecting consecutive hull points.
            pt0 = **CV_GET_SEQ_ELEM( CvPoint*, hull, hullcount - 1 );
            for(int i = 0; i < hullcount; i++ )
            {
                CvPoint pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, i );
                cvLine( img_8uc3, pt0, pt, CV_RGB( 0, 255, 0 ), 1, CV_AA, 0 );
                pt0 = pt;
            }
            // Draw the axis-aligned box around all defect points.
            // NOTE(review): CV_RGB(2500,0,0) exceeds the 0-255 channel range;
            // probably meant CV_RGB(250,0,0) or CV_RGB(255,0,0).
            cvLine( img_8uc3, minDefectPos, cvPoint( (maxDefectPos.x), (minDefectPos.y)), CV_RGB( 2500, 0, 0 ), 1, CV_AA, 0 );
            cvLine( img_8uc3, cvPoint( (maxDefectPos.x), (minDefectPos.y)), maxDefectPos, CV_RGB( 2500, 0, 0 ), 1, CV_AA, 0 );
            cvLine( img_8uc3, maxDefectPos, cvPoint( (minDefectPos.x), (maxDefectPos.y)), CV_RGB( 2500, 0, 0 ), 1, CV_AA, 0 );
            cvLine( img_8uc3, cvPoint( (minDefectPos.x), (maxDefectPos.y)), minDefectPos, CV_RGB( 2500, 0, 0 ), 1, CV_AA, 0 );
            // Release all storages (only reached when areamax > 5000; see
            // leak note in the header comment).
            cvReleaseMemStorage( &storage );
            cvReleaseMemStorage( &storage1 );
            cvReleaseMemStorage( &storage2 );
            cvReleaseMemStorage( &storage3 );
            //return 0;
        }
    }
}
void ObjectTracker::findConnectedComponents( IplImage* mask, int poly1_hull2 /* = 0 */, double perimScale /* = 0.25 */, int* num /* = NULL */, CvRect* bbs /* = NULL */, CvPoint* centers /* = NULL */ ) { int cvContourApproxLevel = 2; static CvMemStorage *mem_storage = NULL; static CvSeq *contours = NULL; if (mem_storage == NULL) { mem_storage = cvCreateMemStorage(0); } else { cvClearMemStorage(mem_storage); } CvContourScanner scanner = cvStartFindContours(mask, mem_storage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); CvSeq *c; int numCont = 0; while ((c = cvFindNextContour(scanner)) != NULL) { double len = cvContourPerimeter(c); double q = (mask->height + mask->width) * perimScale; if (len < q) { cvSubstituteContour(scanner, NULL); } else { if (poly1_hull2) { CvSeq *c_new; if (poly1_hull2 == 1) { c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, cvContourApproxLevel, 0); } else if (poly1_hull2 == 2) { c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1); } cvSubstituteContour(scanner, c_new); } numCont++; } } contours = cvEndFindContours(&scanner); const CvScalar CVX_WHITE = CV_RGB(0xff,0xff,0xff); const CvScalar CVX_BLACK = CV_RGB(0x00,0x00,0x00); cvZero(mask); IplImage *maskTemp; // CALC CENTER OF MASS AND/OR BOUNDING RECTANGLES // if(num != NULL) { //User wants to collect statistics // int N = *num, numFilled = 0, i=0; CvMoments moments; double M00, M01, M10; maskTemp = cvCloneImage(mask); for(i=0, c=contours; c != NULL; c = c->h_next,i++ ) { if(i < N) { // Only process up to *num of them // cvDrawContours( maskTemp, c, CVX_WHITE, CVX_WHITE, -1, CV_FILLED, 8 ); // Find the center of each contour // if(centers != NULL) { cvMoments(maskTemp,&moments,1); M00 = cvGetSpatialMoment(&moments,0,0); M10 = cvGetSpatialMoment(&moments,1,0); M01 = cvGetSpatialMoment(&moments,0,1); centers[i].x = (int)(M10/M00); centers[i].y = (int)(M01/M00); } //Bounding rectangles around blobs // if(bbs != NULL) { bbs[i] = cvBoundingRect(c); } 
cvZero(maskTemp); numFilled++; } // Draw filled contours into mask // cvDrawContours( mask, c, CVX_WHITE, CVX_WHITE, -1, CV_FILLED, 8 ); } //end looping over contours *num = numFilled; cvReleaseImage( &maskTemp); } // ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK // else { // The user doesn't want statistics, just draw the contours // for( c=contours; c != NULL; c = c->h_next ) { cvDrawContours( mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8 ); } } }
/**
 * Analyze the current Kinect depth frame around the tracked hand and decide
 * whether the hand is "grabbing" (fist closed).
 *
 * Steps: convert the 16-bit OpenNI depth map to an 8-bit image (near = bright),
 * restrict processing to a hand-centered ROI, threshold around the hand depth,
 * find the biggest contour, compute its convex hull and convexity defects,
 * count defects deep enough to be finger valleys, and set m_currentGrab from
 * the defect count. Optionally renders a debug image and emits it via Qt.
 */
void AirCursor::analyzeGrab()
{
    cvClearMemStorage(m_cvMemStorage);

    // get current depth map from Kinect
    const XnDepthPixel* depthMap = m_depthGenerator.GetDepthMap();

    // convert 16bit openNI depth map to 8bit IplImage used in opencv processing
    int origDepthIndex = 0;
    char* depthPtr = m_iplDepthMap->imageData;
    char* debugPtr = 0;
    if (m_debugImageEnabled) debugPtr = m_iplDebugImage->imageData;
    for (unsigned int y = 0; y < DEPTH_MAP_SIZE_Y; y++)
    {
        for (unsigned int x = 0; x < DEPTH_MAP_SIZE_X; x++)
        {
            // get current depth value from original depth map
            short depth = depthMap[origDepthIndex];

            // check that current value is in the allowed range determined by clipping distances,
            // and if it is map it to range 0 - 255 so that 255 is the closest value
            unsigned char pixel = 0;
            if (depth >= NEAR_CLIPPING_DISTANCE && depth <= FAR_CLIPPING_DISTANCE)
            {
                depth -= NEAR_CLIPPING_DISTANCE;
                pixel = 255 - (255.0f * ((float)depth / (FAR_CLIPPING_DISTANCE - NEAR_CLIPPING_DISTANCE)));
            }
            else
            {
                pixel = 0;
            }
            // NOTE(review): this indexed write uses widthStep while depthPtr
            // advances by 1 per pixel — the next statement writes the same
            // value through depthPtr, so the two agree only if
            // widthStep == DEPTH_MAP_SIZE_X (no row padding). TODO confirm.
            m_iplDepthMap->imageData[y * m_iplDepthMap->widthStep + x] = pixel;
            *depthPtr = pixel;
            if (m_debugImageEnabled)
            {
                // init debug image with the same depth map (grayscale BGR)
                *(debugPtr + 0) = pixel;
                *(debugPtr + 1) = pixel;
                *(debugPtr + 2) = pixel;
                debugPtr += 3;
            }
            origDepthIndex++;
            depthPtr++;
        }
    }

    // calculate region of interest corner points in real world coordinates
    XnPoint3D rwPoint1 = m_handPosRealWorld;
    rwPoint1.X -= HAND_ROI_SIZE_LEFT;
    rwPoint1.Y += HAND_ROI_SIZE_UP;
    XnPoint3D rwPoint2 = m_handPosRealWorld;
    rwPoint2.X += HAND_ROI_SIZE_RIGHT;
    rwPoint2.Y -= HAND_ROI_SIZE_DOWN;

    // convert corner points to projective coordinates
    XnPoint3D projPoint1, projPoint2;
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwPoint1, &projPoint1);
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwPoint2, &projPoint2);

    // round projected corner points to ints and clip them against the depth map
    int ROItopLeftX = qRound(projPoint1.X);
    int ROItopLeftY = qRound(projPoint1.Y);
    int ROIbottomRightX = qRound(projPoint2.X);
    int ROIbottomRightY = qRound(projPoint2.Y);
    if (ROItopLeftX < 0) ROItopLeftX = 0;
    else if (ROItopLeftX > DEPTH_MAP_SIZE_X - 1) ROItopLeftX = DEPTH_MAP_SIZE_X - 1;
    if (ROItopLeftY < 0) ROItopLeftY = 0;
    else if (ROItopLeftY > DEPTH_MAP_SIZE_Y - 1) ROItopLeftY = DEPTH_MAP_SIZE_Y - 1;
    if (ROIbottomRightX < 0) ROIbottomRightX = 0;
    else if (ROIbottomRightX > DEPTH_MAP_SIZE_X - 1) ROIbottomRightX = DEPTH_MAP_SIZE_X - 1;
    if (ROIbottomRightY < 0) ROIbottomRightY = 0;
    else if (ROIbottomRightY > DEPTH_MAP_SIZE_Y - 1) ROIbottomRightY = DEPTH_MAP_SIZE_Y - 1;

    // set region of interest (only when it is non-degenerate)
    CvRect rect = cvRect(ROItopLeftX, ROItopLeftY, ROIbottomRightX - ROItopLeftX, ROIbottomRightY - ROItopLeftY);
    if(rect.height > 0 && rect.width > 0)
    {
        cvSetImageROI(m_iplDepthMap, rect);
        if (m_debugImageEnabled) cvSetImageROI(m_iplDebugImage, rect);
    }

    // use depth threshold to isolate hand
    // as a center point of thresholding, it seems that it's better to use a point bit below
    // the point Nite gives as the hand point
    XnPoint3D rwThresholdPoint = m_handPosRealWorld;
    rwThresholdPoint.Y -= 30;
    XnPoint3D projThresholdPoint;
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwThresholdPoint, &projThresholdPoint);
    // NOTE(review): the sampled point is not clamped to the image bounds and
    // the index assumes no row padding (DEPTH_MAP_SIZE_X vs widthStep) — confirm.
    int lowerBound = (unsigned char)m_iplDepthMap->imageData[(int)projThresholdPoint.Y * DEPTH_MAP_SIZE_X + (int)projThresholdPoint.X] - DEPTH_THRESHOLD;
    if (lowerBound < 0) lowerBound = 0;
    cvThreshold( m_iplDepthMap, m_iplDepthMap, lowerBound, 255, CV_THRESH_BINARY );

    // color used for drawing the hand in the debug image, green for normal and red for grab.
    // color lags one frame from actual grab status but in practice that shouldn't be too big of a problem
    int rCol, gCol, bCol;
    if(m_grabbing)
    {
        rCol = 255; gCol = 0; bCol = 0;
    }
    else
    {
        rCol = 0; gCol = 255; bCol = 0;
    }

    // go through the ROI and paint hand on debug image with current grab status color
    if (m_debugImageEnabled)
    {
        // index of first pixel in the ROI
        int startIndex = ROItopLeftY * m_iplDepthMap->widthStep + ROItopLeftX;
        depthPtr = &(m_iplDepthMap->imageData[startIndex]);
        debugPtr = &(m_iplDebugImage->imageData[startIndex * 3]);

        // how much index needs to increase when moving to next line
        int vertInc = m_iplDepthMap->widthStep - (ROIbottomRightX - ROItopLeftX);
        for (int y = ROItopLeftY; y < ROIbottomRightY; y++)
        {
            for (int x = ROItopLeftX; x < ROIbottomRightX; x++)
            {
                // half-intensity tint wherever the thresholded hand is present
                if((unsigned char)*depthPtr > 0)
                {
                    *(debugPtr + 0) = rCol / 2;
                    *(debugPtr + 1) = gCol / 2;
                    *(debugPtr + 2) = bCol / 2;
                }
                // next pixel
                depthPtr++;
                debugPtr += 3;
            }
            // next line
            depthPtr += vertInc;
            debugPtr += vertInc * 3;
        }
    }

    // find contours in the hand and draw them on debug image
    CvSeq* contours = 0;
    cvFindContours(m_iplDepthMap, m_cvMemStorage, &contours, sizeof(CvContour));
    if (m_debugImageEnabled)
    {
        if(contours)
        {
            cvDrawContours(m_iplDebugImage, contours, cvScalar(rCol, gCol , bCol), cvScalar(rCol, gCol, bCol), 1);
        }
    }

    // go through contours and search for the biggest one
    CvSeq* biggestContour = 0;
    double biggestArea = 0.0f;
    for(CvSeq* currCont = contours; currCont != 0; currCont = currCont->h_next)
    {
        // ignore small contours which are most likely caused by artifacts
        double currArea = cvContourArea(currCont);
        if(currArea < CONTOUR_MIN_SIZE) continue;
        if(!biggestContour || currArea > biggestArea)
        {
            biggestContour = currCont;
            biggestArea = currArea;
        }
    }

    int numOfValidDefects = 0;

    if(biggestContour)
    {
        // calculate convex hull of the biggest contour found which is hopefully the hand
        // (return_points = 0: hull as pointers, required by cvConvexityDefects)
        CvSeq* hulls = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 0);

        if (m_debugImageEnabled)
        {
            // calculate convex hull and return it in a different form.
            // only required for drawing
            CvSeq* hulls2 = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 1);

            // draw the convex hull
            cvDrawContours(m_iplDebugImage, hulls2, cvScalar(rCol, gCol , bCol), cvScalar(rCol, gCol, bCol), 1);
        }

        // calculate convexity defects of hand's convex hull
        CvSeq* defects = cvConvexityDefects(biggestContour, hulls, m_cvMemStorage);
        int numOfDefects = defects->total;

        if (numOfDefects > 0)
        {
            // calculate defect min size in projective coordinates.
            // this is done using a vector from current hand position to a point DEFECT_MIN_SIZE amount above it.
            // that vector is converted to projective coordinates and it's length is calculated.
            XnPoint3D rwTempPoint = m_handPosRealWorld;
            rwTempPoint.Y += DEFECT_MIN_SIZE;
            XnPoint3D projTempPoint;
            m_depthGenerator.ConvertRealWorldToProjective(1, &rwTempPoint, &projTempPoint);
            int defectMinSizeProj = m_handPosProjected.Y - projTempPoint.Y;

            // convert opencv seq to array
            CvConvexityDefect* defectArray;
            defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect) * numOfDefects);
            cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);

            for(int i = 0; i < numOfDefects; i++)
            {
                // ignore too small defects
                if((defectArray[i].depth) < defectMinSizeProj)
                {
                    continue;
                }
                numOfValidDefects++;
                if (m_debugImageEnabled)
                {
                    // draw blue point to defect
                    cvCircle(m_iplDebugImage, *(defectArray[i].depth_point), 5, cvScalar(0, 0, 255), -1);
                    cvCircle(m_iplDebugImage, *(defectArray[i].start), 5, cvScalar(0, 0, 255), -1);
                    cvCircle(m_iplDebugImage, *(defectArray[i].end), 5, cvScalar(0, 0, 255), -1);
                }
            }
            free(defectArray);
        }
    }

    if (m_debugImageEnabled)
    {
        cvResetImageROI(m_iplDebugImage);

        // draw white dot on current hand position
        cvCircle(m_iplDebugImage, cvPoint(m_handPosProjected.X, m_handPosProjected.Y), 5, cvScalar(255, 255, 255), -1);

        // draw gray dot on current center of threshold position
        //cvCircle(m_iplDebugImage, cvPoint(projThresholdPoint.X, projThresholdPoint.Y), 5, cvScalar(127, 127, 127), -1);

        // draw ROI with green
        //cvRectangle(m_iplDebugImage, cvPoint(ROItopLeftX, ROItopLeftY), cvPoint(ROIbottomRightX, ROIbottomRightY), cvScalar(0, 255, 0));
    }

    // determine current grab status based on defect count:
    // a closed fist has few (deep) convexity defects, an open hand has many
    if(numOfValidDefects <= GRAB_MAX_DEFECTS)
    {
        m_currentGrab = true;
    }
    else
    {
        m_currentGrab = false;
    }

    if (m_debugImageEnabled)
    {
        // debug strings
        QList<QString> debugStrings;
        debugStrings.push_back(QString("hand distance: " + QString::number(m_handPosRealWorld.Z) + " mm").toStdString().c_str());
        debugStrings.push_back(QString("defects: " + QString::number(numOfValidDefects)).toStdString().c_str());

        // convert iplDebugImage to QImage
        // NOTE(review): assumes m_iplDebugImage rows are tightly packed
        // (3 * DEPTH_MAP_SIZE_X bytes, no padding) — confirm widthStep.
        char* scanLinePtr = m_iplDebugImage->imageData;
        for (int y = 0;y < DEPTH_MAP_SIZE_Y; y++)
        {
            memcpy(m_debugImage->scanLine(y), scanLinePtr, DEPTH_MAP_SIZE_X * 3);
            scanLinePtr += DEPTH_MAP_SIZE_X * 3;
        }

        emit debugUpdate(*m_debugImage, debugStrings);
    }
}
//-------------------------------------------------------------------------------- int ofxCvMyContourFinder::findContours( IplImage* input, int minArea, int maxArea, int nConsidered, bool bFindHoles, int approximation) { // get width/height disregarding ROI _width = input->width; _height = input->height; reset(); // opencv will clober the image it detects contours on, so we want to // copy it into a copy before we detect contours. That copy is allocated // if necessary (necessary = (a) not allocated or (b) wrong size) // so be careful if you pass in different sized images to "findContours" // there is a performance penalty, but we think there is not a memory leak // to worry about better to create mutiple contour finders for different // sizes, ie, if you are finding contours in a 640x480 image but also a // 320x240 image better to make two ofxCvMyContourFinder objects then to use // one, because you will get penalized less. if( !inputCopy ) { inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels); } else if( inputCopy->width != _width || inputCopy->height != _height ) { // reallocate to new size cvReleaseImage(&inputCopy); inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels); } cvSetImageROI(inputCopy, cvGetImageROI(input)); cvCopy(input, inputCopy); contour_storage = cvCreateMemStorage( 1000 ); storage = cvCreateMemStorage( 1000 ); CvContourRetrievalMode retrieve_mode = (bFindHoles) ? 
CV_RETR_LIST : CV_RETR_EXTERNAL; CvContourScanner scanner = cvStartFindContours( inputCopy, contour_storage, sizeof(CvContour), retrieve_mode, CV_CHAIN_APPROX_SIMPLE); CvSeq* c; int numCont = 0; while(( c = cvFindNextContour(scanner)) != NULL) { CvSeq* c_new; if( approximation > 0){ c_new = cvApproxPoly( c, sizeof(CvContour), contour_storage, CV_POLY_APPROX_DP, approximation, 0 ); } else { c_new = cvConvexHull2( c, contour_storage, CV_CLOCKWISE, 1 ); } float area = fabs( cvContourArea(c_new, CV_WHOLE_SEQ) ); if( (area > minArea) && (area < maxArea) ) { cvSeqBlobs.push_back(c_new); } numCont++; } // cvEndFindContours(scanner); // sort the pointers based on size if( cvSeqBlobs.size() > 1 ) { sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), mysort_carea_compare ); } // now, we have cvSeqBlobs.size() contours, sorted by size in the array // cvSeqBlobs let's get the data out and into our structures that we like for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) { myblobs.push_back( ofxCvMyBlob() ); float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ ); CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 ); cvMoments( cvSeqBlobs[i], myMoments ); myblobs[i].area = fabs(area); myblobs[i].hole = area < 0 ? 
true : false; myblobs[i].length = cvArcLength(cvSeqBlobs[i]); myblobs[i].boundingRect.x = rect.x; myblobs[i].boundingRect.y = rect.y; myblobs[i].boundingRect.width = rect.width; myblobs[i].boundingRect.height = rect.height; if(cvSeqBlobs[i]->total >= 6){ myblobs[i].box2D_cv = cvMinAreaRect2(cvSeqBlobs[i]); } myblobs[i].bounding_cv = cvBoundingRect(cvSeqBlobs[i]); double x = (myMoments->m10 / myMoments->m00); double y = (myMoments->m01 / myMoments->m00); myblobs[i].centroid.x = (int)x; myblobs[i].centroid.y = (int)y; myblobs[i].centroid_cv = cvPoint2D32f(x,y); // myblobs[i].contour = (CvPoint *)malloc(cvSeqBlobs[i]->total * sizeof(CvPoint)); // cvCvtSeqToArray(cvSeqBlobs[i], myblobs[i].contour, CV_WHOLE_SEQ); // get the points for the blob: CvPoint pt; CvSeqReader reader; cvStartReadSeq( cvSeqBlobs[i], &reader, 0 ); for( int j=0; j < cvSeqBlobs[i]->total; j++ ) { CV_READ_SEQ_ELEM( pt, reader ); myblobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) ); } myblobs[i].nPts = myblobs[i].pts.size(); } nBlobs = myblobs.size(); // Free the storage memory. // Warning: do this inside this function otherwise a strange memory leak if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); } if( storage != NULL ) { cvReleaseMemStorage(&storage); } return nBlobs; }
void detect(IplImage* img_8uc1,IplImage* img_8uc3) { //cvNamedWindow( "aug", 1 ); //cvThreshold( img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY ); CvMemStorage* storage = cvCreateMemStorage(); CvSeq* first_contour = NULL; CvSeq* maxitem=NULL; double area=0,areamax=0; int maxn=0; int Nc = cvFindContours( img_8uc1, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST // Try all four values and see what happens ); int n=0; //printf( "Total Contours Detected: %d\n", Nc ); if(Nc>0) { for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) { //cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR ); area=cvContourArea(c,CV_WHOLE_SEQ ); if(area>areamax) {areamax=area; maxitem=c; maxn=n; } n++; } CvMemStorage* storage3 = cvCreateMemStorage(0); //if (maxitem) maxitem = cvApproxPoly( maxitem, sizeof(maxitem), storage3, CV_POLY_APPROX_DP, 3, 1 ); if(areamax>5000) { maxitem = cvApproxPoly( maxitem, sizeof(CvContour), storage3, CV_POLY_APPROX_DP, 10, 1 ); CvPoint pt0; CvMemStorage* storage1 = cvCreateMemStorage(0); CvMemStorage* storage2 = cvCreateMemStorage(0); CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage1 ); CvSeq* hull; CvSeq* defects; for(int i = 0; i < maxitem->total; i++ ) { CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, maxitem, i ); pt0.x = p->x; pt0.y = p->y; cvSeqPush( ptseq, &pt0 ); } hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 ); int hullcount = hull->total; defects= cvConvexityDefects(ptseq,hull,storage2 ); //printf(" defect no %d \n",defects->total); CvConvexityDefect* defectArray; int j=0; //int m_nomdef=0; // This cycle marks all defects of convexity of current contours. for(;defects;defects = defects->h_next) { int nomdef = defects->total; // defect amount //outlet_float( m_nomdef, nomdef ); //printf(" defect no %d \n",nomdef); if(nomdef == 0) continue; // Alloc memory for defect set. //fprintf(stderr,"malloc\n"); defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*nomdef); // Get defect set. 
//fprintf(stderr,"cvCvtSeqToArray\n"); cvCvtSeqToArray(defects,defectArray, CV_WHOLE_SEQ); // Draw marks for all defects. for(int i=0; i<nomdef; i++) { printf(" defect depth for defect %d %f \n",i,defectArray[i].depth); cvLine(img_8uc3, *(defectArray[i].start), *(defectArray[i].depth_point),CV_RGB(255,255,0),1, CV_AA, 0 ); cvCircle( img_8uc3, *(defectArray[i].depth_point), 5, CV_RGB(0,0,164), 2, 8,0); cvCircle( img_8uc3, *(defectArray[i].start), 5, CV_RGB(0,0,164), 2, 8,0); cvLine(img_8uc3, *(defectArray[i].depth_point), *(defectArray[i].end),CV_RGB(255,255,0),1, CV_AA, 0 ); } char txt[]="0"; txt[0]='0'+nomdef-1; CvFont font; cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA); cvPutText(img_8uc3, txt, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0)); j++; // Free memory. free(defectArray); } cvReleaseMemStorage( &storage ); cvReleaseMemStorage( &storage1 ); cvReleaseMemStorage( &storage2 ); cvReleaseMemStorage( &storage3 ); //return 0; } } }
//-------------------------------------------------------------- void eyeTracker::update(ofxCvGrayscaleImage & grayImgFromCam, float threshold, float minSize, float maxSize, float minSquareness) { //threshold? //threshold = thresh; grayImgPreWarp.setFromPixels(grayImgFromCam.getPixels(), grayImgFromCam.width, grayImgFromCam.height); // TODO: there's maybe an unnecessary grayscale image (and copy) here... if( flipX || flipY ) { grayImgPreWarp.mirror(flipY, flipX); } /* // before we were scaling and translating, but this is removed for now if (fabs(xoffset-1) > 0.1f || fabs(yoffset-1) > 0.1f){ grayImgPreWarp.translate(xoffset, yoffset); } if (fabs(scalef-1) > 0.1f){ grayImgPreWarp.scale(scalef, scalef); }*/ grayImg = grayImgPreWarp; grayImgPreModification = grayImg; grayImg.blur(5); if (bUseContrast == true) { grayImg.applyBrightnessContrast(brightness,contrast); } if (bUseGamma == true) { grayImg.applyMinMaxGamma(gamma); } grayImg += edgeMask; threshImg = grayImg; threshImg.contrastStretch(); threshImg.threshold(threshold, true); // the dilation of a 640 x 480 image is very slow, so let's just do a ROI near the thing we like: threshImg.setROI(currentEyePoint.x-50, currentEyePoint.y-50, 100,100); // 200 pix ok? if (bUseDilate == true) { for (int i = 0; i < nDilations; i++) { threshImg.dilate(); } } threshImg.resetROI(); bFoundOne = false; int whoFound = -1; int num = contourFinder.findContours(threshImg, minSize, maxSize, 100, false, true); if( num ) { for(int k = 0; k < num; k++) { float ratio = contourFinder.blobs[k].boundingRect.width < contourFinder.blobs[k].boundingRect.height ? 
contourFinder.blobs[k].boundingRect.width / contourFinder.blobs[k].boundingRect.height : contourFinder.blobs[k].boundingRect.height / contourFinder.blobs[k].boundingRect.width; float arcl = contourFinder.blobs[k].length; float area = contourFinder.blobs[k].area; float compactness = (float)((arcl*arcl/area)/FOUR_PI); if (bUseCompactnessTest == true && compactness > maxCompactness) { continue; } //printf("compactness %f \n", compactness); //lets ignore rectangular blobs if( ratio > minSquareness) { currentEyePoint = contourFinder.blobs[k].centroid; currentNormPoint.x = currentEyePoint.x; currentNormPoint.y = currentEyePoint.y; currentNormPoint.x /= w; currentNormPoint.y /= h; bFoundOne = true; whoFound = k; break; } } } if (bFoundOne && whoFound != -1) { // do some convex hull stuff: CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),sizeof(CvPoint), storage ); CvSeq* hull; CvPoint pt0; for(int i = 0; i < contourFinder.blobs[whoFound].nPts; i++ ) { pt0.x = contourFinder.blobs[whoFound].pts[i].x; pt0.y = contourFinder.blobs[whoFound].pts[i].y; cvSeqPush( ptseq, &pt0 ); } hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 ); int hullcount = hull->total; // -------------------------------- TRY TO GET A GOOD ELLIPSE HELLS YEAH !! 
int MYN = hullcount; float x[MYN], y[MYN]; double p[6]; double ellipseParam[5]; float theta; FitEllipse fitter; for (int i=0; i<MYN; i++) { CvPoint pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, i); x[i] = pt.x; y[i] = pt.y; } double xc, yc; double xa, ya; double la, lb; fitter.apply(x,y,MYN); p[0] = fitter.Axx; p[1] = fitter.Axy; p[2] = fitter.Ayy; p[3] = fitter.Ax; p[4] = fitter.Ay; p[5] = fitter.Ao; bool bOk = solve_ellipse(p,ellipseParam); ofxCvBlob temp; if (bOk == true) { //float *params_ellipse = pupilGeometries[whichEye].params_ellipse; float axis_a = ellipseParam[0]; float axis_b = ellipseParam[1]; float cx = ellipseParam[2]; float cy = ellipseParam[3]; theta = ellipseParam[4]; float aspect = axis_b/axis_a; for (int i = 0; i < 5; i++) { eyeTrackedEllipse.ellipseParam[i] = ellipseParam[i]; } //theta = ofRandom(0,TWO_PI); int resolution = 24; ofxPoint2f ptsForRotation[resolution]; for (int i=0; i<resolution; i++) { float t = TWO_PI * (float)i/(float)resolution; float ex = cx + (axis_a * cos(t )); float ey = cy + (axis_b * sin(t )); ptsForRotation[i].set(ex,ey); } for (int i=0; i<resolution; i++) { ptsForRotation[i].rotate(theta * RAD_TO_DEG, ofxPoint2f(cx, cy)); } currentEyePoint.set(cx, cy); currentNormPoint.x = currentEyePoint.x; currentNormPoint.y = currentEyePoint.y; currentNormPoint.x /= w; currentNormPoint.y /= h; } else { bFoundOne = false; } cvRelease((void **)&hull); }