void FaceDetection::AddContours2Rect(CvSeq *seq, int color, int iLayer)
{
    assert(m_mstgRects != NULL);
    assert(m_seqRects != NULL);

    CvContourRect cr;
    // top level of the contour tree: external contours, linked via h_next
    for (CvSeq* external = seq; external; external = external->h_next)
    {
        cr.r = cvContourBoundingRect(external, 1);
        cr.pCenter.x = cr.r.x + cr.r.width / 2;
        cr.pCenter.y = cr.r.y + cr.r.height / 2;
        cr.iNumber = iLayer;
        cr.iType = 6;
        cr.iFlags = 0;
        cr.seqContour = external;
        cr.iContourLength = external->total;
        cr.iColor = color;
        cvSeqPush(m_seqRects, &cr);
        // one level down: holes of this contour, reached via v_next and linked via h_next
        for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
        {
            cr.r = cvContourBoundingRect(internal, 0);
            cr.pCenter.x = cr.r.x + cr.r.width / 2;
            cr.pCenter.y = cr.r.y + cr.r.height / 2;
            cr.iNumber = iLayer;
            cr.iType = 12;
            cr.iFlags = 0;
            cr.seqContour = internal;
            cr.iContourLength = internal->total;
            cr.iColor = color;
            cvSeqPush(m_seqRects, &cr);
        }
    }
}// void FaceDetection::AddContours2Rect(CvSeq *seq, int color, int iLayer)
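The h_next/v_next walk above assumes the sequence came from a two-level retrieval mode (CV_RETR_CCOMP), where h_next links sibling external contours and v_next points to the first hole of each. A minimal, self-contained sketch of producing and walking such a tree with the legacy C API (the driver function and its name are illustrative, not part of FaceDetection):

#include <stdio.h>
#include <opencv/cv.h>   // legacy C API; on some installs this header is <cv.h>

// Walk a CV_RETR_CCOMP contour tree: top level = external contours,
// second level (v_next) = their holes.  Note cvFindContours modifies
// the input image, so pass a scratch copy.
static void WalkComponents(IplImage* binary8u)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;

    cvFindContours(binary8u, storage, &contours, sizeof(CvContour),
                   CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    for (CvSeq* external = contours; external; external = external->h_next)
    {
        CvRect r = cvBoundingRect(external, 0);
        printf("external contour: %d points, %dx%d box\n", external->total, r.width, r.height);

        for (CvSeq* hole = external->v_next; hole; hole = hole->h_next)
            printf("  hole: %d points\n", hole->total);
    }

    cvReleaseMemStorage(&storage);   // releases every contour sequence at once
}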
Example No. 2
void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize)
{
    CvSeq* seq;
    CvRect roi = m_rROI;
    Extend(roi, 1);
    cvSetImageROI(img, roi);
    cvSetImageROI(thresh, roi);
    // build the set of threshold levels (layers), clustered around the previously tracked colour
    int colors[MAX_LAYERS] = {0};
    int iMinLevel = 0, iMaxLevel = 255;
    float step, power;
    ThresholdingParam(img, nLayers / 2, iMinLevel, iMaxLevel, step, power, 4);
    int iMinLevelPrev = iMinLevel;
    int iMaxLevelPrev = iMinLevel;
    if (m_trPrev.iColor != 0)
    {
        iMinLevelPrev = m_trPrev.iColor - nLayers / 2;
        iMaxLevelPrev = m_trPrev.iColor + nLayers / 2;
    }
    if (iMinLevelPrev < iMinLevel)
    {
        iMaxLevelPrev += iMinLevel - iMinLevelPrev;
        iMinLevelPrev = iMinLevel;
    }
    if (iMaxLevelPrev > iMaxLevel)
    {
        iMinLevelPrev -= iMaxLevelPrev - iMaxLevel;
        if (iMinLevelPrev < iMinLevel)
            iMinLevelPrev = iMinLevel;
        iMaxLevelPrev = iMaxLevel;
    }
    int n = nLayers;
    n -= (iMaxLevelPrev - iMinLevelPrev + 1) / 2;
    step = float(iMinLevelPrev - iMinLevel + iMaxLevel - iMaxLevelPrev) / float(n);
    int j = 0;
    float level;
    for (level = (float)iMinLevel; level < iMinLevelPrev && j < nLayers; level += step, j++)
        colors[j] = int(level + 0.5);
    for (level = (float)iMinLevelPrev; level < iMaxLevelPrev && j < nLayers; level += 2.0, j++)
        colors[j] = int(level + 0.5);
    for (level = (float)iMaxLevelPrev; level < iMaxLevel && j < nLayers; level += step, j++)
        colors[j] = int(level + 0.5);
    // threshold at each layer level and collect candidate rectangles
    for (int i = 0; i < nLayers; i++)
    {
        cvThreshold(img, thresh, colors[i], 255.0, CV_THRESH_BINARY);
        if (cvFindContours(thresh, m_mstgRects, &seq, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE))
        {
            CvTrackingRect cr;
            for (CvSeq* external = seq; external; external = external->h_next)
            {
                cr.r = cvContourBoundingRect(external);
                Move(cr.r, roi.x, roi.y);
                if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize && cr.r.height > dMinSize)
                {
                    cr.ptCenter = Center(cr.r);
                    cr.iColor = colors[i];
                    cvSeqPush(m_seqRects, &cr);
                }
                for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
                {
                    cr.r = cvContourBoundingRect(internal);
                    Move(cr.r, roi.x, roi.y);
                    if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize && cr.r.height > dMinSize)
                    {
                        cr.ptCenter = Center(cr.r);
                        cr.iColor = colors[i];
                        cvSeqPush(m_seqRects, &cr);
                    }
                }
            }
            cvClearSeq(seq);
        }
    }
    cvResetImageROI(img);
    cvResetImageROI(thresh);
}// void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize)
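The loop above spreads nLayers threshold levels over [iMinLevel, iMaxLevel], sampling the band of +/- nLayers/2 grey levels around the previously tracked colour twice as densely (step 2) and the rest of the range with a uniform step. The same level assignment, isolated into a standalone sketch (the helper name, the n <= 0 guard, and the sample numbers are mine, not from the original):

#include <cstdio>
#include <vector>

// Reproduce the layer-level computation from CvFaceElement::FindContours:
// dense sampling (step 2) around prevColor, uniform step elsewhere.
static std::vector<int> LayerLevels(int minLevel, int maxLevel, int prevColor, int nLayers)
{
    int loPrev = prevColor ? prevColor - nLayers / 2 : minLevel;
    int hiPrev = prevColor ? prevColor + nLayers / 2 : minLevel;
    if (loPrev < minLevel) { hiPrev += minLevel - loPrev; loPrev = minLevel; }
    if (hiPrev > maxLevel)
    {
        loPrev -= hiPrev - maxLevel;
        if (loPrev < minLevel) loPrev = minLevel;
        hiPrev = maxLevel;
    }

    int n = nLayers - (hiPrev - loPrev + 1) / 2;
    if (n <= 0) n = 1;                       // guard added here; not in the original
    float step = float(loPrev - minLevel + maxLevel - hiPrev) / float(n);

    std::vector<int> levels;
    int j = 0;
    float level;
    for (level = (float)minLevel; level < loPrev && j < nLayers; level += step, j++)
        levels.push_back(int(level + 0.5f));
    for (level = (float)loPrev; level < hiPrev && j < nLayers; level += 2.0f, j++)
        levels.push_back(int(level + 0.5f));
    for (level = (float)hiPrev; level < maxLevel && j < nLayers; level += step, j++)
        levels.push_back(int(level + 0.5f));
    return levels;
}

int main()
{
    // e.g. range 40..200, previous colour 120, 16 layers
    for (int c : LayerLevels(40, 200, 120, 16))
        std::printf("%d ", c);
    std::printf("\n");
    return 0;
}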
Example No. 3
static CvSeq*
icvGetComponent(uchar* img, int step, CvRect rect, CvMemStorage* storage) {
    const char nbd = 4;
    int  deltas[16];
    int  x, y;
    CvSeq* exterior = 0;
    char* ptr;

    /* initialize local state */
    CV_INIT_3X3_DELTAS(deltas, step, 1);
    memcpy(deltas + 8, deltas, 8 * sizeof(deltas[0]));

    ptr = (char*)(img + step * rect.y);
    rect.width += rect.x;      /* turn width/height into right/bottom limits */
    rect.height += rect.y;

    for (y = rect.y; y < rect.height; y++, ptr += step) {
        int prev = ptr[rect.x - 1] & -2;

        for (x = rect.x; x < rect.width; x++) {
            int p = ptr[x] & -2;

            //assert( exterior || ((p | prev) & -4) == 0 );

            if (p != prev) {
                CvSeq* seq = 0;
                int is_hole = 0;
                CvSeqWriter  writer;
                char*  i0, *i1, *i3, *i4 = 0;
                int  prev_s = -1, s, s_end;
                CvPoint pt = { x, y };

                if (!(prev == 0 && p == 2)) {   /* if not external contour */
                    /* check hole */
                    if (p != 0 || prev < 1) {
                        prev = p;
                        continue;
                    }

                    is_hole = 1;
                    if (!exterior) {
                        assert(0);
                        return 0;
                    }
                }

                cvStartWriteSeq(CV_SEQ_CONTOUR | (is_hole ? CV_SEQ_FLAG_HOLE : 0),
                sizeof(CvContour), sizeof(CvPoint), storage, &writer);
                s_end = s = is_hole ? 0 : 4;
                i0 = ptr + x - is_hole;

                do {
                    s = (s - 1) & 7;
                    i1 = i0 + deltas[s];
                    if ((*i1 & -2) != 0) {
                        break;
                    }
                } while (s != s_end);

                if (s == s_end) {           /* single pixel domain */
                    *i0 = (char)(nbd | -128);
                    CV_WRITE_SEQ_ELEM(pt, writer);
                } else {
                    i3 = i0;
                    prev_s = s ^ 4;

                    /* follow border */
                    for (;;) {
                        s_end = s;

                        for (;;) {
                            i4 = i3 + deltas[++s];
                            if ((*i4 & -2) != 0) {
                                break;
                            }
                        }
                        s &= 7;

                        /* check "right" bound */
                        if ((unsigned)(s - 1) < (unsigned) s_end) {
                            *i3 = (char)(nbd | -128);
                        } else if (*i3 > 0) {
                            *i3 = nbd;
                        }

                        if (s != prev_s) {
                            CV_WRITE_SEQ_ELEM(pt, writer);
                            prev_s = s;
                        }

                        pt.x += icvCodeDeltas[s].x;
                        pt.y += icvCodeDeltas[s].y;

                        if (i4 == i0 && i3 == i1) {
                            break;
                        }

                        i3 = i4;
                        s = (s + 4) & 7;
                    }                       /* end of border following loop */
                }

                seq = cvEndWriteSeq(&writer);
                cvContourBoundingRect(seq, 1);   /* compute and cache the bounding box in the contour header */

                if (!is_hole) {
                    exterior = seq;
                } else {
                    seq->v_prev = exterior;
                    seq->h_next = exterior->v_next;
                    if (seq->h_next) {
                        seq->h_next->h_prev = seq;
                    }
                    exterior->v_next = seq;
                }

                prev = ptr[x] & -2;
            }
        }
    }

    return exterior;
}
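icvGetComponent leans on two helpers internal to OpenCV's contour code: CV_INIT_3X3_DELTAS, which fills deltas[] with pointer offsets to the 8 neighbours of a pixel, and icvCodeDeltas, the matching (dx, dy) steps used to advance pt during border following. The definitions below mirror what OpenCV's cxmisc.h and cvcontours.cpp provide; they are reproduced here only so the snippet reads self-contained, so verify them against your OpenCV version.

/* pointer offsets to the 8 neighbours of a pixel, in Freeman chain-code
   order (0 = east), for an image with row stride `step` and `nch` channels */
#define CV_INIT_3X3_DELTAS( deltas, step, nch )             \
    ((deltas)[0] =  (nch),  (deltas)[1] = -(step) + (nch),  \
     (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch),  \
     (deltas)[4] = -(nch),  (deltas)[5] =  (step) - (nch),  \
     (deltas)[6] =  (step), (deltas)[7] =  (step) + (nch))

/* the matching (dx, dy) steps for each of the 8 chain-code directions;
   requires the OpenCV C headers for CvPoint */
static const CvPoint icvCodeDeltas[8] =
    { {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} };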
Example No. 4
//--------------------------------------------------------------------------------
int ContourFinder::findContours(	ofxCvGrayscaleImage&  input,
									int minArea,
									int maxArea,
									int nConsidered,
									double hullPress,	
									bool bFindHoles,
									bool bUseApproximation) {
	reset();

	// OpenCV clobbers the image it detects contours on, so we copy the
	// input into inputCopy before finding contours.  That copy is
	// (re)allocated only when necessary: (a) it has never been allocated,
	// or (b) it has the wrong size.  Be careful if you pass differently
	// sized images to findContours(): each new size triggers a reallocation,
	// which is a performance penalty (though not, as far as we know, a
	// memory leak).  If you regularly find contours in, say, both a 640x480
	// and a 320x240 image, it is better to create one ContourFinder per
	// size than to share a single one and pay that penalty repeatedly.

	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) 
			inputCopy = input;
		else {
			// we are allocated, but to the wrong size --
			// this has been checked for leaks, but a warning:
			// be careful if you call this function with a lot of
			// differently sized "input" images; it reallocates
			// every time a new size is passed in...
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) )  {
		CvBox2D box = cvMinAreaRect2(contour_ptr);
		int objectId; // If the contour is an object, then objectId is its ID
		objectId = (bTrackObjects)? templates->getTemplateId(box.size.width,box.size.height): -1;
		
		if(objectId != -1 ) { // if the blob is a known object
			Blob blob		= Blob();
			blob.id			= objectId;
			blob.isObject	= true;
			float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );

			cvMoments( contour_ptr, myMoments );
		
			// this is if using non-angle bounding box
			CvRect rect	= cvBoundingRect( contour_ptr, 0 );
			blob.boundingRect.x      = rect.x;
			blob.boundingRect.y      = rect.y;
			blob.boundingRect.width  = rect.width;
			blob.boundingRect.height = rect.height;

			//For anglebounding rectangle
			blob.angleBoundingBox=box;
			blob.angleBoundingRect.x	  = box.center.x;
			blob.angleBoundingRect.y	  = box.center.y;
			blob.angleBoundingRect.width  = box.size.height;
			blob.angleBoundingRect.height = box.size.width;
			blob.angle = box.angle;

			// temporary initialization to 0; these are computed later.
			// This just prevents sending uninitialized (wrong) data.
			blob.D.x = 0;
			blob.D.y = 0;
			blob.maccel = 0;

			// assign other parameters
			blob.area                = fabs(area);
			blob.hole                = area < 0 ? true : false;
			blob.length 			 = cvArcLength(contour_ptr);
		
			blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
			blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
			blob.lastCentroid.x 	 = 0;
			blob.lastCentroid.y 	 = 0;

			// get the points for the blob:
			CvPoint           pt;
			CvSeqReader       reader;
			cvStartReadSeq( contour_ptr, &reader, 0 );
	
    		for( int j=0; j < contour_ptr->total; j++ ) {
				CV_READ_SEQ_ELEM( pt, reader );
				blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
			}
			blob.nPts = blob.pts.size();

			objects.push_back(blob);
			
		} else if(bTrackBlobs) { // SEARCH FOR BLOBS
			float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
			if( (area > minArea) && (area < maxArea) ) {
				Blob blob=Blob();
				float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
				cvMoments( contour_ptr, myMoments );
				
				// this is if using non-angle bounding box
				CvRect rect	= cvBoundingRect( contour_ptr, 0 );
				blob.boundingRect.x      = rect.x;
				blob.boundingRect.y      = rect.y;
				blob.boundingRect.width  = rect.width;
				blob.boundingRect.height = rect.height;
				
				//Angle Bounding rectangle
				blob.angleBoundingRect.x	  = box.center.x;
				blob.angleBoundingRect.y	  = box.center.y;
				blob.angleBoundingRect.width  = box.size.height;
				blob.angleBoundingRect.height = box.size.width;
				blob.angle = box.angle;
				
				// assign other parameters
				blob.area                = fabs(area);
				blob.hole                = area < 0 ? true : false;
				blob.length 			 = cvArcLength(contour_ptr);
				// AlexP
				// Casting to int causes tracking errors, since centroids are computed
				// as floats and may land between integer pixel positions (which is what
				// we actually want).  Keeping them as floats makes tracking more
				// accurate and more fluid.
				blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
				blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
				blob.lastCentroid.x 	 = 0;
				blob.lastCentroid.y 	 = 0;
				
				// get the points for the blob:
				CvPoint           pt;
				CvSeqReader       reader;
				cvStartReadSeq( contour_ptr, &reader, 0 );
				
    			for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
					CV_READ_SEQ_ELEM( pt, reader );
					blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
				}
				blob.nPts = blob.pts.size();
				
				blobs.push_back(blob);
			}
		} 
		contour_ptr = contour_ptr->h_next;
	}
		
	if(bTrackFingers) {  // SEARCH FOR FINGERS
		CvPoint*		PointArray;
		int*			hull;
		int				hullsize;
		
		if (contour_list)
			contour_list = cvApproxPoly(contour_list, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 );
			
		for( ; contour_list != 0; contour_list = contour_list->h_next ){
			int count = contour_list->total; // number of points in this contour
				
			CvRect rect = cvContourBoundingRect(contour_list, 1);
			
			if ( (rect.width*rect.height) > 300 ){		// analyze only the larger contours
				CvPoint center;
				center.x = rect.x+rect.width/2;
				center.y = rect.y+rect.height/2;
				
				PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set.
				hull = (int*)malloc(sizeof(int)*count);	// Alloc memory for indices of convex hull vertices.
					
				cvCvtSeqToArray(contour_list, PointArray, CV_WHOLE_SEQ); // Get contour point set.
					
				// Find the convex hull of the current contour.
				cvConvexHull(	PointArray,
								count,
								NULL,
								CV_COUNTER_CLOCKWISE,
								hull,
								&hullsize);
					
				int upper = 640, lower = 0;   // min/max y over the hull (640 assumes the image is no taller than that)
				for	(int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					if (PointArray[idx].y < upper) 
						upper = PointArray[idx].y;
					if (PointArray[idx].y > lower) 
						lower = PointArray[idx].y;
				}
				
				float cutoff = lower - (lower - upper) * 0.1f;
				// find interior angles of hull corners
				for (int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					int pdx = idx == 0 ? count - 1 : idx - 1; //  predecessor of idx
					int sdx = idx == count - 1 ? 0 : idx + 1; // successor of idx
						
					cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y);
					cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y);
						
					float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );
						
					// a small interior angle marks a fingertip (the additional cutoff test
					// against the upper 90% of the region is currently disabled)
					if (angle < 1 ){ //&& PointArray[idx].y < cutoff) {
						Blob blob = Blob();
						
						//float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
						//cvMoments( contour_ptr, myMoments );
						
						// this is if using non-angle bounding box
						//CvRect rect	= cvBoundingRect( contour_ptr, 0 );
						blob.boundingRect.x      = PointArray[idx].x-5;
						blob.boundingRect.y      = PointArray[idx].y-5;
						blob.boundingRect.width  = 10;
						blob.boundingRect.height = 10;
						
						//Angle Bounding rectangle
						blob.angleBoundingRect.x	  = PointArray[idx].x-5;
						blob.angleBoundingRect.y	  = PointArray[idx].y-5;
						blob.angleBoundingRect.width  = 10;
						blob.angleBoundingRect.height = 10;
						blob.angle = atan2((float) PointArray[idx].x - center.x , (float) PointArray[idx].y - center.y);
						
						// assign other parameters
						//blob.area                = fabs(area);
						//blob.hole                = area < 0 ? true : false;
						//blob.length 			 = cvArcLength(contour_ptr);
						// AlexP
						// Casting to int causes tracking errors, since centroids are computed
						// as floats and may land between integer pixel positions (which is what
						// we actually want).  Keeping them as floats makes tracking more
						// accurate and more fluid.
						blob.centroid.x			 = PointArray[idx].x;//(myMoments->m10 / myMoments->m00);
						blob.centroid.y 		 = PointArray[idx].y;//(myMoments->m01 / myMoments->m00);
						blob.lastCentroid.x 	 = 0;
						blob.lastCentroid.y 	 = 0;
						
						fingers.push_back(blob);
					}
				}
				// Free memory.
				free(PointArray);
				free(hull);
			}
		}
	}
	
	nBlobs = blobs.size();
	nFingers = fingers.size();
	nObjects = objects.size();
	
	// Free the storage memory.
	// Warning: release it inside this function, otherwise it leaks in a hard-to-trace way.
	if( contour_storage != NULL )
		cvReleaseMemStorage(&contour_storage);
	
	if( storage != NULL )
		cvReleaseMemStorage(&storage);

	return (bTrackFingers)? nFingers:nBlobs;
}
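For reference, a hypothetical caller, modelled on how ofxOpenCv-style contour finders are usually driven; the grayDiff image, the contourFinder member, the threshold numbers, and the surrounding testApp are illustrative assumptions, not taken from the code above:

void testApp::update() {
	// grayDiff is assumed to already hold the thresholded difference image
	// (an ofxCvGrayscaleImage member of testApp); contourFinder is a ContourFinder member
	int nFound = contourFinder.findContours(grayDiff,
						20,               // minArea, in pixels
						(320 * 240) / 3,  // maxArea
						10,               // nConsidered
						20.0,             // hullPress for cvApproxPoly
						true,             // bFindHoles -> CV_RETR_LIST
						false);           // bUseApproximation
	// the return value is nFingers when bTrackFingers is set, otherwise nBlobs
}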