Example 1
static void
find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
    CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    double len = cvContourPerimeter (c);
    /* calculate perimeter len threshold: */
    double q = (mask->height + mask->width) / perimScale;
    /* Get rid of blob if its perimeter is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if it's large enough */
      CvSeq *c_new;
      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new =
            cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
            CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }
      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours (&scanner);

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);
  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
            0));
}
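A minimal sketch of how the helper above might be driven, assuming CVCLOSE_ITR and CVCONTOUR_APPROX_LEVEL are defined elsewhere in the same file; the mask contents and the parameter values below are illustrative assumptions, not part of the original source:
static void
clean_foreground_mask (IplImage * mask)
{
  /* mask: assumed 8-bit, single-channel foreground mask */
  CvMemStorage *storage = cvCreateMemStorage (0);

  /* poly1_hull0 = 1 -> cvApproxPoly smoothing; perimScale = 4 drops blobs
   * whose perimeter is below (width + height) / 4 */
  find_connected_components (mask, 1, 4.0f, storage, NULL);

  cvReleaseMemStorage (&storage);
}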
Example 2
CV_IMPL CvSeq*
cvSegmentFGMask( CvArr* _mask, int poly1Hull0, float perimScale,
                 CvMemStorage* storage, CvPoint offset )
{
    CvMat mstub, *mask = cvGetMat( _mask, &mstub );
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;
    
    // clean up raw mask
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );

    // find contours around only bigger regions
    scanner = cvStartFindContours( mask, tempStorage,
        sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
    
    while( (c = cvFindNextContour( scanner )) != 0 ) 
    {
        double len = cvContourPerimeter( c );
        double q = (mask->rows + mask->cols)/perimScale; // calculate perimeter len threshold
        if( len < q ) //Get rid of blob if its perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else //Smooth its edges if it's large enough
        {
            CvSeq* newC;
            if( poly1Hull0 ) //Polygonal approximation of the segmentation 
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 ); 
            else //Convex Hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // paint the found regions back into the image
    cvZero( mask );
    for( c=contours; c != 0; c = c->h_next ) 
        cvDrawContours( mask, c, cvScalarAll(255), cvScalarAll(0), -1, CV_FILLED, 8,
            cvPoint(-offset.x,-offset.y));

    if( tempStorage != storage )
    {
        cvReleaseMemStorage( &tempStorage );
        contours = 0;
    }

    return contours;
}
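cvSegmentFGMask modifies the mask in place and, when a storage is supplied, returns the list of component contours. A small illustrative caller (the fgMask variable is an assumption):
static void segment_and_report( IplImage* fgMask )
{
    // fgMask: assumed 8-bit, single-channel foreground mask
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* comp = cvSegmentFGMask( fgMask, 1, 4.f, storage, cvPoint(0,0) );
    for( ; comp != 0; comp = comp->h_next )
        printf( "component perimeter: %.1f\n", cvContourPerimeter( comp ) );
    cvReleaseMemStorage( &storage );
}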
Example 3
static COMMAND_FUNC( do_find_contours )
{
	OpenCV_Scanner *ocv_scanner_p;
	OpenCV_Image *ocvi_p;
	/* OpenCV_MemStorage *ocv_mem_p; */

	ocv_scanner_p=PICK_OCV_SCANNER("scanner");
	ocvi_p=PICK_OCVI("binary image");
	/* ocv_mem_p=PICK_OCV_MEM("memory storage"); */
	if( ocv_scanner_p == NO_OPENCV_SCANNER ) return;
	if( ocvi_p == NO_OPENCV_IMAGE ) return;
	/* if( ocv_mem_p == NO_OPENCV_MEM ) return; */
	ocv_scanner_p->ocv_scanner = cvStartFindContours(ocvi_p->ocv_image,
			ocv_scanner_p->ocv_mem, sizeof(CvContour), CV_RETR_EXTERNAL,
			CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
}
Example 4
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name: cvFindContours
//    Purpose:
//      Finds all the contours on the bi-level image.
//    Context:
//    Parameters:
//      img  - source image.
//             Non-zero pixels are considered as 1-pixels
//             and zero pixels as 0-pixels.
//      step - full width of source image in bytes.
//      size - width and height of the image in pixels
//      storage - pointer to storage where will the output contours be placed.
//      header_size - header size of resulting contours
//      mode - mode of contour retrieval.
//      method - method of approximation that is applied to contours
//      first_contour - pointer to first contour pointer
//    Returns:
//      CV_OK or error code
//    Notes:
//F*/
CV_IMPL int
cvFindContours( void*  img,  CvMemStorage*  storage,                
                CvSeq**  firstContour, int  cntHeaderSize,                 
                int  mode, 
                int  method, CvPoint offset )
{
    CvContourScanner scanner = 0;
    CvSeq *contour = 0;
    int count = -1;
    
    CV_FUNCNAME( "cvFindContours" );

    __BEGIN__;

    if( !firstContour )
        CV_ERROR( CV_StsNullPtr, "NULL double CvSeq pointer" );

    if( method == CV_LINK_RUNS )
    {
        if( offset.x != 0 || offset.y != 0 )
            CV_ERROR( CV_StsOutOfRange,
            "Nonzero offset is not supported in CV_LINK_RUNS yet" );

        CV_CALL( count = icvFindContoursInInterval( img, storage,
                                    firstContour, cntHeaderSize ));
    }
    else
    {
        CV_CALL( scanner = cvStartFindContours( img, storage,
                        cntHeaderSize, mode, method, offset ));
        assert( scanner );

        do
        {
            count++;
            contour = cvFindNextContour( scanner );
        }
        while( contour != 0 );

        *firstContour = cvEndFindContours( &scanner );    
    }

    __END__;

    return count;
}
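For reference, a typical call to cvFindContours walks the returned list through h_next; the wrapper below is an illustrative sketch (binaryImg is an assumed 8-bit, single-channel image and is modified by the call):
static int count_external_contours( IplImage* binaryImg )
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* firstContour = 0;
    int n = cvFindContours( binaryImg, storage, &firstContour, sizeof(CvContour),
                            CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
    // the contour sequences stay valid until the storage is released
    cvReleaseMemStorage( &storage );
    return n;
}

Example 5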
bool findBlueNYelContour(IplImage* img, CvMemStorage* &storage,CvPoint &centre,int color){  //color :  blue==0,  yellow==1
	CvSeq* contours;  
	IplImage* timg = cvCloneImage( img ); // make a copy of input image  
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );   
	CvSeq* result;  

	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );  
	cvNamedWindow("rgbContour",0);

	IplImage* hsv = cvCreateImage( cvGetSize(timg), 8, 3 );   
	if(color==0){
		findLP_HSV_BLUE(timg,hsv);
		cvNamedWindow("hsv_license_blue",0);
	}
	else {
		findLP_HSV_YEL(timg,hsv);
		cvNamedWindow("hsv_license_yel",0);
	}
	// smooth only after hsv has been filled in by findLP_HSV_*
	cvSmooth(hsv,hsv,CV_GAUSSIAN,3);
	//	

	cvNamedWindow("侵蚀前",0);
	cvShowImage("侵蚀前",hsv);
	cvErode(hsv,hsv,0,1);
	cvNamedWindow("侵蚀后",0);
	cvShowImage("侵蚀后",hsv);
	cvDilate(hsv,hsv,0,4);
	cvNamedWindow("膨胀后",0);
	cvShowImage("膨胀后",hsv);
	cvCvtColor(hsv,hsv,CV_HSV2RGB);
	

	cvCvtColor(hsv,gray,CV_RGB2GRAY);
	cvThreshold(gray,gray,100,255,0);
	CvContourScanner scanner = NULL;
	scanner = cvStartFindContours(gray,storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
	//ImagePreprocess::contourFinder(gray,0,hsv_blue,4000,10000);
	// find contours and store them all as a list  
/*	cvFindContours( gray, storage, &contours, sizeof(CvContour),  
		CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );  */
	// test each contour  
	int t=0;
	while ((contours = cvFindNextContour(scanner)) != NULL)
	{
		// approximate contour with accuracy proportional  
		// to the contour perimeter  
		result = cvApproxPoly( contours, sizeof(CvContour), storage,  
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.04, 0 );  
		double tempArea = fabs(cvContourArea(result,CV_WHOLE_SEQ));
		double peri=cvContourPerimeter(result);
		CvRect rct=cvBoundingRect(result,1);
		// square contours should have 4 vertices after approximation  
		// relatively large area (to filter out noisy contours)  
		// and be convex.  
		// Note: absolute value of an area is used because  
		// area may be positive or negative - in accordance with the  
		// contour orientation  
		if(tempArea<3500 || tempArea>10000 || 
			result->total < 4 || result->total >10 ||
			peri<340 || peri>500
			|| rct.width/(1.0*rct.height)>3.85 || rct.width/(1.0*rct.height)<2.47 || rct.width<135 || rct.width>175
			){
			cvSubstituteContour(scanner,NULL);
		}
		else{  
			
	//	cout<<"height: "<<rct.height<<" width: "<<rct.width<<" rate: "<<rct.width/(rct.height*1.0)<<endl;
	//			cout<<"edge num: "<<result->total<<endl;
	//			cout<<"area : "<<fabs(cvContourArea(result,CV_WHOLE_SEQ))<<endl;
	//			cout<<"peri : "<<cvContourPerimeter(result)<<endl;
				CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
	//			cvDrawContours( timg, result, color, color, -1, 3, 8 );
	//			cvDrawContours( hsv, result, color, color, -1, 3, 8 );
				t++;
				//		contour = cvApproxPoly( contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );         
				CvMoments moments;
				cvMoments( result, &moments, 0 ); // contour moments; no cast to CvMat* needed
				int xc=moments.m10/moments.m00 , yc=moments.m01/moments.m00; 
				//		double angle3=atan(2*moments.mu11/(moments.mu20-moments.mu02))/2;
		//		cout<<"long: "<<longAxis<<"short: "<<shortAxis<<endl;
				centre=cvPoint(xc,yc);
	//			cvCircle( hsv, centre, 3, color, 3, 8, 0 );
	//			cvCircle( timg, centre, 3, color, 3, 8, 0 );
		}
		// take the next contour  
//		contours = contours->h_next;  			
	}
	result = cvEndFindContours(&scanner);
	cvShowImage("rgbContour",timg);
	if(color==0)
		cvShowImage("hsv_license_blue",hsv);
	else
		cvShowImage("hsv_license_yel",hsv);
	cvReleaseImage( &timg );  
	cvReleaseImage( &hsv );  
	cvReleaseImage( &gray );  
	if(0==t){
		return false;
	}
	else
		return true;
	// release all the temporary images  
	//	cvReleaseImage( &gray );  

	//cvReleaseImage( &hsv_blue );  
}  
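A hypothetical driver for the function above; the image file, the marker drawing and the wrapper function are assumptions, not part of the original:
void detectBluePlate()
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* frame = cvLoadImage("car.jpg");            // assumed input image
    CvPoint centre;
    if (findBlueNYelContour(frame, storage, centre, 0))  // 0 = blue plate, 1 = yellow plate
        cvCircle(frame, centre, 3, CV_RGB(0, 255, 0), 2, 8, 0);
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&frame);
}

Example 6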
void connectComponent(IplImage* src, const int poly_hull0, const float perimScale, int *num,
		vector<CvRect> &rects, vector<CvPoint> &centers) {

	/*
	 * Pre : "src"        :is the input image
	 *       "poly_hull0" :is usually set to 1
	 *       "perimScale" :defines how big connected component will be retained, bigger
	 *                     the number, more components are retained (100)
	 *
	 * Post: "num"        :defines how many connected component was found
	 *       "rects"      :the bounding box of each connected component
	 *       "centers"    :the center of each bounding box
	 */

	rects.clear();
	centers.clear();

	CvMemStorage* mem_storage = NULL;
	CvSeq* contours = NULL;

	// Clean up
	cvMorphologyEx(src, src, 0, 0, CV_MOP_OPEN, 1);
	cvMorphologyEx(src, src, 0, 0, CV_MOP_CLOSE, 1);

	// Find contours around only bigger regions
	mem_storage = cvCreateMemStorage(0);

	CvContourScanner scanner = cvStartFindContours(src, mem_storage, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	CvSeq* c;
	int numCont = 0;

	while ((c = cvFindNextContour(scanner)) != NULL) {

		double len = cvContourPerimeter(c);

		// calculate perimeter len threshold
		double q = (double) (src->height + src->width) / perimScale;

		// get rid of blob if its perimeter is too small
		if (len < q) {

			cvSubstituteContour(scanner, NULL);

		} else {

			// smooth its edge if its large enough
			CvSeq* c_new;
			if (poly_hull0) {

				// polygonal approximation
				c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, 2, 0);

			} else {

				// convex hull of the segmentation
				c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);

			}

			cvSubstituteContour(scanner, c_new);

			numCont++;
		}
	}

	contours = cvEndFindContours(&scanner);

	// Calc center of mass and/or bounding rectangles
	if (num != NULL) {

		// user wants to collect statistics
		int numFilled = 0, i = 0;

		for (i = 0, c = contours; c != NULL; c = c->h_next, i++) {

			if (i < *num) {

				// bounding rectangles around blobs

				rects.push_back(cvBoundingRect(c));

				CvPoint center = cvPoint(rects[i].x + rects[i].width / 2, rects[i].y
						+ rects[i].height / 2);
				centers.push_back(center);

				numFilled++;
			}
		}

		*num = numFilled;

	}

	cvReleaseMemStorage(&mem_storage);

}
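A hypothetical caller matching the Pre/Post comment at the top of connectComponent; fgMask, the wrapper and the cap of 10 components are assumptions:
void labelForeground(IplImage* fgMask)          // fgMask: assumed binary mask
{
    std::vector<CvRect> rects;
    std::vector<CvPoint> centers;
    int num = 10;                               // collect at most 10 components
    connectComponent(fgMask, 1, 100.0f, &num, rects, centers);
    // on return, num holds the number of components actually stored
}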
Example 7
void FTS_ANPR_Seg::extractCharByCCAnalysis( const cv::Mat& oBin,
                                            FTS_ANPR_SegResult& oSegResult )
{
    // Pad the input image first
    // ------------------------------------------------------------------------
	m_oPadded.create( oBin.rows + 2,
					  oBin.cols  + 2,
					  CV_8UC1 );
	cv::copyMakeBorder( oBin, m_oPadded, 1, 1, 1, 1, cv::BORDER_CONSTANT );

    IplImage iiBin    = oBin;
    IplImage iiPadded = m_oPadded;

    cvCopyMakeBorder( &iiBin,
                      &iiPadded,
                      cvPoint( 1, 1 ),
                      IPL_BORDER_CONSTANT,
                      cvScalarAll( 0 )  ); // pad with black border


    // Initializes contour scanning process
    // ------------------------------------------------------------------------
    CvSeq* poContour = 0;
    CvContourScanner oContourScanner;

    oContourScanner = cvStartFindContours( &iiPadded,
                                           m_poStorage,
                                           sizeof( CvContour ),
                                           CV_RETR_EXTERNAL, //CV_RETR_LIST,
                                           CV_CHAIN_APPROX_SIMPLE,
                                           cvPoint( 0, 0 )  );

    // Contour scanning process
    // ------------------------------------------------------------------------
    while(  ( poContour = cvFindNextContour( oContourScanner ) )  )
    {
        // Finding bounding boxes that meet the ratio tests
        // --------------------------------------------------------------------
        CvRect oBox = cvBoundingRect( poContour, 0 );

        if(    !testArea( oBox )
            || !testHeightOverWidth( oBox )
            || !testHeight( oBox.height, iiBin.height )  )
        {
            continue;
        }

        std::list< FTS_ANPR_SegChar*>& oChars = oSegResult.m_oChars;

        // Make sure not too many candidates
        // --------------------------------------------------------------------
        if( oChars.size() >= m_nMaxNumCharCandidates )
        {
            break; // exit the while loop
        }

        // Store the character candidate to the segmentation structure
        // --------------------------------------------------------------------
        oChars.push_back( new FTS_ANPR_SegChar );

        FTS_ANPR_SegChar& oSegChar = *( oChars.back() ); // fill in the empty object

        oSegChar.m_oCharRect = oBox;

        // Offset the bounding box from coordinates in padded image, into coordinates of input image.
        --oSegChar.m_oCharRect.x;
        --oSegChar.m_oCharRect.y;

//        oSegChar.m_oCharBin.resize(oBox.width, oBox.height, SN_PIX_FMT_GREY );
        oSegChar.m_oCharBin = cv::Mat::zeros( cv::Size( oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height ), CV_8UC1 );

        IplImage iiSegCharBin = oSegChar.m_oCharBin;
//        cvZero( &iiSegCharBin );
//        printf("width = %d, height = %d\n", oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height );

        // Draw the outer contour and fill all holes. No internal holes
        // after this.
        cvDrawContours( &iiSegCharBin,
                        poContour,
                        CV_RGB( 255, 255, 255 ),
                        CV_RGB( 255, 255, 255 ),
                        1,
                        CV_FILLED,
                        8,
                        cvPoint( -oBox.x, -oBox.y ) // offset contour to smaller image
                        );

        // Recover all the holes in the original image
        cvSetImageROI( &iiBin, oSegChar.m_oCharRect );
        cvAnd( &iiBin, &iiSegCharBin, &iiSegCharBin, 0 );

//        cv::namedWindow( "CCCCCCCCCCCCCCCCCCCCCCC" );
//        cv::imshow( "CCCCCCCCCCCCCCCCCCCCCCC", oSegChar.m_oCharBin );
//        cv::waitKey();
    }

    cvResetImageROI( &iiBin );
    cvEndFindContours( &oContourScanner );


    // Sort the segments using x-coordinate
    // --------------------------------------------------------------------
    oSegResult.m_oChars.sort( &FTS_ANPR_SegChar::LessInX );
}
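A hypothetical call site; the segmenter instance, the binary plate image and default-constructing the result object are all assumptions:
FTS_ANPR_SegResult segResult;
oSeg.extractCharByCCAnalysis(oBinPlate, segResult);
// segResult.m_oChars now holds the candidate character boxes, sorted by x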
Example 8
void ObjectTracker::findConnectedComponents( IplImage* mask, int poly1_hull2 /* = 0 */, double perimScale /* = 0.25 */, int* num /* = NULL */, CvRect* bbs /* = NULL */, CvPoint* centers /* = NULL */ ) {
    int cvContourApproxLevel = 2;

    static CvMemStorage *mem_storage = NULL;
    static CvSeq *contours = NULL;

    if (mem_storage == NULL) {
        mem_storage = cvCreateMemStorage(0);
    } else {
        cvClearMemStorage(mem_storage);
    }

    CvContourScanner scanner = cvStartFindContours(mask, mem_storage, sizeof(CvContour),
                               CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    CvSeq *c;
    int numCont = 0;
    while ((c = cvFindNextContour(scanner)) != NULL) {
        double len = cvContourPerimeter(c);

        double q = (mask->height + mask->width) * perimScale;

        if (len < q) {
            cvSubstituteContour(scanner, NULL);
        } else {
            if (poly1_hull2) {
                CvSeq *c_new = c;  // default to the original contour if poly1_hull2 is neither 1 nor 2
                if (poly1_hull2 == 1) {
                    c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP,
                                         cvContourApproxLevel, 0);
                } else if (poly1_hull2 == 2) {
                    c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);
                }
                cvSubstituteContour(scanner, c_new);
            }
            numCont++;
        }
    }

    contours = cvEndFindContours(&scanner);

    const CvScalar CVX_WHITE = CV_RGB(0xff,0xff,0xff);
    const CvScalar CVX_BLACK = CV_RGB(0x00,0x00,0x00);

    cvZero(mask);
    IplImage *maskTemp;

    // CALC CENTER OF MASS AND/OR BOUNDING RECTANGLES
    //
    if(num != NULL) {
        //User wants to collect statistics
        //
        int N = *num, numFilled = 0, i=0;
        CvMoments moments;
        double M00, M01, M10;
        maskTemp = cvCloneImage(mask);
        for(i=0, c=contours; c != NULL; c = c->h_next,i++ ) {
            if(i < N) {
                // Only process up to *num of them
                //
                cvDrawContours(
                    maskTemp,
                    c,
                    CVX_WHITE,
                    CVX_WHITE,
                    -1,
                    CV_FILLED,
                    8
                );
                // Find the center of each contour
                //
                if(centers != NULL) {
                    cvMoments(maskTemp,&moments,1);
                    M00 = cvGetSpatialMoment(&moments,0,0);
                    M10 = cvGetSpatialMoment(&moments,1,0);
                    M01 = cvGetSpatialMoment(&moments,0,1);
                    centers[i].x = (int)(M10/M00);
                    centers[i].y = (int)(M01/M00);
                }
                //Bounding rectangles around blobs
                //
                if(bbs != NULL) {
                    bbs[i] = cvBoundingRect(c);
                }
                cvZero(maskTemp);
                numFilled++;
            }
            // Draw filled contours into mask
            //
            cvDrawContours(
                mask,
                c,
                CVX_WHITE,
                CVX_WHITE,
                -1,
                CV_FILLED,
                8
            );
        } //end looping over contours
        *num = numFilled;
        cvReleaseImage( &maskTemp);
    }
    // ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
    //
    else {
        // The user doesn't want statistics, just draw the contours
        //
        for( c=contours; c != NULL; c = c->h_next ) {
            cvDrawContours(
                mask,
                c,
                CVX_WHITE,
                CVX_BLACK,
                -1,
                CV_FILLED,
                8
            );
        }
    }
}
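A hypothetical caller for the method above; the tracker instance, the mask and the fixed-size arrays are assumptions:
int num = 32;                 // report at most 32 blobs
CvRect  bbs[32];
CvPoint centers[32];
tracker.findConnectedComponents(fgMask, 1, 0.25, &num, bbs, centers);
// num now holds how many blobs were actually found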
Example 9
    void CvScan::charsImgSegement(IplImage *src, vector<IplImage*> &vector) {
        
        if (src == NULL) {
            return;
        }
        //*
        int m_otsu = otsu(src);   // compute the Otsu threshold on the source image
        printf("m_otsu:%d\n",m_otsu);
        IplImage *pimg = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
        cvZero(pimg);
        cvThreshold(src, pimg, m_otsu, 255, CV_THRESH_BINARY);
        //check ret: right
        //vector.push_back(pimg);
        //return;
        //*/
        
        std::vector<CvRect> contours;
        CvSeq* contour;
        CvMemStorage *storage = cvCreateMemStorage(0);
        CvContourScanner scanner= cvStartFindContours(pimg,storage,sizeof(CvContour),CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
        // start walking the contour tree
        CvRect rect;
        double tmparea = 0.0, indexArea = 0.0, minarea = 5*5;
        double pixels = pimg->width*pimg->height;
        int i = 0;
        uchar *pp;
        IplImage *pdst;
        while ((contour = cvFindNextContour(scanner))) {
            tmparea = fabs(cvContourArea(contour));
            indexArea = fabs(cvContourArea(contour)/pixels);
            rect = cvBoundingRect(contour,0);
            
//            if (indexArea < 0.02 || indexArea >= 1 || tmparea < minarea) {
//                // region does not meet the criteria: drop it
//                cvSubstituteContour(scanner, NULL);
//            }else{
//                contours.push_back(rect);
//            }
            //*
            if (tmparea<minarea){
                // if the connected region is small and its centre pixel is white, fill it with black
                pp=(uchar*)(pimg->imageData+pimg->widthStep*(rect.y+rect.height/2)+rect.x+rect.width/2);
                if (pp[0]==255){
                    for (int y=rect.y;y<rect.y+rect.height;y++){
                        for (int x=rect.x;x<rect.x+rect.width;x++){
                            pp=(uchar*)(pimg->imageData+pimg->widthStep*y+x);
                            if(pp[0]==255){
                                pp[0]=0;
                            }
                        }
                    }
                }
            }else{
                contours.push_back(rect);
            };
            //*/
        }
        cvEndFindContours(&scanner);
        cvReleaseMemStorage(&storage);   // the bounding rects have already been copied out
        int size = (int)contours.size();
        if (size <= 0) {
            cvReleaseImage(&pimg);
            return;
        }
        printf("检测出的矩形个数:%d\n",size);
        
        std::vector<CvRect> sortedRect;
        // sort the size-qualified blocks from left to right
        sortRect(contours, sortedRect);
        for (i = 0; i < sortedRect.size(); i++) {
            
            //printf("找到的rect:%d-%d-%d-%d\n",sortedRect[i].x,sortedRect[i].y,sortedRect[i].width,sortedRect[i].height);
            pdst = cvCreateImage(cvSize(sortedRect[i].width,sortedRect[i].height), IPL_DEPTH_8U, 1);
            cvSetImageROI(pimg, sortedRect[i]);
            //cvAdd(pimg, pdst, pdst, NULL);
            cvCopy(pimg, pdst, NULL);
            //cvReleaseImage(&pdst);
            cvResetImageROI(pimg);
            if (verifyImgCharSizes(pdst)) {
                IplImage *dst = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), pdst->depth, pdst->nChannels);
                cvResize(pdst, dst, CV_INTER_LINEAR);
                vector.push_back(dst);
                cvReleaseImage(&pdst);
            }
        }
        cvReleaseImage(&pimg);
        //printf("found %d character blocks in total\n", i);
    }
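A hypothetical call; the CvScan instance and the binary plate image are assumptions:
std::vector<IplImage*> charImages;
oScan.charsImgSegement(pPlateImg, charImages);
// each element of charImages is a resized character patch owned by the caller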
Example 10
void connected_Components(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
{
	static CvMemStorage*	mem_storage	= NULL;
	static CvSeq*			contours	= NULL;
	//CLEAN UP RAW MASK
	cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_OPEN, CVCLOSE_ITR );
	cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_CLOSE, CVCLOSE_ITR );

	//FIND CONTOURS AROUND ONLY BIGGER REGIONS
	if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
    else cvClearMemStorage(mem_storage);

	CvContourScanner scanner = cvStartFindContours(mask,mem_storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
	CvSeq* c;
	int numCont = 0;
	while( (c = cvFindNextContour( scanner )) != NULL )
	{
		double len = cvContourPerimeter( c );
		double q = (mask->height + mask->width) /perimScale;   //calculate perimeter len threshold
		if( len < q ) //Get rid of blob if its perimeter is too small
		{
			cvSubstituteContour( scanner, NULL );
		}
		else //Smooth its edges if it's large enough
		{
			CvSeq* c_new;
			if(poly1_hull0) //Polygonal approximation of the segmentation
	            c_new = cvApproxPoly(c,sizeof(CvContour),mem_storage,CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL,0);
			else //Convex Hull of the segmentation
				c_new = cvConvexHull2(c,mem_storage,CV_CLOCKWISE,1);
            cvSubstituteContour( scanner, c_new );
			numCont++;
        }
	}
	contours = cvEndFindContours( &scanner );

	// PAINT THE FOUND REGIONS BACK INTO THE IMAGE
	cvZero( mask );
	IplImage *maskTemp;
	//CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
	if(num != NULL)
	{
		int N = *num, numFilled = 0, i=0;
		CvMoments moments;
		double M00, M01, M10;
		maskTemp = cvCloneImage(mask);
		for(i=0, c=contours; c != NULL; c = c->h_next,i++ )
		{
			if(i < N) //Only process up to *num of them
			{
				cvDrawContours(maskTemp,c,CV_CVX_WHITE, CV_CVX_WHITE,-1,CV_FILLED,8);
				//Find the center of each contour
				if(centers != NULL)
				{
					cvMoments(maskTemp,&moments,1);
					M00 = cvGetSpatialMoment(&moments,0,0);
					M10 = cvGetSpatialMoment(&moments,1,0);
					M01 = cvGetSpatialMoment(&moments,0,1);
					centers[i].x = (int)(M10/M00);
					centers[i].y = (int)(M01/M00);
				}
				//Bounding rectangles around blobs
				if(bbs != NULL)
				{
					bbs[i] = cvBoundingRect(c);
				}
				cvZero(maskTemp);
				numFilled++;
			}
			//Draw filled contours into mask
			cvDrawContours(mask,c,CV_CVX_WHITE,CV_CVX_WHITE,-1,CV_FILLED,8); //draw to central mask
		} //end looping over contours
		*num = numFilled;
		cvReleaseImage( &maskTemp);
	}
	else
	{
		for( c=contours; c != NULL; c = c->h_next )
		{
			cvDrawContours(mask,c,CV_CVX_WHITE, CV_CVX_BLACK,-1,CV_FILLED,8);
		}
	}
}
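A hypothetical caller; fgMask, the wrapper and the array sizes are assumptions, and the CVCLOSE_ITR, CVCONTOUR_APPROX_LEVEL and CV_CVX_* macros are assumed to be defined next to connected_Components in the original source:
void summarizeBlobs(IplImage* fgMask)
{
    int num = 16;             // report at most 16 blobs
    CvRect  bbs[16];
    CvPoint centers[16];
    connected_Components(fgMask, 1, 4.0f, &num, bbs, centers);
    // num now holds how many blobs were found and filled in
}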
Example 11
void Buoy::FC_FindBiggestContours(IplImage *src)
{

    _mask=0;
    nContours=0;
    largest_length=0;
    len=0;
    dst=0;
    contours=0;
    c=0;
    newC=0;

    CvMemStorage* tempStorage = cvCreateMemStorage();
   temp=*src;
  IplImage *src_img=cvCreateImage(cvSize(temp.width,temp.height),IPL_DEPTH_32S,1);
//  IplImage *dest=cvCreateImage(cvSize(temp.width,temp.height),IPL_DEPTH_8U,1);
   _mask=&temp;
  int poly1Hull0=1;
  CvPoint offset;
  offset.x=0;
  offset.y=0;
 mask = cvGetMat( _mask, &mstub );




 // clean up raw mask
 cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
 cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );
 // find contours around only bigger regions
 scanner = cvStartFindContours( mask, tempStorage,
                 sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
 while( (c = cvFindNextContour( scanner )) != 0 )
 {
   len = cvContourPerimeter( c );
   if(len > largest_length)
   {
     largest_length = len;
   }
 }
 contours=cvEndFindContours( &scanner );
 scanner = cvStartFindContours( mask, tempStorage,
                 sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
 while( (c = cvFindNextContour( scanner )) != 0 )
 {
   len = cvContourPerimeter( c );
   double q = largest_length ;
   if( len < q ) //Get rid of blob if its perimeter is too small
     cvSubstituteContour( scanner, 0 );
   else  //Smooth its edges if it's large enough
   {

     if( poly1Hull0 ) //Polygonal approximation of the segmentation
       newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 );
     else //Convex Hull of the segmentation
       newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
     cvSubstituteContour( scanner, newC );
     nContours++;
     R=cvBoundingRect(c,0);
   }
 }
 contours = cvEndFindContours( &scanner );
 // paint the found regions back into the image
 cvZero( src_img );
 cvZero( _mask );
 for( c=contours; c != 0; c = c->h_next )
 {
   cvDrawContours( src_img, c, cvScalarAll(1), cvScalarAll(1), -1, -1, 8,
           cvPoint(-offset.x,-offset.y));
 }
   cvReleaseMemStorage( &tempStorage );
// convert to 8 bit IplImage
for( int i = 0; i < src_img->height; i++ )
  for( int j = 0; j < src_img->width; j++ )
  {
    int idx = CV_IMAGE_ELEM( src_img, int, i, j ); // label drawn into the 32-bit image
    dst = &CV_IMAGE_ELEM( src, uchar, i, j );      // corresponding pixel in the 8-bit source
    *dst = ( idx == -1 || idx == 1 ) ? (uchar)255 : (uchar)0;
  }
//qDebug()<<nContours;
cvReleaseImage(&src_img);
// cvReleaseImage(&temp);

//return dest;
}
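Example 12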
//--------------------------------------------------------------------------------
int ofxCvMyContourFinder::findContours( IplImage*  input,
									  int minArea,
									  int maxArea,
									  int nConsidered,
									  bool bFindHoles,
                                      int approximation) {


    // get width/height disregarding ROI

    _width = input->width;
    _height = input->height;

	reset();

	// opencv will clobber the image it detects contours on, so we want to
	// copy it into a copy before we detect contours. That copy is allocated
	// if necessary (necessary = (a) not allocated or (b) wrong size),
	// so be careful if you pass in different sized images to "findContours":
	// there is a performance penalty, but we think there is not a memory leak
	// to worry about. Better to create multiple contour finders for different
	// sizes, i.e. if you are finding contours in a 640x480 image but also a
	// 320x240 image, it is better to make two ofxCvMyContourFinder objects
	// than to use one, because you will get penalized less.



	if( !inputCopy ) {
		inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels);
	} else if( inputCopy->width != _width || inputCopy->height != _height ) {
        // reallocate to new size
        cvReleaseImage(&inputCopy);
        inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels);
	}

    cvSetImageROI(inputCopy, cvGetImageROI(input));
    cvCopy(input, inputCopy);

	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	CvContourScanner scanner = cvStartFindContours( inputCopy, contour_storage,
                    sizeof(CvContour), retrieve_mode, CV_CHAIN_APPROX_SIMPLE);

    CvSeq* c;
    int numCont = 0;

    while(( c = cvFindNextContour(scanner)) != NULL)
    {

        CvSeq* c_new;

        if( approximation > 0){

            c_new = cvApproxPoly(
                c,
                sizeof(CvContour),
                contour_storage,
                CV_POLY_APPROX_DP,
                approximation,
                0
            );

        } else {

            c_new = cvConvexHull2(
                c,
                contour_storage,
                CV_CLOCKWISE,
                1
                );

        }


        float area = fabs( cvContourArea(c_new, CV_WHOLE_SEQ) );
		if( (area > minArea) && (area < maxArea) ) {
            cvSeqBlobs.push_back(c_new);
		}
        numCont++;

    }

    cvEndFindContours(&scanner);  // finish the scan; the stored sequences stay valid in contour_storage


	// sort the pointers based on size
	if( cvSeqBlobs.size() > 1 ) {
        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), mysort_carea_compare );
	}


	// now, we have cvSeqBlobs.size() contours, sorted by size in the array
    // cvSeqBlobs let's get the data out and into our structures that we like
	for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
		myblobs.push_back( ofxCvMyBlob() );
		float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
		CvRect rect	= cvBoundingRect( cvSeqBlobs[i], 0 );
		cvMoments( cvSeqBlobs[i], myMoments );

		myblobs[i].area                     = fabs(area);
		myblobs[i].hole                     = area < 0 ? true : false;
		myblobs[i].length 			      = cvArcLength(cvSeqBlobs[i]);
		myblobs[i].boundingRect.x           = rect.x;
		myblobs[i].boundingRect.y           = rect.y;
		myblobs[i].boundingRect.width       = rect.width;
		myblobs[i].boundingRect.height      = rect.height;

		if(cvSeqBlobs[i]->total >= 6){
        myblobs[i].box2D_cv                 = cvMinAreaRect2(cvSeqBlobs[i]);
		}
		myblobs[i].bounding_cv              = cvBoundingRect(cvSeqBlobs[i]);

		double x = (myMoments->m10 / myMoments->m00);
		double y = (myMoments->m01 / myMoments->m00);
		myblobs[i].centroid.x 			  = (int)x;
		myblobs[i].centroid.y 			  = (int)y;
		myblobs[i].centroid_cv              = cvPoint2D32f(x,y);

       // myblobs[i].contour = (CvPoint *)malloc(cvSeqBlobs[i]->total * sizeof(CvPoint));
       // cvCvtSeqToArray(cvSeqBlobs[i], myblobs[i].contour, CV_WHOLE_SEQ);

		// get the points for the blob:

		CvPoint          pt;
		CvSeqReader       reader;
		cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );


    	for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {

			CV_READ_SEQ_ELEM( pt, reader );
            myblobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );

		}

		myblobs[i].nPts = myblobs[i].pts.size();

	}

    nBlobs = myblobs.size();

	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
	if( storage != NULL ) { cvReleaseMemStorage(&storage); }

	return nBlobs;

}
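A hypothetical openFrameworks-style usage of the contour finder above; the grayscale input, the wrapper and the thresholds are assumptions:
void findBlobsOnce(IplImage* gray)   // gray: assumed 8-bit single-channel image
{
    ofxCvMyContourFinder finder;
    // up to 10 blobs between 20 px and one third of a 320x240 frame,
    // including holes, with DP approximation level 2
    int nFound = finder.findContours(gray, 20, (320 * 240) / 3, 10, true, 2);
    printf("found %d blobs\n", nFound);
}

Example 13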
void THISCLASS::OnStep() {
	std::vector<Particle> rejectedparticles;

	// Get and check input image
	IplImage *inputimage = cvCloneImage(mCore->mDataStructureImageBinary.mImage);
	IplImage *outputImage = mCore->mDataStructureImageBinary.mImage;
	//mCore->mDataStructureImageBinary.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 1) {
		AddError(wxT("The input image is not a grayscale image."));
		return;
	}
	cvZero(outputImage);

	// We clear the output vector
	mParticles.clear();

	// Initialization
	Particle tmpParticle; // Used to put the calculated value in memory
	CvMoments moments; // Used to calculate the moments
	std::vector<Particle>::iterator j; // Iterator used to stock the particles by size

	// We allocate memory to extract the contours from the binary image
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contour = 0;


	// Init blob extraction
	CvContourScanner blobs = cvStartFindContours(inputimage, storage, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	// This is used to correct the position in case of ROI
	CvRect rectROI;
	if (inputimage->roi != NULL) {
		rectROI = cvGetImageROI(inputimage);
	} else {
		rectROI.x = 0;
		rectROI.y = 0;
	}

	while ((contour = cvFindNextContour(blobs)) != NULL) {
		// Computing the moments
		cvMoments(contour, &moments);

		// Computing particle area
		tmpParticle.mArea = moments.m00;
		tmpParticle.mCenter.x = (float)(rectROI.x + (moments.m10 / moments.m00 + 0.5));  // moments computed via Green's theorem
		tmpParticle.mCenter.y = (float)(rectROI.y + (moments.m01 / moments.m00 + 0.5));  // m10 = x direction, m01 = y direction, m00 = area, as stated in the theorem

		// Selection based on area
		if ((mAreaSelection == false) || ((tmpParticle.mArea <= mMaxArea) && (tmpParticle.mArea >= mMinArea)))
		{
			tmpParticle.mCompactness = GetContourCompactness(contour);
			if ((mCompactnessSelection == false) || ((tmpParticle.mCompactness > mMinCompactness) && (tmpParticle.mCompactness < mMaxCompactness)))
			{
				double tempValue = cvGetCentralMoment(&moments, 2, 0) - cvGetCentralMoment(&moments, 0, 2);
				tmpParticle.mOrientation = atan(2 * cvGetCentralMoment(&moments, 1, 1) / (tempValue + sqrt(tempValue * tempValue + 4 * cvGetCentralMoment(&moments, 1, 1) * cvGetCentralMoment(&moments, 1, 1))));
				if ((mOrientationSelection == false) || (((tmpParticle.mOrientation > mMinOrientation) && (tmpParticle.mOrientation < mMaxOrientation)) || ((tmpParticle.mOrientation > mMinOrientation + PI) && (tmpParticle.mOrientation < mMaxOrientation + PI)) || ((tmpParticle.mOrientation > mMinOrientation - PI) && (tmpParticle.mOrientation < mMaxOrientation - PI))))
				{
					cvDrawContours(outputImage, contour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED);
					// Check if we have already enough particles
					if (mParticles.size() == mMaxNumber)
					{
						// If the particle is bigger than the smallest stored particle, store it, else do nothing
						if (tmpParticle.mArea > mParticles.back().mArea)
						{
							// Find the place were it must be inserted, sorted by size
							for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++);

							// Fill unused values
							tmpParticle.mID = -1;
							tmpParticle.mIDCovariance = -1;

							// Insert the particle
							mParticles.insert(j, tmpParticle);
							// Remove the smallest one
							mParticles.pop_back();
						}
					}
					else
					{
						// The particle is added at the correct place
						// Find the place were it must be inserted, sorted by size
						for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++);

						// Fill unused values
						tmpParticle.mID = -1;
						tmpParticle.mIDCovariance = -1;

						// Insert the particle
						mParticles.insert(j, tmpParticle);
					}
				}
			}
		}
		else
		{
			rejectedparticles.push_back(tmpParticle);
		}
		cvRelease((void**)&contour);
	}
	contour = cvEndFindContours(&blobs);

	// If we need to display the particles
	/* if(trackingimg->GetDisplay())
	{
		for(j=rejectedparticles.begin();j!=rejectedparticles.end();j++)
		{
			trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0));
		}
		for(j=particles.begin();j!=particles.end();j++)
		{
			trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(0,255,0));
			trackingimg->Cover(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0),2);
		}
	} */

	cvReleaseImage(&inputimage);
	cvRelease((void**)&contour);
	cvReleaseMemStorage(&storage);

	// Set these particles
	mCore->mDataStructureParticles.mParticles = &mParticles;

	// Let the DisplayImage know about our image
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetParticles(&mParticles);
		de.SetMainImage(mCore->mDataStructureImageBinary.mImage);
	}
}