コード例 #1
0
ファイル: BlobFinder.cpp プロジェクト: space150/space150-Cing
/**
 * @internal
 * @brief Find the blobs (bright connected regions) in the received image.
 *
 * What it looks for in an image is bright areas, so typically
 * the image result of a background subtraction is a good input.
 * Results are stored in m_blobs; a temporary grayscale image is borrowed
 * from (and returned to) the ImageResourceManager.
 *
 * @param[in] inImage image where the blobs will be searched (1 or 3 channels)
 */
void BlobFinder::update( const Image& inImage )
{
	// Fail fast if init() has not been called yet
	if ( !isValid() )
	THROW_EXCEPTION( "Trying to compute blobs, with the BlobFinder not initialized. Init method should be called" );

	// If the caller never configured the blob-area limits, default them:
	// min no lower than 10 (to avoid opencv issues), max = whole image area
	if ( (m_minBlobArea < 0) || (m_maxBlobArea < 0) )
	{
		m_minBlobArea = 10;
		m_maxBlobArea = (float)inImage.getWidth() * (float)inImage.getHeight();
	}

	// Only 1-channel (grayscale) or 3-channel (RGB) inputs are supported
	if( (inImage.getNChannels() != 1) && (inImage.getNChannels() != 3) )
	THROW_EXCEPTION( "Trying to compute blobs on images with non supporte format -> only RGB or GRAYSCALE images supported" );

	// Borrow a 1-channel temp image of the same size to run the contour finder on
	IplImage* cvTempImage = ImageResourceManager::getSingleton().getImage( inImage.getWidth(), inImage.getHeight(), 1 );

	// 3 channels -> convert down to one channel
	if ( inImage.getNChannels() == 3 )
		cvConvertImage( &inImage.getCVImage(), cvTempImage );
	// just one channel -> Copy the input image
	else 
		cvCopy( &inImage.getCVImage(), cvTempImage );

	// Find blobs (openCV contours); CV_RETR_EXTERNAL keeps outer contours only
	int retrivalMode = CV_RETR_EXTERNAL; // CV_RETR_CCOMP
	cvFindContours( cvTempImage, m_findContoursStorage, &m_contour, sizeof(CvContour), retrivalMode, CV_CHAIN_APPROX_SIMPLE );

	// Iterate through found contours and store those within the area limits
	m_blobs.clear();
	for( ; m_contour != 0; m_contour = m_contour->h_next )
	{
		// Absolute contour area (fabs: contour orientation can make it negative)
		double area = fabs( cvContourArea( m_contour, CV_WHOLE_SEQ ) );

		// Skip blobs outside the [m_minBlobArea, m_maxBlobArea] range
		if ( ( area > m_maxBlobArea ) || ( area < m_minBlobArea ) )
		  continue;

		// Store new Blob
		m_blobs.push_back( Blob( area, m_contour ) );
	}

	// Return the borrowed temp image to the pool
	ImageResourceManager::getSingleton().releaseImage( cvTempImage );

	// Extract information of found blobs
	extractBlobsInformation();

	// Clear OpenCV contours storage.
	// NOTE(review): the contour pointers stored in m_blobs above reference
	// this storage -- presumably extractBlobsInformation() copies what it
	// needs before this point; confirm before reordering.
	cvClearMemStorage( m_findContoursStorage );
}
コード例 #2
0
// Function to detect and return the coordinates of the largest face in an
// image.  The returned pointer references either the static fallback
// rectangle or the static detection storage, so it is only valid until the
// next call to this function.
CvRect* detect_and_draw( IplImage* img, char* cascade_name )
{
    // Detection result storage; static so the returned pointer (which may
    // point into this storage) stays valid after we return
    static CvMemStorage* storage = 0;

    // Haar classifier cascade, loaded once on first call
    static CvHaarClassifierCascade* cascade = 0;

    // Fallback result when no face beats the initial 2x2 rectangle.
    // Static so returning its address is well-defined: the original code
    // did `CvRect* large = &cvRect(0,0,2,2);`, taking the address of a
    // temporary -- ill-formed C++ and a dangling pointer.
    static CvRect fallback;

    int scale = 1;

    // Scratch image sized from the input's ROI (kept for parity with the
    // original; it is now actually released -- see below)
    IplImage* temp = cvCreateImage( cvSize(img->roi->width/scale,img->roi->height/scale), 8, 3 );
    int i;

    // Load the HaarClassifierCascade on first call only
    if( cascade == 0)
    {
        cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    }

    // Allocate the memory storage once and reuse it.  The original called
    // cvCreateMemStorage on every invocation, leaking the previous storage.
    if( storage == 0 )
        storage = cvCreateMemStorage(0);

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Start with a tiny rectangle so any real detection replaces it
    fallback = cvRect(0, 0, 2, 2);
    CvRect* large = &fallback;

    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {
        // There can be more than one face in an image. So create a growable
        // sequence of faces; detect the objects and store them in it.
        CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Keep the rectangle with the largest width+height sum
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            if((r->height + r->width) > (large->height + large->width))
            {
                large = r;
            }
        }
    }

    // Release the temp image BEFORE returning.  The original released it
    // after the return statement, i.e. never -- a per-call leak.
    cvReleaseImage( &temp );

    return large;
}
コード例 #3
0
/* In-place transform callback: runs face detection on the incoming video
 * frame and notifies downstream via kms_face_detector_send_event().
 * Always returns GST_FLOW_OK (frames are never rejected). */
static GstFlowReturn
kms_face_detector_transform_frame_ip (GstVideoFilter * filter,
                                      GstVideoFrame * frame)
{
    KmsFaceDetector *facedetector = KMS_FACE_DETECTOR (filter);
    GstMapInfo info;

    /* Haar mode without a loaded cascade: nothing we can do, pass the
     * frame through untouched */
    if ((facedetector->priv->haar_detector)
            && (facedetector->priv->pCascadeFace == NULL)) {
        return GST_FLOW_OK;
    }

    /* (Re)create the cached working images for this frame's geometry,
     * then map the buffer read-only */
    kms_face_detector_initialize_images (facedetector, frame);
    gst_buffer_map (frame->buffer, &info, GST_MAP_READ);

    /* Wrap the mapped buffer in the IplImage header (no copy) and
     * downscale for faster detection */
    facedetector->priv->cvImage->imageData = (char *) info.data;
    cvResize (facedetector->priv->cvImage, facedetector->priv->cvResizedImage,
              CV_INTER_LINEAR);

    g_mutex_lock (&facedetector->priv->mutex);

    /* QoS: when we have been flagged as too slow, skip detection for this
     * frame; the `send:` label below then re-reports the detections still
     * held in pFaceRectSeq from an earlier frame */
    if (facedetector->priv->qos_control) {
        facedetector->priv->throw_frames++;
        GST_DEBUG ("Filter is too slow. Frame dropped %d",
                   facedetector->priv->throw_frames);
        g_mutex_unlock (&facedetector->priv->mutex);
        goto send;
    }

    g_mutex_unlock (&facedetector->priv->mutex);

    /* Drop previous detections before running a fresh pass */
    cvClearSeq (facedetector->priv->pFaceRectSeq);
    cvClearMemStorage (facedetector->priv->pStorageFace);
    if (facedetector->priv->haar_detector) {
        /* Haar cascade detection; min/max face sizes are 1/20 and 1/2 of
         * the resized frame respectively */
        facedetector->priv->pFaceRectSeq =
            cvHaarDetectObjects (facedetector->priv->cvResizedImage,
                                 facedetector->priv->pCascadeFace, facedetector->priv->pStorageFace, 1.2,
                                 3, CV_HAAR_DO_CANNY_PRUNING,
                                 cvSize (facedetector->priv->cvResizedImage->width / 20,
                                         facedetector->priv->cvResizedImage->height / 20),
                                 cvSize (facedetector->priv->cvResizedImage->width / 2,
                                         facedetector->priv->cvResizedImage->height / 2));

    } else {
        /* Alternative (non-Haar) classifier fills the same sequence */
        classify_image (facedetector->priv->cvResizedImage,
                        facedetector->priv->pFaceRectSeq);
    }

send:
    /* Publish detections, if any (on the QoS path these are the detections
     * kept from an earlier frame) */
    if (facedetector->priv->pFaceRectSeq->total != 0) {
        kms_face_detector_send_event (facedetector, frame);
    }

    gst_buffer_unmap (frame->buffer, &info);

    return GST_FLOW_OK;
}
コード例 #4
0
ファイル: main.cpp プロジェクト: jmnx/Learning-OpenCV
// Detect any faces present in the image, draw a red box around each one,
// show the result window, and report whether at least one face was found.
bool detect_and_draw( IplImage* img,CvHaarClassifierCascade* cascade )
{
    int scale = 1;

    // Scratch image (same size as input at scale 1); must be released on
    // every exit path
    IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );

    // Corners of the rectangle drawn around each face
    CvPoint pt1, pt2;

    // Face count.  Initialised so the return value is defined even when
    // `cascade` is NULL -- the original read `i` uninitialised in that case.
    int i = 0;

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {
        // There can be more than one face in an image. So create a growable
        // sequence of faces; detect the objects and store them in it.
        CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 30, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Loop the number of faces found.
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            // Find the dimensions of the face, and scale it if necessary
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;

            // Draw the rectangle in the input image
            cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
        }
    }

    // Show the image in the window named "result"
    cvShowImage( "result", img );

    // Release the temp image BEFORE returning.  The original released it
    // after the return statement (unreachable), leaking it on every call.
    cvReleaseImage( &temp );

    // After the loop, i == number of faces drawn
    return i > 0;
}
コード例 #5
0
/* Clean up a foreground mask and keep only components with a large enough
 * contour, redrawing the survivors filled back into `mask`.
 *
 * NOTE(review): `mem_storage` and `contours` are passed BY VALUE, so the
 * assignments to them below never reach the caller.  A storage allocated
 * here when mem_storage == NULL is leaked on return, and `contours` acts
 * as a purely local variable -- confirm whether callers expect these to be
 * out-parameters (a CvMemStorage** / CvSeq** interface).
 */
static void
find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
    CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;              /* count of contours kept (not returned) */
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK: open removes speckle noise, close fills small holes */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    /* NOTE(review): cvContourArea returns the AREA of the contour, yet it
     * is compared against a perimeter-scaled threshold below -- confirm
     * whether cvArcLength was intended. */
    double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
    /* calculate perimeter len threshold, scaled to the image size: */
    double q = (mask->height + mask->width) / perimScale;
    /* Get rid of blob if its perimeter is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if its large enough */
      CvSeq *c_new;
      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new =
            cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
            CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }
      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  /* Collect the surviving contour list (local only -- see NOTE above) */
  contours = cvEndFindContours (&scanner);

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);
  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
            0));
}
コード例 #6
0
// Detect the dominant (largest) face in srcImg and return its bounding box
// through `roi`, scaled back to full-resolution coordinates.
// Returns false when the cascade found no face (roi is left untouched);
// returns true otherwise (including when no cascade is loaded, matching
// the original behaviour).
bool detect_and_draw( IplImage* srcImg, CvRect& roi)
{
    double scale = 1.1;
    // Grayscale copy and downscaled working image for faster detection
    IplImage* gray = cvCreateImage( cvSize(srcImg->width,srcImg->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (srcImg->width/scale),
                                         cvRound (srcImg->height/scale)),
                                         8, 1 );
    // Overall result; starts true so the no-cascade case keeps returning true
    bool found = true;

    cvCvtColor( srcImg, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                            cvSize(20, 20) );

        int index = 0;
        //must contain face
        if (faces->total==0)
        {
            printf("\n--Error with the face image: no face detected!\n");
            // Fall through instead of returning here: the original's early
            // `return false` leaked both gray and small_img.
            found = false;
        }
        else
        {
            //more than one face: keep the rect with the largest area
            if (faces->total>1)
            {
                printf("\n--Warning with the face image: more than one face detected!\n");

                double area = 0.0;
                for (int i = 0; i < faces->total; i++)
                {
                    CvRect* tr = (CvRect*)cvGetSeqElem( faces, i );
                    if(tr->height*tr->width > area)
                    {
                        area = tr->height*tr->width;
                        index = i;
                    }
                }
            }
            //get roi, scaled back to the original image resolution
            CvRect* r = (CvRect*)cvGetSeqElem( faces, index );
            roi.x = r->x*scale;
            roi.y = r->y*scale;
            roi.width = r->width*scale;
            roi.height = r->height*scale;
        }
    }
    // Single exit point: images are released on every path
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
    return found;
}
コード例 #7
0
ファイル: cards.c プロジェクト: Hoevers/CardDetector
/**
 * Try to detect a card from the learned list.
 * @param card_roi The binary enhanced image (single-card region of interest)
 * @return Currently always CARD_UNKNOWN -- the loop below only visualises
 *         the found contours; no comparison against learned cards is
 *         implemented yet (note `card_found` is never updated either).
 */
enum card_type
card_detect(CvMat *card_roi)
{

	int contour_count     = 0;
	bool card_found       = false;   /* NOTE(review): never written after init */
	enum card_type card   = CARD_UNKNOWN;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq *contour        = NULL;
	CvSeq *contours       = NULL;
	CvMat *edges          = NULL;

	/* Detect edges and find contours.
	 * NOTE(review): `edges` is allocated but never zeroed (cvCreateMat
	 * leaves the data undefined) before contours are drawn into it below,
	 * so the debug display may show garbage background. */
	edges = cvCreateMat( card_roi->rows, card_roi->cols, card_roi->type );
	//cvCanny( card_roi, edges, 0, 255, 3 );
	contour_count = cvFindContours(
        card_roi,
        storage,
        &contours,
        sizeof(CvContour),
        CV_RETR_LIST,
		CV_CHAIN_APPROX_NONE,
		cvPoint(0, 0)
	);

	/* "Compare" contours -- currently just renders each one for debugging */
	if( contour_count > 0)
	{
		for( contour = contours; contour != NULL; contour = contour->h_next )
		{
			cvDrawContours( 
				edges,
				contour,
				CV_RGB(255,255,255),
				CV_RGB(255,255,255),
				0,
				2,
				CV_FILLED, cvPoint(0, 0) );
#if DEBUG
				/* Step through contours with the space bar */
				cvShowImage( "card_edges", edges );
				cvMoveWindow( "card_edges", 0, 0 );
				while( cvWaitKey(250) != 32 )
				;
#endif
		}
	}

	/* Cleanup.  cvClearSeq only clears the first sequence of the h_next
	 * chain; releasing the storage below frees them all regardless. */
	cvClearSeq( contours );
	cvReleaseMat( &edges );
	cvClearMemStorage(storage);
	cvReleaseMemStorage(&storage);

	return card;
}
コード例 #8
0
ファイル: ShapeClassifier.cpp プロジェクト: gotomypc/eyepatch
// Learn contour templates from the positive samples in sampleSet.
// Repopulates templateStorage/templateContours, refreshes the contour
// preview image, re-saves the classifier if it was previously persisted,
// and finally marks the classifier as trained.
void ShapeClassifier::StartTraining(TrainingSet *sampleSet) {
	// Make a copy of the set used for training (we'll want to save it later)
	sampleSet->CopyTo(&trainSet);

	// Reset any previously learned templates
	cvClearMemStorage(templateStorage);
    templateContours = NULL;

    // TODO: call into trainingset class to do this instead of accessing samplemap
    for (map<UINT, TrainingSample*>::iterator i = sampleSet->sampleMap.begin(); i != sampleSet->sampleMap.end(); i++) {
        TrainingSample *sample = (*i).second;
        if (sample->iGroupId == GROUPID_POSSAMPLES) { // positive sample

            // Build an edge image: grayscale -> Canny -> dilate (the dilation
            // thickens edges so contour following is more robust)
            IplImage *grayscale = cvCreateImage( cvSize(sample->fullImageCopy->width, sample->fullImageCopy->height), IPL_DEPTH_8U, 1);
            cvCvtColor(sample->fullImageCopy, grayscale, CV_BGR2GRAY);
            cvCanny(grayscale, grayscale, SHAPE_CANNY_EDGE_LINK, SHAPE_CANNY_EDGE_FIND, SHAPE_CANNY_APERTURE);
			cvDilate(grayscale, grayscale, 0, 2);

            // Contours are found into a short-lived per-sample storage and
            // cloned into the long-lived templateStorage below
            CvMemStorage *storage = cvCreateMemStorage(0);
            CvSeq *sampleContours = NULL;

            cvFindContours(grayscale, storage, &sampleContours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);
			if (sampleContours != NULL) {
			    sampleContours = cvApproxPoly(sampleContours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 0.2, 1 );
				for (CvSeq *contour = sampleContours; contour != NULL; contour = contour->h_next)
				{
					// Keep only closed contours with enough points to form a
					// meaningful shape template
					if ((contour->total > SHAPE_MIN_CONTOUR_POINTS) && (contour->flags & CV_SEQ_FLAG_CLOSED)){
						if (!templateContours) {
							templateContours = cvCloneSeq(contour, templateStorage);
						} else {
							// Splice the clone into the template list right
							// after the current head
							CvSeq *newContour = cvCloneSeq(contour, templateStorage);
							newContour->h_next = templateContours->h_next;
							templateContours->h_next = newContour;
						}
					}
				}
			}
            cvReleaseMemStorage(&storage);
            cvReleaseImage(&grayscale);

		} else if (sample->iGroupId == GROUPID_NEGSAMPLES) { // negative sample
            // do nothing for now
            // TODO: we could compare guesses against these as well and remove them if they match
        }
    }

    // Rebuild the visualisation of the learned contours
    UpdateContourImage();

    if (isOnDisk) { // this classifier has been saved so we'll update the files
        Save();        
    }

    // update member variables
	isTrained = true;
}
コード例 #9
0
ファイル: porndetect.c プロジェクト: nuarlyss/porndetect
// Detect objects with the global cascade, draw a filled circle over each
// detection on img (coordinates mapped back to full resolution), print the
// detection count, and optionally display the result when muncul == 1.
void detect_and_draw( IplImage* img, int muncul )
{
    // Colour palette cycled per detection index
    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}}
    };

    double scale = 1.3;
    // Grayscale copy and downscaled working image for faster detection
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                         cvRound (img->height/scale)),
                     8, 1 );
    int i;

    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                            cvSize(30, 30) );
        t = (double)cvGetTickCount() - t;
        //printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );

        // Guard the sequence pointer before dereferencing: the original
        // read faces->total unconditionally here while the loop below
        // already used the (faces ? ... : 0) guard.
        printf( "%s detected area = %d\n", input_name, faces ? faces->total : 0);
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            // Circle centred on the detection, mapped back to full-resolution
            // image coordinates
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            CvPoint center;
            int radius;
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            cvCircle( img, center, radius, colors[i%8], CV_FILLED, 8, 0 );
        }
    }

    // Only display the annotated frame when requested
    if(muncul==1) 
    {
      cvShowImage( "result", img );
    }
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
}
コード例 #10
0
ファイル: Croper.cpp プロジェクト: AnthonyNystrom/Pikling
int main(int argc, char* argv[])
{
    int i=0, c;
	bPreviewFilter=false;
    // create memory storage that will contain all the dynamic data
    storage = cvCreateMemStorage(0);
	if (argc>1 && !strcmp(argv[1],"f"))
		bPreviewFilter=true;

	char names[255];
    while(1)
    {
		sprintf(names, "pic%d.png", i+1);
        // load i-th image
        img0 = cvLoadImage( names, 1 );
        if( !img0 )
        {
            printf("Couldn't load %s\n", names );
            break;
        }
        img = cvCloneImage( img0 );
        
        // create window and a trackbar (slider) with parent "image" and set callback
        // (the slider regulates upper threshold, passed to Canny edge detector) 
        cvNamedWindow( wndname, 1 );
        
        // find and draw the squares
		IplImage* imgFilter = GetImageFilteredForSquareDetect(img);
		IplImage* imgShow;
		if (bPreviewFilter)
			imgShow = imgFilter;
		else
			imgShow = img;
		drawSquaresAndCrop(names, imgShow, img, findSquares4( imgFilter, storage ) );
		//drawCircleAndCrop(names, img);
		cvReleaseImage(&imgFilter);
        
        // wait for key.
        // Also the function cvWaitKey takes care of event processing
        c = cvWaitKey(0);
        // release both images
        cvReleaseImage( &img );
        cvReleaseImage( &img0 );
        // clear memory storage - reset free space position
        cvClearMemStorage( storage );
        if( (char)c == 27 )
            break;
		i++;
    }
    
    cvDestroyWindow( wndname );
    
    return 0;
}
コード例 #11
0
ファイル: cards.c プロジェクト: Hoevers/CardDetector
/**
 * Process the CvMat image as a card: smooth + threshold it in place, find
 * the outer contours of its edge map and remember them in the global
 * card_contours[] table for later matching.
 * @param image   The current image data pointer (modified in place)
 * @param current Current card type; must lie in [CARD_JOKER, CARD_TYPE_END)
 */
void
card_process(CvMat *image, enum card_type current)
{
	int contour_count     = 0;

	CvMat *edges          = NULL;
	CvSeq *contours       = NULL;
	CvMemStorage* storage = NULL;

	/* Check for valid current card */
	if( (image != NULL) && (current >= CARD_JOKER) && (current < CARD_TYPE_END) )
	{
		storage = cvCreateMemStorage(0); 
		edges   = cvCreateMat( image->rows, image->cols, image->type );

		/* Reduce noise, then binarise */
		cvSmooth( image, image, CV_GAUSSIAN, 3, 0, 0, 0 );
		cvThreshold( image, image, 200, 255, CV_THRESH_BINARY);

		/* Show card */
#if 0 //DEBUG
		cvShowImage( card_type_string[current], image );
		cvMoveWindow( card_type_string[current], 0, 0 );
#endif /* DEBUG */

		cvCanny( image, edges, 170, 200, 3 );

		/* Show edges */
#if 0 //DEBUG
		cvShowImage( "edges", edges );
		cvMoveWindow( "edges", edges->cols, 0 );
		while( cvWaitKey(0) != 32 )
			;
#endif /* DEBUG */

		/* Find contours */
		contour_count = cvFindContours(
				edges,
				storage,
				&contours,
				sizeof(CvContour),
				CV_RETR_EXTERNAL,
				CV_CHAIN_APPROX_SIMPLE,
				cvPoint(0, 0)
		);

		/* Save contours in card list */
		card_contours[ current ] = contours;

		/* Cleanup.  The contour sequences saved above live INSIDE `storage`,
		 * so it must stay allocated for as long as card_contours[current] is
		 * used.  The original released the storage here unconditionally,
		 * leaving the saved pointer dangling; we now keep the storage alive
		 * whenever contours were stored and release it only otherwise. */
		cvReleaseMat( &edges );
		if( contours == NULL )
		{
			cvClearMemStorage(storage);
			cvReleaseMemStorage(&storage);
		}
	}
}
コード例 #12
0
ファイル: process.cpp プロジェクト: turlicht/Scenery
void Process::findContours()
{
    cvClearMemStorage(contourStorage);
    contours.clear();

    cvCvtColor(image, grayImage, CV_RGB2GRAY);

//    if (param.contour.smooth) {
//        cvSmooth(grayImage, grayImage, CV_BLUR, 3, 3);
//    }

    cvCanny(grayImage, hitImage, contourParam.threshold1, contourParam.threshold2, 3);

    // находим контуры
    cvFindContours(hitImage, contourStorage, &contoursSeq, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    for(CvSeq* seq = contoursSeq; seq != 0; seq = seq->h_next) {
        Contour contour;
        for( int i=0; i<seq->total; ++i ) {
            CvPoint* cvP = (CvPoint*)cvGetSeqElem(seq, i);
            ContourPt pt;
            pt.x = cvP->x;
            pt.y = cvP->y;
            contour.push_back(pt);
            //qDebug() << cvP->x << cvP->y;
        }
        contours.push_back(contour);
    }

    // пример работы с контуром
    //for(CvSeq* seq = contours; seq != 0; seq = seq->h_next){
        // нарисовать контур
        // cvDrawContours(dstImage, seq, CV_RGB(255,216,0), CV_RGB(0,0,250), 0, 1, 8);
        // Работаем с точками последовательности
         //CvPoint* p = (CvPoint*)cvGetSeqElem ( seq, i );
    //}

    // рисуем обводку
//    if (param.contour.isDrawHull) {
//        CvMemStorage* hullStorage = cvCreateMemStorage(0);

//        for(CvSeq* seq = contours; seq != 0; seq = seq->h_next){
//            CvSeq *hulls = cvConvexHull2(seq, hullStorage, CV_CLOCKWISE, 1);
//            //cvDrawContours(dstImage, hulls, CV_RGB(255, 0, 0), CV_RGB(100, 0, 0), 0, 2, 8);

//            cvClearMemStorage(hullStorage);
//        }

//        cvReleaseMemStorage(&hullStorage);
    //    }

}
コード例 #13
0
// Construct the detector: load the frontal-face Haar cascade, prepare the
// detection storage, and reset the cached face description to empty.
faceDetector::faceDetector()
{
    // Haar cascade used by subsequent detections
    cascade = (CvHaarClassifierCascade*)cvLoad(HAAR_CASCADE_FACE, 0, 0, 0);

    // Storage backing the detection result sequences
    storage = cvCreateMemStorage(0);
    cvClearMemStorage(storage);

    // Start with a zero-sized face (top-left == bottom-right == origin)
    faceInformation.LT = cvPoint(0, 0);
    faceInformation.RB = cvPoint(0, 0);
    faceInformation.Width = 0;
    faceInformation.Height = 0;
}
コード例 #14
0
ファイル: camera.c プロジェクト: derenrich/camsaver
// Load the Haar cascade and allocate the global detection storage.
// Returns 0 on success, 1 if the cascade file could not be loaded.
int init_camera() {
  // Load the HaarClassifierCascade
  cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
  if( !cascade ) {
    fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
    return 1;
  }
  // Allocate the memory storage
  storage = cvCreateMemStorage(0);
  cvClearMemStorage( storage );

  // The original fell off the end of this non-void function (undefined
  // behaviour in C++, unspecified return value in C); report success.
  return 0;
}
コード例 #15
0
ファイル: main.cpp プロジェクト: MiaoChaoran/shiyan
// Detect objects with the global cascade, then draw a rectangle AND a
// circle around each detection on img, and show the result window.
void detect_and_draw(IplImage* img )
{
    double scale=1.2;
    static CvScalar colors[] = {
        {{0,0,255}},{{0,128,255}},{{0,255,255}},{{0,255,0}},
        {{255,128,0}},{{255,255,0}},{{255,0,0}},{{255,0,255}}
    };//Just some pretty colors to draw with

    //Image Preparation: grayscale copy, then downscale by `scale` for speed
    //
    IplImage* gray = cvCreateImage(cvSize(img->width,img->height),8,1);
    IplImage* small_img=cvCreateImage(cvSize(cvRound(img->width/scale),cvRound(img->height/scale)),8,1);
    cvCvtColor(img,gray, CV_BGR2GRAY);
    cvResize(gray, small_img, CV_INTER_LINEAR);

    cvEqualizeHist(small_img,small_img); // histogram equalization (original comment was mojibake Chinese)

    //Detect objects if any
    //
    cvClearMemStorage(storage);
    double t = (double)cvGetTickCount();
    CvSeq* objects = cvHaarDetectObjects(small_img,
                                         cascade,
                                         storage,
                                         1.1,
                                         2,
                                         0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                         cvSize(30,30));

    t = (double)cvGetTickCount() - t;
    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );

    //Loop through found objects and draw boxes around them
    //(coordinates are scaled back up to the original image size)
    for(int i=0; i<(objects? objects->total:0); ++i)
    {
        CvRect* r=(CvRect*)cvGetSeqElem(objects,i);
        cvRectangle(img, cvPoint(r->x*scale,r->y*scale), cvPoint((r->x+r->width)*scale,(r->y+r->height)*scale), colors[i%8]);
    }
    // NOTE(review): this second pass over the same detections (drawing a
    // circle per object) duplicates the iteration above and looks like
    // leftover sample code -- confirm both drawings are intended.
    for( int i = 0; i < (objects? objects->total : 0); i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( objects, i );
        CvPoint center;
        int radius;
        center.x = cvRound((r->x + r->width*0.5)*scale);
        center.y = cvRound((r->y + r->height*0.5)*scale);
        radius = cvRound((r->width + r->height)*0.25*scale);
        cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
    }

    cvShowImage( "result", img );
    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
}
コード例 #16
0
ファイル: example1.cpp プロジェクト: Ajaku/flandmark
// Run Haar face detection on `input`, then flandmark landmark detection on
// each detected face; draws face boxes and landmark points onto `orig`.
// On return, bbox holds the LAST face's corners (x1,y1,x2,y2) and
// landmarks the last face's coordinates -- both are overwritten per face.
void detectFaceInImage(IplImage *orig, IplImage* input, CvHaarClassifierCascade* cascade, FLANDMARK_Model *model, int *bbox, double *landmarks)
{
    // Smallest face size.
    CvSize minFeatureSize = cvSize(40, 40);
    int flags =  CV_HAAR_DO_CANNY_PRUNING;
    // How detailed should the search be.
    float search_scale_factor = 1.1f;
    CvMemStorage* storage;
    CvSeq* rects;
    int nFaces;

    storage = cvCreateMemStorage(0);
    cvClearMemStorage(storage);

    // Detect all the faces in the greyscale image.
    // NOTE(review): rects->total is read unconditionally here although the
    // loop below guards with (rects ? ...) -- confirm cvHaarDetectObjects
    // cannot return NULL in this build.
    rects = cvHaarDetectObjects(input, cascade, storage, search_scale_factor, 2, flags, minFeatureSize);
    nFaces = rects->total;

    // Time the landmark-detection pass over all faces
    double t = (double)cvGetTickCount();
    for (int iface = 0; iface < (rects ? nFaces : 0); ++iface)
    {
        CvRect *r = (CvRect*)cvGetSeqElem(rects, iface);
        
        // Face bounding box as (left, top, right, bottom)
        bbox[0] = r->x;
        bbox[1] = r->y;
        bbox[2] = r->x + r->width;
        bbox[3] = r->y + r->height;
        
        flandmark_detect(input, bbox, model, landmarks);

        // display landmarks: red box = Haar detection, blue box = flandmark
        cvRectangle(orig, cvPoint(bbox[0], bbox[1]), cvPoint(bbox[2], bbox[3]), CV_RGB(255,0,0) );
        cvRectangle(orig, cvPoint(model->bb[0], model->bb[1]), cvPoint(model->bb[2], model->bb[3]), CV_RGB(0,0,255) );
        // First landmark in blue, remaining 2*M coordinates (x,y pairs) in red
        cvCircle(orig, cvPoint((int)landmarks[0], (int)landmarks[1]), 3, CV_RGB(0, 0,255), CV_FILLED);
        for (int i = 2; i < 2*model->data.options.M; i += 2)
        {
            cvCircle(orig, cvPoint(int(landmarks[i]), int(landmarks[i+1])), 3, CV_RGB(255,0,0), CV_FILLED);

        }
    }
    t = (double)cvGetTickCount() - t;
    int ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) );

    if (nFaces > 0)
    {
        printf("Faces detected: %d; Detection of facial landmark on all faces took %d ms\n", nFaces, ms);
    } else {
        printf("NO Face\n");
    }
    
    cvReleaseMemStorage(&storage);
}
コード例 #17
0
// Construct the contour matcher: set up profiling bins, allocate the
// contour storage, and precompute Hu-moment signatures for the circle and
// rectangle template images used during matching.
mvContours::mvContours() :
    bin_contours(PROFILE_BIN("mvContours - Contour Finding")),
    bin_match(PROFILE_BIN("mvContours - Matching")),
    bin_calc(PROFILE_BIN("mvContours - Calculation"))
{
    // No contours found yet; storage backs all sequences we produce
    m_contours = NULL;
    m_storage  = cvCreateMemStorage(0);

    // Build the template databases for both supported shapes
    init_contour_template_database(contour_circ_images, NUM_CONTOUR_CIRC_IMAGES, hu_moments_circ_vector);
    init_contour_template_database(contour_rect_images, NUM_CONTOUR_RECT_IMAGES, hu_moments_rect_vector);

    // Hand scratch memory used above back to the storage pool
    cvClearMemStorage(m_storage);
}
コード例 #18
0
ファイル: adaboostDetect.cpp プロジェクト: Riseley/Drone
// Detect objects, draw a green box around each on img, and return the
// detections through *regions (a malloc'd array owned by the caller, or
// NULL when nothing was found).  Returns the number of detections.
int adaboostDetect::detectAndDraw(IplImage* img, CvRect** regions) {
    double t = (double) cvGetTickCount();
    int fii = 0;
    // Grayscale copy and downscaled working image for faster detection
    IplImage* gray = cvCreateImage(cvSize(img->width, img->height), 8, 1);
    IplImage* smallImg = cvCreateImage( cvSize( cvRound (img->width/scaleFactor),
                                               cvRound (img->height/scaleFactor)), 8, 1 );
    cvCvtColor(img, gray, CV_BGR2GRAY);
    cvResize(gray, smallImg,CV_INTER_LINEAR);
    cvEqualizeHist(smallImg, smallImg);
    cvClearMemStorage(storage);
    
    int nx1, nx2, ny1, ny2;
    // Initialised to NULL so *regions is well-defined even with zero
    // detections -- the original copied an uninitialised pointer then.
    CvRect* nR = NULL;
    
    if (!cascade) {
        // No classifier loaded: report zero detections with a defined
        // output, and release the scratch images (the original leaked both
        // and left *regions untouched on this path).
        *regions = NULL;
        cvReleaseImage(&gray);
        cvReleaseImage(&smallImg);
        return 0;
    }
    
    CvSeq* faces = cvHaarDetectObjects( smallImg, cascade, storage, scaleFactor, minNeighbours, flags, minSize);
    for (int i=0; i<(faces ? faces->total : 0); i++) {
        // Grow the output array one element at a time
        if (i == 0) {
            nR = (CvRect*) malloc(1 * sizeof(CvRect));
        } else {
            nR = (CvRect*) realloc(nR, (i+1) * sizeof(CvRect));
        }
        CvRect* r = (CvRect*) cvGetSeqElem(faces, i);
        // Map the detection back to full-resolution coordinates
        nx1 = cvRound(r->x * scaleFactor);
        ny1 = cvRound(r->y * scaleFactor);
        nx2 = cvRound((r->x + r->width) * scaleFactor);
        ny2 = cvRound((r->y + r->height) * scaleFactor);
        nR[fii] = cvRect(nx1, ny1, nx2-nx1, ny2-ny1);
        cvRectangle(img, cvPoint(nx1, ny1), cvPoint(nx2, ny2), CV_RGB(0, 255, 0));
        fii++;
    }
    
    *regions = nR;
    
    cvShowImage("result", img);
    cvReleaseImage(&gray);
    cvReleaseImage(&smallImg);
    t = (double) cvGetTickCount() - t;
    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
    return fii;
}
コード例 #19
0
ファイル: adaboostDetect.cpp プロジェクト: Riseley/Drone
// Check whether an object is detected near the centre of img with roughly
// the expected (image-diagonal-based) size; returns 1 if so, 0 otherwise.
// NOTE(review): the maxSizeDiff/maxPosDiff parameters are immediately
// overwritten with hard-coded values below, and nStages is never used
// (`nos` is fixed at -1, i.e. all stages) -- confirm whether these are
// debugging leftovers before relying on the parameters.
int adaboostDetect::detectCheck(IplImage* img, float maxSizeDiff, float maxPosDiff, int nStages) {
    maxSizeDiff = 1.5f;
    maxPosDiff = 0.3f;
    int detCount;
    
    /* number of stages. If <= 0 all stages are used */
    int nos = -1;
    int nos0 = cascade->count;
    
    CvSeq* objects;
    cvClearMemStorage(storage);
    if (nos <= 0) {
        nos = nos0;
    }
   
    ObjectPos det;
    float distance;
    float sf = 1.1f;
    ObjectPos ref;
    // Temporarily restrict the cascade to `nos` stages for this detection,
    // then restore the original stage count
    cascade->count = nos;
    objects = cvHaarDetectObjects(img, cascade, storage, sf, 0);
    cascade->count = nos0;
    
    // Reference object: image centre, size derived from the half-diagonal
    int w = img->width;
    int h = img->height;
    ref.x = 0.5f * w;
    ref.y = 0.5f * h;
    ref.width = sqrtf(0.5f * (w*w + h*h));
    ref.found = 0;
    ref.neighbours = 0;
    
    detCount = (objects ? objects->total : 0);
    int found = 0;
    for (int i=0; i<detCount; i++) {
        CvAvgComp r = *((CvAvgComp*) cvGetSeqElem(objects, i));
        // Detection centre and diagonal-based size, for comparison with ref
        det.x = 0.5f * r.rect.width + r.rect.x;
        det.y = 0.5f * r.rect.height + r.rect.y;
        det.width = sqrtf(0.5f * (r.rect.width * r.rect.width + r.rect.height * r.rect.height));
        det.neighbours = r.neighbors;
        distance = sqrtf((det.x - ref.x) * (det.x - ref.x) + (det.y - ref.y) * (det.y - ref.y));
        // Accept when close enough to the centre and within the size band
        if ((distance < ref.width * maxPosDiff) && (det.width > ref.width / maxSizeDiff) && (det.width < ref.width * maxSizeDiff)) {
            ref.found = 1;
            ref.neighbours = MAX(ref.neighbours, det.neighbours);
            found = 1;
        }
    }
    
    return found;
}
コード例 #20
0
// Clean up allocated resources: release every working image and reset the
// shared contour storage (kept allocated for reuse).
void clean_up_images()
{
  // Frame ring buffer
  for (int idx = 0; idx < N; ++idx)
    cvReleaseImage(&image_buffer[idx]);

  // Motion-analysis working images
  cvReleaseImage(&mhi);
  cvReleaseImage(&silhouette);
  cvReleaseImage(&orientation);
  cvReleaseImage(&orientation_mask);
  cvReleaseImage(&segment_mask);

  // Reset (but do not free) the memory storage, if ever allocated
  if (storage)
    cvClearMemStorage(storage);
}
コード例 #21
0
// Detect the first face in `img` with the member Haar cascade and return a
// newly allocated CybRegionTrackInfo describing it (caller owns the result),
// or NULL when the cascade is missing or nothing was found. In dbg_mode the
// detected region is drawn onto the input frame.
CybRegionTrackInfo *CybHaarTracker::detect(IplImage *img) {

	const double scale = 1.3;
	CybRegionTrackInfo *region = NULL;

	// Grayscale copy plus a downscaled working image for faster detection.
	IplImage *grayImg = cvCreateImage(cvSize(img->width, img->height), 8, 1);
	IplImage *workImg = cvCreateImage(cvSize(cvRound(img->width / scale),
			cvRound(img->height / scale)), 8, 1);

	cvCvtColor(img, grayImg, CV_BGR2GRAY);
	cvResize(grayImg, workImg, CV_INTER_LINEAR);
	cvEqualizeHist(workImg, workImg);
	cvClearMemStorage(storage);

	if (cascade) {
		double t = (double)cvGetTickCount();
		CvSeq *faces = cvHaarDetectObjects(workImg, cascade, storage, 1.1, 2,
				0/*CV_HAAR_DO_CANNY_PRUNING*/, cvSize(30, 30));
		t = (double)cvGetTickCount() - t;

		if (faces && (faces->total > 0)) {
			// Only the first detection is tracked; map it back to full scale.
			CvRect *face = (CvRect*)cvGetSeqElem(faces, 0);

			region = new CybRegionTrackInfo((int)((face->x) * scale),
					(int)((face->x + face->width) * scale),
					(int)((face->y) * scale),
					(int)((face->y + face->height) * scale));

			if (dbg_mode) {
				// Mark the region centre and outline it on the original frame.
				int cx = (region->getMaxX() + region->getMinX()) / 2;
				int cy = (region->getMaxY() + region->getMinY()) / 2;
				cvLine(img, cvPoint(cx, cy), cvPoint(cx, cy),
						CV_RGB(50, 50, 50), 4, 8, 0);
				cvRectangle(img,
						cvPoint((int)(region->getMinX()), (int)(region->getMinY())),
						cvPoint((int)(region->getMaxX()), (int)(region->getMaxY())),
						CV_RGB(150, 150, 150), 2, 8, 0);
			}
		}
	}

	cvReleaseImage(&grayImg);
	cvReleaseImage(&workImg);

	return region;
}
コード例 #22
0
ファイル: Tracker.cpp プロジェクト: caomw/MILTracker1.01
/**
 * Detect a face in `frame` and seed params->_initstate with its rectangle
 * (x, y, width, height + 10 pixels of chin margin).
 *
 * @param params tracker parameters whose _initstate is filled on success
 * @param frame  input frame (converted to gray internally)
 * @return true if a usable face was found, false otherwise
 *
 * Fixes vs. original: the gray image and the CvMemStorage were leaked on
 * every call; the candidate-scan loop could decrement the index below 0
 * (cvGetSeqElem interprets negative indices as counting from the end, so the
 * loop could mis-index or never terminate); a failed cvLoad was not checked.
 */
bool			Tracker::initFace(TrackerParams* params, Matrixu &frame)
{
	const char* cascade_name = "haarcascade_frontalface_alt_tree.xml";
	const int minsz = 20;
	if( Tracker::facecascade == NULL )
		Tracker::facecascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
	if( Tracker::facecascade == NULL )
		return false;	// cascade file missing or unreadable

	frame.createIpl();
	IplImage *img = frame.getIpl();
	IplImage* gray = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 );
	cvCvtColor(img, gray, CV_BGR2GRAY );
	frame.freeIpl();
	cvEqualizeHist(gray, gray);

	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* faces = cvHaarDetectObjects(gray, Tracker::facecascade, storage, 1.05, 3, CV_HAAR_DO_CANNY_PRUNING ,cvSize(minsz, minsz));

	// Scan candidates from last to first for one that is big enough and
	// fits inside the frame (with 10 px of extra room below the chin).
	int index = faces->total - 1;
	CvRect* r = (index >= 0) ? (CvRect*)cvGetSeqElem( faces, index ) : NULL;
	while( r && (r->width<minsz || r->height<minsz || (r->y+r->height+10)>frame.rows() || (r->x+r->width)>frame.cols() ||
		r->y<0 || r->x<0) ){
		--index;
		r = (index >= 0) ? (CvRect*)cvGetSeqElem( faces, index ) : NULL;	// guard: no negative wraparound
	}

	bool found = (r != NULL);
	if( found ){
		params->_initstate.resize(4);
		params->_initstate[0]	= (float)r->x;
		params->_initstate[1]	= (float)r->y;
		params->_initstate[2]	= (float)r->width;
		params->_initstate[3]	= (float)r->height+10;
	}

	// Fix: release the resources the original implementation leaked.
	// (r points into `storage`, so this must happen after _initstate is set.)
	cvReleaseImage( &gray );
	cvReleaseMemStorage( &storage );

	return found;
}
コード例 #23
0
ファイル: crop_image.cpp プロジェクト: hobie88/tutino.indigo
// Perform face detection on the input image, using the given Haar Cascade.
// Returns a rectangle for the detected region in the given image, or
// (-1,-1,-1,-1) when no face was found.
CvRect detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade)
{
	const CvSize minFeatureSize = cvSize(20, 20);
	// Restrict the search to a single (biggest) face.
	const int flags = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
	const float search_scale_factor = 1.1f;

	CvMemStorage* storage = cvCreateMemStorage(0);
	cvClearMemStorage( storage );

	// The detector needs a single-channel image; convert when necessary,
	// otherwise run directly on the input.
	IplImage *greyImg = 0;
	IplImage *detectImg = (IplImage*)inputImg;
	if (inputImg->nChannels > 1)
	{
		greyImg = cvCreateImage(cvSize(inputImg->width, inputImg->height), IPL_DEPTH_8U, 1 );
		cvCvtColor( inputImg, greyImg, CV_BGR2GRAY );
		detectImg = greyImg;
	}

	// Run the cascade; with the flags above the first hit is the biggest face.
	CvSeq* rects = cvHaarDetectObjects( detectImg, (CvHaarClassifierCascade*)cascade, storage,
				search_scale_factor, 3, flags, minFeatureSize );

	CvRect rc = (rects->total > 0)
			? *(CvRect*)cvGetSeqElem( rects, 0 )
			: cvRect(-1,-1,-1,-1);	// couldn't find a face

	if (greyImg)
		cvReleaseImage( &greyImg );
	cvReleaseMemStorage( &storage );

	return rc;	// Return the biggest face found, or (-1,-1,-1,-1).
}
コード例 #24
0
// Run the face cascade on `img` and, if a face is found, copy its rectangle
// into *tracking_window. Returns 0 on success, 1 when nothing was detected.
int 
Haar_Detect(IplImage* img, CvHaarClassifierCascade* haarclassifier_face, CvMemStorage* mem_storage, CvRect* tracking_window)
{
  // Reset the storage, then look for the single biggest face.
  cvClearMemStorage(mem_storage);

  CvSeq* faces = cvHaarDetectObjects(img, haarclassifier_face, mem_storage,
                                     1.1, 2, CV_HAAR_DO_CANNY_PRUNING | CV_HAAR_FIND_BIGGEST_OBJECT,
                                     cvSize(30, 30), cvSize(0, 0));

  CvRect* hit = (CvRect*)cvGetSeqElem(faces, 0);
  if (hit == NULL)
    return 1;   // nothing detected

  *tracking_window = *hit;  // hand the best face to the tracker
  return 0;
}
コード例 #25
0
ファイル: ObjectDetect.cpp プロジェクト: rbbrnc/krbkACD
// Reference — cvHaarDetectObjects(const CvArr* image,
//     CvHaarClassifierCascade* cascade, CvMemStorage* storage,
//     double scale_factor = 1.1, int min_neighbors = 3, int flags = 0,
//     CvSize min_size = cvSize(0,0), CvSize max_size = cvSize(0,0))
//
// Run the member Haar cascade over m_image and cache the detections in
// m_objects. Returns the number of objects found (0 when no image is set).
int ObjectDetect::detect(double scale, bool fast)
{
    if (!m_image) {
        return 0;
    }

	// Drop results from the previous detection pass.
	cvClearMemStorage(storage);

	// "fast" trades thoroughness for speed: Canny pruning plus a 40x50
	// minimum object size; otherwise use the library defaults.
	m_objects = fast
		? cvHaarDetectObjects(m_image, m_haarCascade, storage, scale, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(40, 50))
		: cvHaarDetectObjects(m_image, m_haarCascade, storage);

	return m_objects ? m_objects->total : 0;
}
コード例 #26
0
// Locate the wafer position in `src`: edge-detect, extract outer contours,
// keep those whose perimeter falls inside the calibrated range, and return
// the centre of the topmost qualifying enclosing circle (also drawn on dst).
// NOTE(review): name is presumably a typo for "QuartzPosition" — kept for ABI.
CvPoint CTools::QuartzPostion(IplImage* src, IplImage* dst)
{
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq * contour = 0;
	int mode = CV_RETR_EXTERNAL;
	double length;
	CvPoint2D32f center;
	float r;
	CvPoint pt; 

	// Sentinel: y=1000 so any candidate with center.y < 1000 replaces it.
	// NOTE(review): assumes the image is at most ~1000 px tall — confirm.
	pt.y = 1000;
	pt.x = 0;

	CalibrateData m_CalDat;

	// Load calibration (wafer perimeter bounds in pixels) from project config.
	GetCalirateParam(&m_CalDat);

	IplImage* temp = cvCreateImage(cvGetSize(src), 8, 1);
	cvCanny(src, temp, 50, 100);

	// External contours only; cvFindContours consumes/modifies `temp`.
	cvFindContours(temp, storage, &contour, sizeof(CvContour), mode);

	for( CvSeq* c = contour; c != NULL; c = c->h_next)
	{
		// Note: `c` is reassigned to the polygonal approximation and the
		// loop then follows the approximation's h_next chain.
		c = cvApproxPoly( c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1 );
		length = cvArcLength(c, CV_WHOLE_SEQ, -1);
		// Keep contours whose perimeter matches the calibrated wafer size.
		if ((length > m_CalDat.WaferPxLow) && (length < m_CalDat.WaferPxHigh))
		{
			cvDrawContours(dst, c, CV_RGB(0,0,255), CV_RGB(255, 0, 0), -1, 2, 8);
			cvMinEnclosingCircle(c, &center, &r);
			// Track the topmost candidate below row 336.
			// NOTE(review): 336 looks like a fixture-specific cutoff — confirm.
			if ((center.y > 336) && (center.y < pt.y))
			{
				pt = cvPointFrom32f(center);
			}
			//pt[num] = cvPointFrom32f(center);
			//cvCircle(pContoursImg, pt[num], 3, CV_RGB(0,0,255), -1);
			//cvCircle(pContoursImg, pt[num], r, CV_RGB(0,0,255), 2);
		}
	}
	// Mark the chosen position on the output image.
	cvCircle(dst, pt, 10, CV_RGB(255,0, 0), -1);
	cvReleaseImage(&temp);
	cvClearMemStorage( storage );
	cvReleaseMemStorage( &storage );

	return pt;

}
コード例 #27
0
ファイル: camera.c プロジェクト: electricface/dde
/* Detect faces on `frame` (downscaled for speed) and, once detections have
 * persisted for DELAY_TIME, outline them on the frame and switch the
 * recognition state machine to START_RECOGNIZING.
 * Lazily initializes the file-level `storage` and `cascade` globals.
 * Fix: a failed cvLoad left `cascade` NULL and the subsequent
 * cvHaarDetectObjects call would crash; now bail out (and still release the
 * temporary images) when initialization fails. */
static void detect(IplImage* frame)
{
    double const scale = 1.3;

    IplImage* gray = cvCreateImage(cvSize(frame->width, frame->height), 8, 1);
    IplImage* small_img = cvCreateImage(cvSize(cvRound(frame->width/scale),
                                               cvRound(frame->height/scale)),
                                        8, 1);
    cvCvtColor(frame, gray, CV_RGB2GRAY);
    cvResize(gray, small_img, CV_INTER_LINEAR);
    cvEqualizeHist(small_img, small_img);

    if (storage == NULL)
        storage = cvCreateMemStorage(0);

    if (cascade == NULL)
        cascade = (CvHaarClassifierCascade*)cvLoad(CASCADE_NAME, 0, 0, 0);

    if (cascade == NULL || storage == NULL) {
        /* Cascade file missing/corrupt or storage allocation failed. */
        cvReleaseImage(&gray);
        cvReleaseImage(&small_img);
        return;
    }

    cvClearMemStorage(storage);
    CvSeq* objects = NULL;
    /* NOTE(review): `scale` (the image downscale factor) is reused as the
     * Haar scale_factor here; kept as-is — confirm this is intended. */
    objects = cvHaarDetectObjects(small_img, cascade, storage, scale,
                                  3, 0
                                  | CV_HAAR_FIND_BIGGEST_OBJECT
                                  , cvSize(0, 0), cvSize(0, 0));

    if (objects && objects->total > 0) {
        /* Require the detection to persist for DELAY_TIME before acting. */
        g_timer_stop(recognition_info.timer);
        double diff_time = g_timer_elapsed(recognition_info.timer, NULL);
        if (diff_time < recognition_info.DELAY_TIME)
            goto out;

        /* Outline every detection, mapped back to full resolution. */
        for (int i = 0; i < objects->total; ++i) {
            CvRect* r = (CvRect*)cvGetSeqElem(objects, i);
            cvRectangle(frame, cvPoint(r->x * scale, r->y * scale),
                        cvPoint((r->x + r->width) * scale, (r->y + r->height) * scale),
                        cvScalar(0, 0xff, 0xff, 0), 4, 8, 0);
        }

        recognition_info.reco_state = START_RECOGNIZING;
    } else {
        /* No face: restart the persistence timer. */
        g_timer_start(recognition_info.timer);
    }

out:
    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
}
コード例 #28
0
ファイル: main.cpp プロジェクト: piotrekno1/DistanceEstimator
/*
 * Process the input image - find all the faces and return their
 * description.
 * Returns all the detected faces in the form of a CvSeq (NULL when the
 * cascade is not loaded).
 */
CvSeq* detect_faces(IplImage *img)
{
    CvSeq *faces = NULL;

    /*  Discard results of the previous frame */
    cvClearMemStorage( storage );

    /*  Only run the detector if the cascade data was loaded */
    if( cascade != NULL )
    {
        faces = cvHaarDetectObjects( img, cascade, storage,
                SCALE_FACTOR, MIN_NEIGHBOURS,
                CV_HAAR_DO_CANNY_PRUNING,
                cvSize(FACE_MIN_SIZE, FACE_MIN_SIZE) );
    }

    return faces;
}
コード例 #29
0
// Build a single-channel contour image from `img`: grayscale, binary
// threshold at 100, contour extraction, then the contours redrawn in white
// on a black background.
// Returns a newly created IplImage; the CALLER must cvReleaseImage it.
// Fix: the original created a CvMemStorage on every call and never released
// it, leaking memory each invocation.
IplImage* preprocess(IplImage* img){
    CvMemStorage* g_storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;

    IplImage* gray = cvCreateImage( cvGetSize( img ), 8, 1 );
    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvThreshold( gray, gray, 100, 255, CV_THRESH_BINARY );

    // cvFindContours consumes the binary image; results live in g_storage.
    cvFindContours( gray, g_storage, &contours );

    // Repaint: black canvas with the found contours drawn in white.
    cvZero( gray );
    if( contours )
    {
        cvDrawContours( gray, contours, cvScalarAll(255), cvScalarAll(255), 100 );
    }

    // The contours are rasterized into `gray`, so the storage (and the
    // contour sequences it owns) can now be freed.
    cvReleaseMemStorage( &g_storage );
    return gray;
}
コード例 #30
0
ファイル: eyesDetector.cpp プロジェクト: polyu/camera-single
// Construct the eye detector: load both eye cascades (the process exits if
// either is missing), allocate detection storage, and zero the eye state.
eyesDetector::eyesDetector()
{
    nested_cascade = (CvHaarClassifierCascade*)cvLoad(HAAR_CASCADE_EYE, 0, 0, 0 );
    if (nested_cascade == NULL) {
        exit(-1);   // cannot operate without the primary eye cascade
    }

    nested_cascade_2 = (CvHaarClassifierCascade*)cvLoad(HAAR_CASCADE_EYE_2, 0, 0, 0 );
    if (nested_cascade_2 == NULL) {
        exit(-1);   // secondary cascade is required as well
    }

    // Work storage for cvHaarDetectObjects.
    storage = cvCreateMemStorage(0);
    cvClearMemStorage( storage );

    // Start from a cleared detection state.
    eyesInformation.LE = cvPoint(0, 0);
    eyesInformation.RE = cvPoint(0, 0);
    eyesInformation.Length = 0;
    bothEyesDetected = 0;
}