// Detect faces (and, within each face, eyes) in a raw pixel buffer.
// image:      packed pixel data, width*height*pixelBytes bytes (not copied).
// width/height/pixelBytes: geometry of the buffer.
// Returns one VisionRecognitionResult per detected face, holding the four
// corners of its bounding box (clockwise from upper-left).
// Side effect: draws a red rectangle around each detected eye into `image`.
vector<VisionRecognitionResult> IPEL_Haar2FaceEyeDetectionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:IPEL_Haar2FaceEyeDetectionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	// Guard: &image[0] on an empty vector is undefined behavior.
	if( image.empty() || width <= 0 || height <= 0 )
		return _recognitionResult;

	// Wrap the caller's buffer in an IplImage header (no pixel copy).
	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	if( _storage ) cvClearMemStorage( _storage );

	if( _cascade_f ) {
		/* detect faces */
		CvSeq *faces = cvHaarDetectObjects(cvImage, _cascade_f, _storage,
			1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 30, 30 ) );

		if( faces && faces->total>0) {
			/* Snapshot the face rectangles first: the eye-detection pass below
			   calls cvClearMemStorage(), which invalidates the `faces` sequence. */
			int nfaces = faces->total;
			_recognitionResult.resize (nfaces);
			// RAII copy of the rectangles; the original used new[] and then
			// freed it with scalar `delete`, which is undefined behavior.
			std::vector<CvRect> fr(nfaces);

			for( int i = 0; i < nfaces; i++ ) {
				CvRect *r = (CvRect*)cvGetSeqElem(faces, i);
				fr[i] = *r;

				_recognitionResult[i].name = "Face";
				/*- Upper left corner -*/
				_recognitionResult[i].point1X = (int)((r->x) + 0.5);
				_recognitionResult[i].point1Y = (int)((r->y) + 0.5);
				/*- Upper right corner -*/
				_recognitionResult[i].point2X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point2Y = (int)((r->y) + 0.5);
				/*- Lower right corner -*/
				_recognitionResult[i].point3X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point3Y = (int)((r->y + r->height) + 0.5);
				/*- Lower left corner -*/
				_recognitionResult[i].point4X = (int)((r->x) + 0.5);
				_recognitionResult[i].point4Y = (int)((r->y + r->height) + 0.5);
			}

			/* Second pass: search the upper part of each face for eyes. */
			for( int i = 0; i < nfaces; i++ ) {
				/* reset buffer for the next object detection */
				cvClearMemStorage(_storage);

				/* Restrict the search to the band where eyes are expected:
				   starting ~1/5.5 down the face, ~1/3 of the face tall. */
				cvSetImageROI(cvImage, cvRect(fr[i].x, fr[i].y + (int)(fr[i].height/5.5), fr[i].width, (int)(fr[i].height/3.0) ) );

				/* detect eyes */
				CvSeq* eyes = cvHaarDetectObjects(cvImage, _cascade_e, _storage,
					1.15, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(25, 15));

				/* draw a rectangle for each of (at most) the first two eyes */
				for(int j = 0; j < (eyes ? eyes->total : 0); j++ ) {
					if(j>1) break;
					CvRect *er = (CvRect*) cvGetSeqElem( eyes, j );
					cvRectangle(cvImage,
						cvPoint(er->x, er->y),
						cvPoint(er->x + er->width, er->y + er->height),
						CV_RGB(255, 0, 0), 1, 8, 0);
				}

				cvResetImageROI(cvImage);
			}
		}
	}

	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
int ofxCvHaarFinder::findHaarObjects(const ofxCvGrayscaleImage& input,
	int x, int y, int w, int h,
	int minWidth, int minHeight) {

	int nHaarResults = 0;

	if (cascade) {
		if (!blobs.empty())
			blobs.clear();

		// we make a copy of the input image here
		// because we need to equalize it.

		if (img.width == input.width && img.height == input.height) {
				img = input;
		} else {
				img.clear();
				img.allocate(input.width, input.height);
				img = input;
		}

		img.setROI(x, y, w, h);
		cvEqualizeHist(img.getCvImage(), img.getCvImage());
		CvMemStorage* storage = cvCreateMemStorage();

		/*
		Alternative modes:

		CV_HAAR_DO_CANNY_PRUNING
		Regions without edges are ignored.

		CV_HAAR_SCALE_IMAGE
		Scale the image rather than the detector
		(sometimes yields speed increases).

		CV_HAAR_FIND_BIGGEST_OBJECT
		Only return the largest result.

		CV_HAAR_DO_ROUGH_SEARCH
		When BIGGEST_OBJECT is enabled, stop at
		the first scale for which multiple results
		are found.
		*/

		CvSeq* haarResults = cvHaarDetectObjects(
				img.getCvImage(), cascade, storage, scaleHaar, neighbors, CV_HAAR_DO_CANNY_PRUNING,
				cvSize(minWidth, minHeight));

		nHaarResults = haarResults->total;

		for (int i = 0; i < nHaarResults; i++ ) {
			printf("%i objects\n", i);
			
			ofxCvBlob blob;

			CvRect* r = (CvRect*) cvGetSeqElem(haarResults, i);

			float area = r->width * r->height;
			float length = (r->width * 2) + (r->height * 2);
			float centerx	= (r->x) + (r->width / 2.0);
			float centery	= (r->y) + (r->height / 2.0);

			blob.area = fabs(area);
			blob.hole = area < 0 ? true : false;
			blob.length	= length;
			blob.boundingRect.x = r->x + x;
			blob.boundingRect.y = r->y + y;
			blob.boundingRect.width = r->width;
			blob.boundingRect.height = r->height;
			blob.centroid.x = centerx;
			blob.centroid.y = centery;
			blob.pts.push_back(ofPoint(r->x, r->y));
			blob.pts.push_back(ofPoint(r->x + r->width, r->y));
			blob.pts.push_back(ofPoint(r->x + r->width, r->y + r->height));
			blob.pts.push_back(ofPoint(r->x, r->y + r->height));

			blobs.push_back(blob);
		}

		cvReleaseMemStorage(&storage);
	}

	return nHaarResults;
}
Beispiel #3
0
// Detect faces in `img` (BGR), draw a circle around each face and around any
// nested objects (e.g. eyes) found inside it, and return the annotated image.
// Side effects: updates the globals num (face count) and Cx/Cy/R (center and
// radius of the last face drawn). Uses the global `scale`, `storage`,
// `cascade` and `nested_cascade`.
IplImage * detect_and_draw( IplImage* img )
{
	// Per-face colors, cycled via i % 8.
	static CvScalar colors[] =
	{
		{{0,0,255}},
		{{0,128,255}},
		{{0,255,255}},
		{{0,255,0}},
		{{255,128,0}},
		{{255,255,0}},
		{{255,0,0}},
		{{255,0,255}}
	};

	IplImage *gray, *small_img;
	int i, j;

	// Downscaled, equalized grayscale copy for detection.
	gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
	small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
	                     cvRound (img->height/scale)), 8, 1 );

	cvCvtColor( img, gray, CV_BGR2GRAY );
	cvResize( gray, small_img, CV_INTER_LINEAR );
	cvEqualizeHist( small_img, small_img );
	cvClearMemStorage( storage );

	if( cascade )
	{
		double t = (double)cvGetTickCount();
		CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
		                                    1.1, 2, 0
		                                    //|CV_HAAR_FIND_BIGGEST_OBJECT
		                                    //|CV_HAAR_DO_ROUGH_SEARCH
		                                    |CV_HAAR_DO_CANNY_PRUNING
		                                    //|CV_HAAR_SCALE_IMAGE
		                                    ,
		                                    cvSize(30, 30) );
		t = (double)cvGetTickCount() - t;
		// Guard before dereferencing (the original read faces->total
		// unconditionally while guarding everywhere else).
		num = faces ? faces->total : 0;
		//printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
		for( i = 0; i < num; i++ )
		{
			CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
			CvMat small_img_roi;
			CvSeq* nested_objects;
			CvPoint center;
			CvScalar color = colors[i%8];
			int radius;
			// Map detection coordinates back to the full-size image.
			center.x = cvRound((r->x + r->width*0.5)*scale);
			center.y = cvRound((r->y + r->height*0.5)*scale);
			radius = cvRound((r->width + r->height)*0.25*scale);
			Cx=center.x;
			Cy=center.y;
			R=radius;
			cvCircle( img, center, radius, color, 3, 8, 0 );
			if( !nested_cascade )
				continue;
			// Search inside the face rectangle for nested objects.
			cvGetSubRect( small_img, &small_img_roi, *r );
			nested_objects = cvHaarDetectObjects( &small_img_roi, nested_cascade, storage,
			                            1.1, 2, 0
			                            //|CV_HAAR_FIND_BIGGEST_OBJECT
			                            //|CV_HAAR_DO_ROUGH_SEARCH
			                            //|CV_HAAR_DO_CANNY_PRUNING
			                            //|CV_HAAR_SCALE_IMAGE
			                            ,
			                            cvSize(0, 0) );
			for( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
			{
				CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
				center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
				center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
				radius = cvRound((nr->width + nr->height)*0.25*scale);
				cvCircle( img, center, radius, color, 3, 8, 0 );
			}
		}
	}
	cvReleaseImage( &gray );
	cvReleaseImage( &small_img );
	//fp = fopen("/sdcard/test.jpg","w+");
	return img;
}
Beispiel #4
0
// ######################################################################
// Detect faces in `ima`, draw a red box around each, publish the face
// rectangles to itsCurrentFacesFound (under its_Curr_Res_mutex), and
// return the annotated image.
Image<PixRGB<byte> > FaceDetector::findFaces(Image<PixRGB<byte> > ima)
{
  Image<PixRGB<byte> > res;
  IplImage *img = img2ipl(ima);

  int scale = 1;

  // Cascade file used for frontal-face detection.
  // NOTE(review): reloaded from disk on every call — cache it if this
  // runs per-frame.
  const char* cascade_name =
    "haarcascade_frontalface_alt.xml";

  // Load a HaarClassifierCascade
  CvHaarClassifierCascade* cascade =
    (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

  // Report an error and quit if the cascade did not load
  if( !cascade )
    LFATAL("ERROR: Could not load classifier cascade");

  // Two points representing each face's bounding box
  CvPoint pt1, pt2;
  int i;

  // Working storage for the detector
  CvMemStorage* storage = cvCreateMemStorage(0);

  std::vector<Rectangle> tempResults;

  if( cascade )
    {
      // Detect all faces and store them in a growable sequence
      CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                          1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                          cvSize(40, 40) );

      // Loop over the faces found
      for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
          CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

          // Face corners, scaled back to input coordinates
          pt1.x = r->x*scale;
          pt2.x = (r->x+r->width)*scale;
          pt1.y = r->y*scale;
          pt2.y = (r->y+r->height)*scale;

          // Draw the rectangle in the input image
          cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );

          // store rectangle to publish later
          tempResults.push_back
            (Rectangle::tlbrO(pt1.y,pt1.x,pt2.y,pt2.x));
        }
    }

  // Release detector resources (the original leaked both every call;
  // it also created and released an unused `temp` image, now removed).
  cvReleaseMemStorage( &storage );
  cvReleaseHaarClassifierCascade( &cascade );

  // Publish results under lock
  its_Curr_Res_mutex.lock();
  itsCurrentFacesFound.clear();
  itsCurrentFacesFound = tempResults;
  its_Curr_Res_mutex.unlock();

  res = ipl2rgb(img);
  return res;
}
Beispiel #5
0
/****************************************************************************
 * Filter: Check for faces and raises an event when one is found.
 ****************************************************************************
 * p_pic: A picture_t with its p_data_orig member set to an array of
 * IplImages (one image for each picture_t plane).
 * Returns p_pic (with detected faces outlined) or NULL on error.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    IplImage** p_img = NULL;
    int i_planes = 0;
    CvPoint pt1, pt2;
    int i, scale = 1;

    if ((!p_pic) )
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    if (!(p_pic->p_data_orig))
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    //(hack) cast the picture_t to array of IplImage*
    p_img = (IplImage**) p_pic->p_data_orig;
    i_planes = p_pic->i_planes;

    //check the image array for validity
    if ((!p_img[0]))    //1st plane is 'I' i.e. greyscale
    {
        msg_Err( p_filter, "no image" );
        return NULL;
    }
    if ((p_pic->format.i_chroma != VLC_CODEC_I420))
    {
        msg_Err( p_filter, "wrong chroma - use I420" );
        return NULL;
    }
    if (i_planes<1)
    {
        msg_Err( p_filter, "no image planes" );
        return NULL;
    }

    //perform face detection on the luma plane
    cvClearMemStorage(p_filter->p_sys->p_storage);
    CvSeq* faces = NULL;
    if( p_filter->p_sys->p_cascade )
    {
        //we should make some of these params config variables
        faces = cvHaarDetectObjects( p_img[0], p_filter->p_sys->p_cascade,
            p_filter->p_sys->p_storage, 1.15, 5, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(20, 20) );
        //(re)allocate the region array to hold one entry per face
        CvRect* r;
        if (faces && (faces->total > 0))
        {
            //msg_Dbg( p_filter, "Found %d face(s)", faces->total );
            free( p_filter->p_sys->event_info.p_region );
            p_filter->p_sys->event_info.p_region = NULL;
            /* keep the size consistent with the (now freed) array so a
               failed allocation never leaves a stale i_region_size
               describing freed memory */
            p_filter->p_sys->event_info.i_region_size = 0;
            if( NULL == ( p_filter->p_sys->event_info.p_region =
                  (video_filter_region_info_t *)malloc(faces->total*sizeof(video_filter_region_info_t))))
            {
                return NULL;
            }
            memset(p_filter->p_sys->event_info.p_region, 0, faces->total*sizeof(video_filter_region_info_t));
            p_filter->p_sys->event_info.i_region_size = faces->total;
        }

        //populate the video_filter_region_info_t struct
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            r = (CvRect*)cvGetSeqElem( faces, i );
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;
            //outline the face in the luma plane
            cvRectangle( p_img[0], pt1, pt2, CV_RGB(0,0,0), 3, 8, 0 );

            //a region starts with a CvRect, so copy the rect in place
            *(CvRect*)(&(p_filter->p_sys->event_info.p_region[i])) = *r;
            p_filter->p_sys->event_info.p_region[i].i_id = p_filter->p_sys->i_id++;
            p_filter->p_sys->event_info.p_region[i].p_description = "Face Detected";
        }

        if (faces && (faces->total > 0))    //raise the video filter event
            var_TriggerCallback( p_filter->p_libvlc, VIDEO_FILTER_EVENT_VARIABLE );
    }
    else
        msg_Err( p_filter, "No cascade - is opencv-haarcascade-file valid?" );

    return p_pic;
}
Beispiel #6
0
void face_detect( void )
{
	CvCapture *capture = 0;
	IplImage *frame = 0;
	IplImage *frame_copy = 0;
	double height = 480;
	double width = 640;
	int c;
	CvRect last_rect = {0};

	CvHaarClassifierCascade* cvHCC = (CvHaarClassifierCascade*)cvLoad(filename);
	CvMemStorage* cvMStr = cvCreateMemStorage(0);
	CvSeq* face;
	capture = cvCreateCameraCapture (0);
	
	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, width);
	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, height);
	cvNamedWindow ("capture_face_detect", CV_WINDOW_AUTOSIZE);
		
	open_tonnel();

	while (1) {
		CvRect near_rect = {0};
		frame = cvQueryFrame (capture);


		frame_copy = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, frame->nChannels);
		if(frame->origin == IPL_ORIGIN_TL) {
			cvCopy(frame, frame_copy);
		} else {
			cvFlip(frame, frame_copy);
		}

		IplImage* gray = cvCreateImage(cvSize(frame_copy->width, frame_copy->height), IPL_DEPTH_8U, 1);
		IplImage* detect_frame = cvCreateImage(cvSize((frame_copy->width / SCALE), (frame_copy->height / SCALE)), IPL_DEPTH_8U, 1);
		cvCvtColor(frame_copy, gray, CV_BGR2GRAY);
		cvResize(gray, detect_frame, CV_INTER_LINEAR);
		cvEqualizeHist(detect_frame, detect_frame);


		face = cvHaarDetectObjects(detect_frame, cvHCC, cvMStr, 1.1, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(30, 30) );

		CvScalar detect_color = CV_RGB(255, 0, 0);

		double d = 1000000000000000.0;

		for (int i = 0; i < face->total; i++) {
			CvRect* faceRect = (CvRect*)cvGetSeqElem(face, i);

			if(last_rect.x == 0 && last_rect.y == 0) {

			} else {
				double x = abs(last_rect.x - faceRect->x);
				double y = abs(last_rect.y - faceRect->y);
				double e = sqrt( x*x+y*y );
				if( d > e) {
					last_rect.x = faceRect->x;
					last_rect.y = faceRect->y;
					last_rect.width = faceRect->width;
					last_rect.height = faceRect->height;
					printf("x\n");
				}
			}

			// rect
			cvRectangle(frame_copy,
				cvPoint(faceRect->x * SCALE, faceRect->y * SCALE),
				cvPoint((faceRect->x + faceRect->width) * SCALE, (faceRect->y + faceRect->height) * SCALE),
				detect_color,
				3, CV_AA);
			detect_color = CV_RGB(0, 0, 255);


		}

		// send to server
		{
			char str[1024];
			sprintf_s(str, "[{ \"x\" : %f, \"y\" : %f}]", last_rect.x * SCALE, last_rect.y * SCALE);
			printf("%s", str);
			send_tunnel(str);
		}
		
		cvShowImage ("capture_face_detect", frame_copy);
		cvReleaseImage(&gray);
		cvReleaseImage(&detect_frame);

		// key event
		c = cvWaitKey (16);
		if (c == 'e') {
			break;
		}
		if( c == 's') {
				CvRect* faceRect = (CvRect*)cvGetSeqElem(face, 0);
				if(faceRect != NULL) {
					last_rect.x = faceRect->x;
					last_rect.y = faceRect->y;
					last_rect.width = faceRect->width;
					last_rect.height = faceRect->height;
				}
		}
	}

	close_tunnel();

	/* free */
	cvReleaseMemStorage(&cvMStr);
	cvReleaseCapture (&capture);	
	cvDestroyWindow("capture_face_detect");
	cvReleaseHaarClassifierCascade(&cvHCC);
	


}
Beispiel #7
0
void eyesDetector::runEyesDetector(IplImage * input,IplImage * fullImage,CvPoint LT)

{
    // Locate the two eyes inside a previously detected face region.
    //
    // input:     BGR image of the face region; only a horizontal band of its
    //            upper half is searched for eyes.
    // fullImage: the whole BGR frame; eye centers are refined on a grayscale
    //            copy of it.
    // LT:        top-left corner of `input` within `fullImage`, used to map
    //            eye coordinates back into full-frame space.
    //
    // Results go into the eyesInformation member: LE/RE hold the left/right
    // eye centers (full-frame coordinates) and Length their distance.
    // bothEyesDetected is set to 1 only when both eyes were found.

    bothEyesDetected=0;
    //static int countR;
    //static CvPoint leftEyeP,rightEyeP;
    // Reset outputs so stale results from a previous call never leak through.
    eyesInformation.LE =cvPoint(0,0);
    eyesInformation.RE =cvPoint(0,0);
    eyesInformation.Length =0;
    if (input==0)
        return;

    // scale is fixed at 1 here; j doubles as the (floating-point) loop index below.
    double scale=1,j=0;
    //  //printf("%e SCALE \n\n",scale);

    // gray:       grayscale copy of the searched face band (half face height)
    // gray_scale: the same band resized by 1/scale (identical size while scale==1)
    IplImage *gray = cvCreateImage( cvSize(input->width,input->height/(2)), 8, 1 );
    IplImage *gray_fullimage = cvCreateImage( cvSize(fullImage->width,fullImage->height), 8, 1 );

    IplImage *gray_scale = cvCreateImage( cvSize(input->width/scale,input->height/(2*scale)), 8, 1 );

    // Search band: starts 1/8 down the face and covers half the face height.
    cvSetImageROI(input,cvRect(0,(input->height)/(8),input->width,(input->height)/(2)));
    cvCvtColor( input, gray, CV_BGR2GRAY );
    cvResetImageROI(input);


    cvCvtColor( fullImage, gray_fullimage, CV_BGR2GRAY );
    cvResize(gray,gray_scale,CV_INTER_LINEAR);


    cvClearMemStorage( storage );
    // First try the primary eye cascade; fall back to the secondary one
    // when it finds nothing.
    CvSeq* nested_objects = cvHaarDetectObjects(gray_scale, nested_cascade, storage,1.4, 2, 0,cvSize(0,0) );
    int count=nested_objects ? nested_objects->total : 0;
    if (count==0)
    {
        nested_objects = cvHaarDetectObjects( gray_scale, nested_cascade_2, storage,1.4, 2, 0,cvSize(0,0) );
    }
    int leftT=0,rightT=0;
    count=nested_objects ? nested_objects->total : 0;
    //int Flag=0;
    // Need at least two detections to have a chance of one eye on each side.
    if (count>1)
    {

        for ( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
        {
            CvPoint center;
            CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
            // Map the detection center back to full-frame coordinates:
            // offset by the face origin (LT) and the 1/8-height band offset.
            center.x = cvRound((LT.x+ (nr->x + nr->width*0.5)*scale));
            center.y = cvRound((LT.y + (input->height)/8 + (nr->y + nr->height*0.5)*scale));

            // Only refine when an 8x8 patch around the center fits in the frame.
            if ((center.x-4)>0 && (center.x-4)<(IMAGE_WIDTH-8) && (center.y-4)>0  && (center.y-4)<(IMAGE_HEIGHT-8))
            {
                // Refine the center via the patch's center of mass.
                cvSetImageROI(gray_fullimage,cvRect(center.x-4,center.y-4,8,8));
                IplImage* eyeDetect = cvCreateImage(cvSize(8,8),8,1);
                cvResize( gray_fullimage,eyeDetect, CV_INTER_LINEAR ) ;
                cvResetImageROI(gray_fullimage);

                double xCordinate=(center.x-4+CenterofMass(eyeDetect,0));
                double yCordinate=(center.y-4+CenterofMass(eyeDetect,1));

                cvReleaseImage( &eyeDetect );
                // Classify left/right by which side of the face midline it falls on.
                if (center.x<cvRound((LT.x + input->width*0.5)))
                {
                    eyesInformation.LE.x=xCordinate;
                    eyesInformation.LE.y=yCordinate;

                    //cvCircle( fullImage, cvPoint(eyesInformation.LE.x,eyesInformation.LE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );
                    leftT=1;
                }
                else
                {

                    eyesInformation.RE.x=xCordinate;
                    eyesInformation.RE.y=yCordinate;
                    //cvCircle( fullImage, cvPoint(eyesInformation.RE.x,eyesInformation.RE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );

                    rightT=1;
                }

            }
        }




        // Success only when one eye landed on each side of the midline.
        if (leftT==1 && rightT==1)
        {
            eyesInformation.Length=sqrt(pow(eyesInformation.RE.y-eyesInformation.LE.y,2)+ pow(eyesInformation.RE.x-eyesInformation.LE.x,2));
            bothEyesDetected=1;
        }

    }
    cvReleaseImage(&gray_fullimage);
    cvReleaseImage(&gray);
    cvReleaseImage(&gray_scale);
    cvClearMemStorage( storage );
}
Beispiel #8
0
// Detect faces in an image with the given Haar cascade.
// srcImg:      input image.
// vectFaces:   if non-NULL, every detected face rectangle is appended.
// cascade:     loaded classifier cascade; NULL is rejected.
// detect_only: when false, each detected face is outlined in srcImg.
// Returns true when at least one face wider than 4 pixels was found.
bool face_detect( IplImage* srcImg, std::vector<CvRect> *vectFaces, CvHaarClassifierCascade* cascade, bool detect_only)
{
    // Detector working memory, allocated once and reused across calls
    // (the original re-allocated it on every call and leaked the old one).
    static CvMemStorage* storage = 0;

    // Corners of the widest face found so far.
    CvPoint pt1, pt2;
    int i;

    if( !cascade )
    {
        MY_LOG("%s: faceCascade is NULL", __FILE__);
        return false;
    }

    if( storage == 0 )
        storage = cvCreateMemStorage(0);

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    int max_width = 0;

    DURATION_START;
    CvSeq* faces = cvHaarDetectObjects( srcImg, cascade, storage,
            1.1, 2, CV_HAAR_DO_CANNY_PRUNING
            , cvSize(20, 20)
            );
    DURATION_STOP("cvHaarDetectObjects()");

    // Loop the number of faces found.
    for( i = 0, max_width = 0; i < (faces ? faces->total : 0); i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

        if(vectFaces != NULL) {
            vectFaces -> push_back(*r);
        }

        MY_LOG("%s: found face <%d,%d> with %dx%d\n", __func__, r->x, r->y, r->width, r->height);

        if(!detect_only) {
            // Draw this face's rectangle. (The original drew pt1/pt2,
            // which were uninitialized on the first face and stale on
            // subsequent ones.)
            cvRectangle( srcImg,
                    cvPoint(r->x, r->y),
                    cvPoint(r->x + r->width, r->y + r->height),
                    CV_RGB(255,0,0), 3, 8, 0 );
        }

        // Track the widest face seen so far.
        if(max_width < r->width) {

            pt1.x = r->x;
            pt2.x = (r->x + r->width);
            pt1.y = r->y;
            pt2.y = (r->y + r->height);

            max_width = r->width;

        }
    }

    // Only a face wider than 4 px counts as a detection.
    return max_width > 4;
}
Beispiel #9
0
// Detect a face in `input`, crop it to a 48x48 grayscale patch and write it
// (converted back to RGB) into `output`. Also outlines detected faces
// (white) and eyes (cyan) in `input`.
// NOTE(review): `cascade` and `storage` are globals reloaded/re-allocated on
// every call and never released here — that leaks; confirm ownership before
// freeing them in this function.
void face_detect_crop(IplImage * input,IplImage * output)
{
    IplImage * img;
    img = cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,1);
    cvCvtColor(input,img,CV_RGB2GRAY);//convert input to greyscale

    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); //load the face detection cascade
    storage = cvCreateMemStorage(0);
    int scale = 1;
    CvPoint pt1,pt2;
    int face_number;

    CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces ? faces->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, face_number );

        //Specifies the points for rectangle.
        /* pt1_____________

           |              |

           |              |

           |              |

           |_____________pt2 */
        pt1.x = r->x*scale;
        pt2.x = (r->x+r->width)*scale;
        pt1.y = r->y*scale;
        pt2.y = (r->y+r->height)*scale;
        cvRectangle( input, pt1, pt2, CV_RGB(255,255,255), 1, 8, 0 );
        // Restrict img to the face; when several faces are found the last
        // one wins. When none are found, the ROI stays unset and the whole
        // frame is cropped below.
        cvSetImageROI(img,*r);
    }

    // Scale the (face-restricted) grayscale image down to 48x48.
    IplImage * frame;
    CvSize s1= {48,48};
    frame=cvCreateImage(s1,IPL_DEPTH_8U,1);

    cvResize(img,frame);
    cvCvtColor(frame,output,CV_GRAY2RGB);

    // Eye-detection pass over the full color input, for visualization only.
    CvPoint pt;
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name_eye, 0, 0, 0 ); //load the eye detection cascade
    CvSeq* faces1 = cvHaarDetectObjects( input, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces1 ? faces1->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces1, face_number );
        pt.x = (r->x*scale);
        pt2.x = ((r->x+r->width)*scale);
        pt.y = (r->y*scale);
        pt2.y = ((r->y+r->height)*scale);
        cvRectangle( input, pt, pt2, CV_RGB(0,255,255), 1, 8, 0 );
    }

    // Free local intermediates (the original leaked both).
    cvReleaseImage(&frame);
    cvReleaseImage(&img);
}
    /****************************************************************
    Tracker::InitializeWithFace
        Initialize the tracker with opencv's face detector: scan the
        detections from the last to the first and use the first one
        that is large enough and fully inside the frame.
    Returns:
        true when a usable face was found, false otherwise.
    Exceptions:
        None
    ****************************************************************/
    bool    Tracker::InitializeWithFace( TrackerParameters* params, Matrixu& frame )
    {
        const int minsz = 20;

        //Get the name of the haar-cascade .xml file
        const char* cascade_name = HAAR_CASCADE_FILE_NAME;
        ASSERT_TRUE( cascade_name != NULL );

        //Load the cascade once and cache it
        if ( Tracker::s_faceCascade == NULL )
        {
            Tracker::s_faceCascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
        }

        frame.createIpl();
        IplImage* img    = frame.getIpl();
        ASSERT_TRUE( img != NULL );

        //convert to grayscale
        IplImage* gray    = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 );
        ASSERT_TRUE( gray != NULL );
        cvCvtColor(img, gray, CV_BGR2GRAY );

        frame.freeIpl();

        //histogram equalization
        cvEqualizeHist( gray, gray );

        //memory storage for the detector results
        CvMemStorage* storage = cvCreateMemStorage(0);

        //call opencv's haar feature based detector
        CvSeq* faces = cvHaarDetectObjects( gray,
                                            Tracker::s_faceCascade,
                                            storage,
                                            1.05,
                                            3,
                                            CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize( minsz, minsz ) );

        //scan backwards for an acceptable face. The original decremented the
        //index without a lower bound; with cvGetSeqElem's negative-index
        //wraparound that could revisit elements or dereference NULL later.
        CvRect face;
        bool found = false;
        for ( int index = (faces ? faces->total : 0) - 1; index >= 0; --index )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, index );
            if ( r == NULL )
                continue;
            //accept: big enough and fully inside the frame (with a 10 px
            //bottom margin, matching the +10 applied to the height below)
            if ( r->width >= minsz && r->height >= minsz &&
                 (r->y + r->height + 10) <= frame.rows() &&
                 (r->x + r->width) <= frame.cols() &&
                 r->y >= 0 && r->x >= 0 )
            {
                face = *r;    //copy: the CvRect lives inside `storage`
                found = true;
                break;
            }
        }

        //free detector resources (the original leaked both)
        cvReleaseImage( &gray );
        cvReleaseMemStorage( &storage );

        if ( !found )
        {
            return false;
        }

        //set the params
        params->m_initState.resize(4);
        params->m_initState[0]    = (float)face.x;
        params->m_initState[1]    = (float)face.y;
        params->m_initState[2]    = (float)face.width;
        params->m_initState[3]    = (float)face.height+10;

        return true;
    }
Beispiel #11
0
//-------DETECT FACE OF GADHIJI ---------------
// Detect faces in `img`, black out everything outside each face rectangle in
// a per-face copy, and pass each candidate to gandhijitplMatch(). The best
// match above 0.62 is stored in GlobalGandhiji[globalcnt]; otherwise the
// slot is marked with matchval = -1. Increments globalcnt either way.
void detectAllFaces( IplImage *img )
{
	// -------DETAILS OF EACH DETECTED FACE-----------
	// At most 10 candidates are kept; the original indexed past this
	// array (stack overflow) when the detector returned more faces.
	GandhitplMatch detectedimg[10];
	int k;
	CvRect *r1;
	CvScalar s;

	/* detect faces */
	CvSeq *faces = cvHaarDetectObjects(
			img,
			cascade,
			storage,
			1.1,
			1,
			0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
			cvSize( 30, 30 ) );
	globalmaximum=-999;
	globalmaxindex=-1;

	/* clamp to the capacity of detectedimg */
	int total = faces ? faces->total : 0;
	if (total > 10)
		total = 10;

	/* for each face found, mask and match it */
	for( k = 0 ; k < total ; k++ ) {
		r1 = ( CvRect* )cvGetSeqElem( faces, k );
		/* only consider reasonably small faces */
		if((r1->height<100)&&(r1->width<100))
		{
			detectedimg[k].faceimg=cvCreateImage(cvGetSize(img),8,3);
			cvCopyImage(img,detectedimg[k].faceimg);
			detectedimg[k].height=r1->height;
			detectedimg[k].width=r1->width;
			detectedimg[k].x=r1->x;
			detectedimg[k].y=r1->y;

			/* black out every pixel outside the face rectangle */
			for(int i=0;i<img->height;i++)
			{
				for(int j=0;j<img->width;j++)
				{
					if((j<r1->x || j>r1->x + r1->width)  || (i<r1->y || i>r1->y + r1->height))
					{
						s = cvGet2D(detectedimg[k].faceimg, i, j);
						s.val[0]=0.0;
						s.val[1]=0.0;
						s.val[2]=0.0;
						cvSet2D(detectedimg[k].faceimg, i, j, s );
					}
				}
			}

			//-------SEND THE DETECTED FACE TO MATCH WITH FACE OF GANDHIJI---------
			// (moved inside the size filter: the original also called this
			// for oversized faces, passing an uninitialized detectedimg[k])
			gandhijitplMatch(detectedimg[k],k);
		}
	}

	//------KEEP THE MATCHED IMAGE WHOSE MATCH IS GREATER THAN 0.62-----------
	if(total>0 && globalmaximum>0.62 && globalmaxindex>=0)
	{
		GlobalGandhiji[globalcnt].faceimg=detectedimg[globalmaxindex].faceimg;
		GlobalGandhiji[globalcnt].x=detectedimg[globalmaxindex].x;
		GlobalGandhiji[globalcnt].y=detectedimg[globalmaxindex].y;
		GlobalGandhiji[globalcnt].width=detectedimg[globalmaxindex].width;
		GlobalGandhiji[globalcnt].height=detectedimg[globalmaxindex].height;
		GlobalGandhiji[globalcnt].matchval=globalmaximum;

	}
	else
	{
		GlobalGandhiji[globalcnt].matchval=-1;//TO ELIMINATE THE IMAGES

	}
	globalcnt++;
	/* display video */
	//  cvShowImage( "video", img );
	//cvWaitKey(100);
}
Beispiel #12
0
RotatedRect searchFace(Mat& src, GenericModel *model, cv::Size2f scaleFactor, bool draw){
	
	GenericFeature *minFeature;
	Mat auxImg, auxImg2;
	resize(src, auxImg,cv::Size2i(scaleFactor.width*src.size().width, scaleFactor.height*src.size().height));
	auxImg2 = auxImg.clone();
	
	CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME, 0, 0, 0);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);
	if (! cascade)
        abort ();
	
	CvHaarClassifierCascade* cascadeProfile = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME_PROFILE, 0, 0, 0);
    CvMemStorage* storageProfile = cvCreateMemStorage(0);
    assert (storageProfile);
	if (! cascadeProfile)
        abort ();
	
	IplImage *gray_image = cvCreateImage(src.size(), IPL_DEPTH_8U, 1);
	IplImage aux = IplImage(src);
	
	cvCvtColor (&aux, gray_image, CV_BGR2GRAY);
	cvEqualizeHist( gray_image, gray_image );
	
	CvSeq* faces = cvHaarDetectObjects (gray_image, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (25, 25));
	CvSeq* facesProfiles = cvHaarDetectObjects (gray_image, cascadeProfile, storageProfile, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (25, 25));
	
	double minValue = 10000.0;
	RotatedRect minRect;
	
	model->updateModel(auxImg);
	if (draw) cvNamedWindow("ROI");
	
	for (int i = 0; i < (faces ? faces->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (faces, i);
		RotatedRect auxRect(Point2i(r->x+r->width/2,r->y+r->height/2),Size2i(r->width,r->height),0);
		auxRect = scaleRect(auxRect, cv::Size2f(scaleFactor.width, scaleFactor.height));
		if (draw) drawRotatedRect(auxImg2, auxRect,CV_RGB(100,50,50) , 2);
		
		
		if(model->ModelType == COV_FULL_IMAGE){
			//minFeature = (GenericFeature *)new CovarianceFullDescriptor(auxRect,model->tracker_param);
			CV_Assert(false);
		}
		else if(model->ModelType == COV_SUB_WINDOWS)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		else if(model->ModelType == COV_SUB_WINDOWS_B)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		
		minFeature->computeFeature(model);
		double dist = model->distance(minFeature);
		
		if (dist<minValue) {
			minValue = dist;
			minRect = auxRect;
		}
		
		minFeature->clear();
		delete minFeature;
		if (draw){
			cout << "dist: "<<dist<<endl;
			imshow( "ROI", auxImg2);
			cvWaitKey();
		}
		
	}
	
	for (int i = 0; i < (facesProfiles ? facesProfiles->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (facesProfiles, i);
		RotatedRect auxRect(Point2i(r->x+r->width/2,r->y+r->height/2),Size2i(r->width,r->height),0);
		auxRect = scaleRect(auxRect, cv::Size2f(scaleFactor.width, scaleFactor.height));
		if (draw) drawRotatedRect(auxImg2, auxRect,CV_RGB(0,0,0) , 2);
		
		if(model->ModelType == COV_FULL_IMAGE){
			//minFeature = (GenericFeature *)new CovarianceFullDescriptor(auxRect,model->tracker_param);
			CV_Assert(false);
		}
		else if(model->ModelType == COV_SUB_WINDOWS)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		else if(model->ModelType == COV_SUB_WINDOWS_B)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		
		minFeature->computeFeature(model);
		double dist = model->distance(minFeature);
		
		
		if (dist<minValue) {
			minValue = dist;
			minRect = auxRect;
		}
		
		minFeature->clear();
		delete minFeature;
		if (draw){
			cout << "dist: "<<dist<<endl;
			imshow( "ROI", auxImg2);
			cvWaitKey();
		}	
	}	
	
	
	if (draw){
		drawRotatedRect(auxImg2, minRect,CV_RGB(255,0,0) , 3);	
		imshow( "ROI", auxImg2);
		cvWaitKey();
		cvDestroyWindow("ROI");
	}
	auxImg2.release();
	auxImg.release();
	
	cvReleaseImage(&gray_image);
	
	cvClearMemStorage(storage);
	cvClearMemStorage(storageProfile);
	
	return scaleRect(minRect, cv::Size2f(1/scaleFactor.width, 1/scaleFactor.height));	
}
Beispiel #13
0
// Returns true when at least one frontal or profile face is detected inside
// `rect` (enlarged by 1.5x) of `src`.  Detections are drawn as circles on a
// grayscale working copy shown in the "ROI" window.
bool searchFace(const Mat& src, RotatedRect rect){

	CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME, 0, 0, 0);
	CvMemStorage* storage = cvCreateMemStorage(0);
	assert (storage);
	if (! cascade)
		abort ();

	CvHaarClassifierCascade* cascadeProfile = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME_PROFILE, 0, 0, 0);
	CvMemStorage* storageProfile = cvCreateMemStorage(0);
	assert (storageProfile);
	if (! cascadeProfile)
		abort ();

	IplImage *gray_image = cvCreateImage(src.size(), IPL_DEPTH_8U, 1);
	IplImage aux = IplImage(src);

	// Search a region 1.5x the tracked rectangle.
	rect.size.width *= 1.5;
	rect.size.height *= 1.5;

	cvCvtColor (&aux, gray_image, CV_BGR2GRAY);
	cvEqualizeHist( gray_image, gray_image );
	cvSetImageROI(gray_image, getBoundingRect(rect));

	CvSeq* faces = cvHaarDetectObjects (gray_image, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (10, 10));
	CvSeq* facesProfiles = cvHaarDetectObjects (gray_image, cascadeProfile, storageProfile, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (10, 10));

	// Fix: capture the totals NOW.  The CvSeq headers live inside the
	// storages, so they must not be read after the storages are cleared or
	// released (the original read faces->total after cvClearMemStorage).
	int numFrontal = faces ? faces->total : 0;
	int numProfile = facesProfiles ? facesProfiles->total : 0;

	for (int i = 0; i < numFrontal; i++){
		CvRect* r = (CvRect*) cvGetSeqElem (faces, i);

		CvPoint center;
		int radius;
		center.x = cvRound((r->width*0.5 + r->x));
		center.y = cvRound((r->y + r->height*0.5));
		radius = cvRound((r->width + r->height)*0.25);
		cvCircle (gray_image, center, radius, CV_RGB(0,255,0), 3, 8, 0 );
	}

	for (int i = 0; i < numProfile; i++){
		CvRect* r = (CvRect*) cvGetSeqElem (facesProfiles, i);
		CvPoint center;
		int radius;
		center.x = cvRound((r->width*0.5 + r->x));
		center.y = cvRound((r->y + r->height*0.5));
		radius = cvRound((r->width + r->height)*0.25);
		cvCircle (gray_image, center, radius, CV_RGB(0,255,0), 3, 2, 0 );
	}

	cvNamedWindow("ROI");
	imshow( "ROI", gray_image);
	//cvWaitKey();

	cvResetImageROI(gray_image);
	cvReleaseImage(&gray_image);

	// Fix: release (not just clear) both storages and both cascades;
	// the original leaked the cascades and storageProfile on every call.
	cvReleaseMemStorage(&storage);
	cvReleaseMemStorage(&storageProfile);
	cvReleaseHaarClassifierCascade(&cascade);
	cvReleaseHaarClassifierCascade(&cascadeProfile);

	return numFrontal > 0 || numProfile > 0;
}
Beispiel #14
0
public:// Detects faces in `img` (via the class-level `cascade`/`storageHaart`),
    // circles detections that also pass the motion check (analizarMhi) on the
    // main image, and circles ALL raw Haar detections on a separate copy.
    // Shows the annotated images in the "Detecta"/"onlyhaart"/"bluedetect"
    // windows.
    void detect_and_draw( IplImage* img, IplImage* imgAnterior ) {
        static CvScalar colors[] = {
            {{0,0,255}},
            {{0,128,255}},
            {{0,255,255}},
            {{0,255,0}},
            {{255,128,0}},
            {{255,255,0}},
            {{255,0,0}},
            {{255,0,255}}
        };

        double scale = 1.3;
        IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
        IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                             cvRound (img->height/scale)),
                         8, 1 );
        int i;

        // Copy of the input that will carry ONLY the raw Haar detections.
        IplImage* onlyhaart = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
        cvCopy(img, onlyhaart);

        // Detection runs on a downscaled, equalized grayscale copy.
        cvCvtColor( img, gray, CV_BGR2GRAY );
        cvResize( gray, small_img, CV_INTER_LINEAR );
        cvEqualizeHist( small_img, small_img );
        cvClearMemStorage( storageHaart );

        if(cascade) {
            double t = (double)cvGetTickCount();
            CvSeq* faces = cvHaarDetectObjects(small_img, cascade, storageHaart,
                                                1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                                cvSize(30, 30) );
            t = (double)cvGetTickCount() - t;
            for(i = 0; i < (faces ? faces->total : 0); i++ ) {
                CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

                CvRect rect = cvRect(r->x, r->y, r->width, r->height);

                // Only circle on the main image when the detection also
                // passes the motion-history check.
                if ((rect.height < (img->height + 1)) & (rect.width < (img->width + 1))
                        & analizarMhi(img, imgAnterior, 30, rect)) {
                    // Fix: divide by tick-frequency * 1000 to actually get
                    // milliseconds (the original divided by 100).
                    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
                    CvPoint center;
                    int radius;
                    center.x = cvRound((rect.x + rect.width*0.5)*scale);
                    center.y = cvRound((rect.y + rect.height*0.5)*scale);
                    radius = cvRound((rect.width + rect.height)*0.25*scale);
                    cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
                }
                // Every raw detection is circled on the haar-only copy.
                CvPoint center;
                int radius;
                center.x = cvRound((rect.x + rect.width*0.5)*scale);
                center.y = cvRound((rect.y + rect.height*0.5)*scale);
                radius = cvRound((rect.width + rect.height)*0.25*scale);
                cvCircle( onlyhaart, center, radius, colors[i%8], 3, 8, 0 );
            }
        }
        cvShowImage( "Detecta", img );
        cvShowImage( "onlyhaart", onlyhaart);
        cvShowImage("bluedetect", imgAnterior);
        cvReleaseImage( &gray );
        cvReleaseImage( &small_img );
        // Fix: the original leaked `onlyhaart` on every call.
        cvReleaseImage( &onlyhaart );
    }
Beispiel #15
0
// Function to detect and draw any faces that are present in an image.
// Draws a red rectangle around each detection directly on `img`.
void detect_and_draw( IplImage* img )
{
    // Memory storage and classifier are cached across calls.
    static CvMemStorage* storage = 0;
    static CvHaarClassifierCascade* cascade = 0;

    static int scale = 1;

    // Two points to represent the face locations
    CvPoint pt1, pt2;
    static int i;

    // Fix: load the HaarClassifierCascade only once.  The original reloaded
    // it into the static pointer on every call, leaking the previous copy
    // (and doing expensive XML parsing per frame).
    if( !cascade )
        cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return;
    }

    // Fix: allocate the memory storage once and reuse it (the original
    // allocated a fresh storage per call and never released any of them).
    if( !storage )
        storage = cvCreateMemStorage(0);

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {
        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence
        CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );
        // Loop the number of faces found.
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            // Rectangle describing the detected face
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            // Find the dimensions of the face, and scale it if necessary
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;

            // Draw the rectangle in the input image
            cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
        }
    }

    // Show the image in the window named "result"
    //cvShowImage( "result", img );
}
Beispiel #16
0
// Initialize the video capture device and the display, then run the preview
// loop with face detection (includes device status checks). -----------------
// Opens /dev/video0 via V4L2, negotiates YUYV 640x480, mmaps 3 capture
// buffers, then loops: dequeue a frame, convert YUYV->RGB, run Haar face
// detection with OpenCV, draw boxes, show the frame, re-enqueue the buffer.
// Returns 0 on clean exit, 1 if the device cannot be opened, 2 on any V4L2
// setup failure.
int video_fb_init_preview()
{
	// Serial-port related variables (not used by the visible code path) ----
	char buff[512];
	int nread=0;
	int FrameDone=0;// end-of-frame flag
	int FrameCount=0;// frame length counter
	int j=0;
	int key=0;// on/off flag
	int stat=0;// video device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex;
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");

	video_fd = open("/dev/video0", O_RDWR, 0);// open the camera device in blocking mode
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/************* First ask the driver which video formats it supports: start *************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0) == 0))
	{
		fmt0.index++;
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",fmt0.index,fmt0.pixelformat&0xff,(fmt0.pixelformat>>8)&0xff,(fmt0.pixelformat>>16)&0xff,(fmt0.pixelformat>>24)&0xff,fmt0.description);
	}
	/**************************END***************************/
	
	//--------------------- Set the capture format ----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type: always V4L2_BUF_TYPE_VIDEO_CAPTURE
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;// source format: JPEG, YUV 4:2:2 or RGB
	fmt.fmt.pix.width = 640;// frame width
	fmt.fmt.pix.height = 480;// frame height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0)// apply the format
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++
	//if(SDL_Init(SDL_INIT_VIDEO) < 0)
	//{
	//	printf("SDL Init failed.\n");
	//	exit(1);
	//}
	
	// SDL setup: YUV output (disabled)
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	// SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	// SDL setup end
	
	// OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3);// image header only; no pixel buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1);// full image with allocated pixel buffer
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	// OpenCV setup end

	//------------------------ Request frame buffers ---------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	// number of buffers, i.e. frames that can be held
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type: always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	// memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)// apply the request
	{
		perror("request buffer error \n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//-------- Map the buffers obtained via VIDIOC_REQBUFS into user space --------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		// memory type: V4L2_MEMORY_MMAP (memory mapped) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)// query buffer info
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		// mmap the driver buffer into this process's address space ------
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0)// enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//-------------------------------------------------------//
	
	//---------------------- Start streaming ----------------------//
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//--------------------- Read back the negotiated format ---------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	int i=0;	
	// Some framebuffer-device / unused variables ---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	// set up the framebuffer device ------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile;// JPEG file handle
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	// open the framebuffer device ------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	// query framebuffer state -----------------------------------------
	fb_set(fb);// configure video memory parameters	
	fb_stat(fb);// get width, height and bit depth from the driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	// map the framebuffer memory into user space ----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
		
	// Preview the captured frames (a capture feature could be added here) -------------------
	// NOTE(review): the loop runs while sdl_quit is non-zero and exits when it
	// is set to 0 — presumably sdl_quit is a global initialized to 1; confirm.
	while (sdl_quit)
	{
		
		fd_set fds;// fd set for the select() mechanism
		struct timeval tv;
		int ret1;
		
		FD_ZERO(&fds);// clear the fd set
		FD_SET(video_fd,&fds);// add the video device fd to the set
		
		// Wait with a timeout; could also block indefinitely -------------------------------
		tv.tv_sec =2;
		tv.tv_usec=0;
		// Wait until the video device has a frame ready --------------------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}		
		while(sdl_quit)		
		{
					 
			// Poll for quit events
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			// Fetch one frame that is already queued in the FIFO -----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			// Dequeue a filled buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
	
			// User-space address of the current frame, used for format conversion ------------------
			unsigned char *ptcur=buffers[buf.index].start;
			//++++++++++++++++++++++++++++++++++++++++
			// Algorithm section
			//+++++++++++++++++++++++++++++++++++++++++
			// Grayscale conversion (disabled)
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char));// avoid a segfault
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/

			// YUV -> RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			// OpenCV: face detection
			cvSetData(img, pRGB, fmt.fmt.pix.width*3);// attach the pRGB pixel data to img
			cvCvtColor(img, imggray, CV_RGB2GRAY);// grayscale copy of img for the detector
			// NOTE(review): the cascade is re-loaded from XML on EVERY frame and
			// never released — a large per-frame leak and very slow; it should be
			// loaded once before the loop.  Also, passing `storage` as cvLoad's
			// second argument looks suspicious — verify against the cvLoad API.
			CvHaarClassifierCascade *cascade=(CvHaarClassifierCascade*)cvLoad("/usr/share/opencv-2.4.6.1/data/haarcascades/haarcascade_frontalface_alt2.xml", storage,0,0);
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30),cvSize(30,30));
			
			// OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 );// draw a box on the original image
			}
			

			// Tweak the OpenCV img pixel data (disabled)
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			// OpenCV: show the image; ESC (27) exits the loop	
			cvShowImage("image", img);
			char c = cvWaitKey(1);
			printf("%d\n",c);
			if(c==27)
				sdl_quit=0;
			
			
			// Push YUV into SDL (disabled)
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			// Push RGB into SDL (disabled)
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			// Frame-rate accounting (disabled)
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			// Re-enqueue the used buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	// release the framebuffer mapping
	//close(fb);// close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);					
	SDL_DestroyMutex(affmutex);
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImage(&img);
	cvReleaseImage(&imggray);
	free(status);
	free(buffers);
	//free(pRGB);
	SDL_Quit();
	return 0;

}
Beispiel #17
0
// Loads "<fileName>.jpg", detects faces, verifies each face by counting
// detected eyes (left + right cascades), and writes per-face marked-up and
// cropped images plus a whole-picture markup into `outputPath`.  Faces with
// fewer than two detected eyes are written with a ".reject" suffix.
void Main::AnalyzePicture(String fileName, String outputPath) {
	String imageFileName = fileName + ".jpg";
 	IplImage* img;
	img = cvLoadImage( imageFileName);
	// Fix: cvLoadImage returns NULL on failure; the original crashed on a
	// missing/unreadable file.
	if( !img ) {
		printf("Could not load image %s\n", imageFileName.c_str());
		return;
	}

	CvMemStorage* faceStorage = cvCreateMemStorage(0);
	CvMemStorage* eyeStorage = cvCreateMemStorage(0);

	CvHaarClassifierCascade* faceCascade = (CvHaarClassifierCascade*)cvLoad( "haarcascade_frontalface_alt2.xml" );
	CvHaarClassifierCascade* eyeLeftCascade = (CvHaarClassifierCascade*)cvLoad( "eyeLeftCascade.xml" );
	CvHaarClassifierCascade* eyeRightCascade = (CvHaarClassifierCascade*)cvLoad( "eyeRightCascade.xml" );
	double scale = 1.3;

	static CvScalar colors[] = { {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, 
	{{0,255,0}}, {{255,128,0}}, {{255,255,0}}, {{255,0,0}}, {{255,0,255}} };

	CvRect* r;
	// Fix: cvLoad returns NULL when a cascade XML is missing; the original
	// passed NULL straight into cvHaarDetectObjects (crash).
	if( faceCascade && eyeLeftCascade && eyeRightCascade ) {
		// Detect faces
		cvClearMemStorage( faceStorage );
		CvSeq* faces = cvHaarDetectObjects( img, faceCascade, faceStorage, 1.1, 4, 0, cvSize( 40, 50 ));

		String workingFileName;
		// Loop through faces in the picture and draw boxes
		for( int i = 0; i < (faces ? faces->total : 0 ); i++ ){
			r = ( CvRect* )cvGetSeqElem( faces, i );

			cvRectangle( img, cvPoint( r->x, r->y ), cvPoint( r->x + r->width, r->y + r->height ),
				colors[i%8]);

			/* sets the Region of Interest Note that the rectangle area has to be __INSIDE__ the image */
			cvSetImageROI(img, cvRect(r->x, r->y, r->width, r->height));

			// Unmarked copy of the face region, saved before eye boxes are drawn.
			IplImage *saveFace = cvCreateImage(cvSize(r->width, r->height), 8, 3);
			cvCopy(img, saveFace, NULL);

			// Eyes: a face is "accepted" when at least two eyes are found.
			CvSeq* eyes; int numEyes = 0;
			cvClearMemStorage( eyeStorage );
			eyes = cvHaarDetectObjects( img, eyeLeftCascade, eyeStorage, 1.1, 4, 0);
			this->drawEyes(eyes, img, true);
			numEyes += eyes->total;

			cvClearMemStorage( eyeStorage );
			eyes = cvHaarDetectObjects( img, eyeRightCascade, eyeStorage, 1.1, 4, 0);
			this->drawEyes(eyes, img, false);
			numEyes += eyes->total;

			bool accepted = numEyes > 1;
			String suffix = accepted ? "jpg" : "reject.jpg";
			if (accepted) 		
				printf("Extracted face\n");
			else
				printf("Rejected face.\n");

			workingFileName.sprintf("%s\\%s.Face_%ld.markup.%s",outputPath.c_str() ,fileName.c_str(),i,suffix.c_str());
			printf("Writing %s\n", workingFileName.c_str());
			remove(workingFileName.c_str());
			cvSaveImage(workingFileName, img);	// save marked up face

			workingFileName.sprintf("%s\\%s.Face_%ld.%s",outputPath.c_str() ,fileName.c_str(),i,suffix.c_str());
			printf("Writing %s\n", workingFileName.c_str());
			remove(workingFileName.c_str());
			cvSaveImage(workingFileName, saveFace);	// save unmarked face

			cvResetImageROI(img);
			cvReleaseImage( &saveFace );
		}

		workingFileName.sprintf("%s\\%s.markup.jpg",outputPath.c_str() ,fileName.c_str());
		printf("Writing %s\n", workingFileName.c_str());
		remove(workingFileName.c_str());
		cvSaveImage(workingFileName, img);		// save marked up picture
	}
	else {
		printf("Could not load one or more cascade files\n");
	}

	// Fix: the original leaked both storages and all three cascades per call.
	cvReleaseMemStorage( &faceStorage );
	cvReleaseMemStorage( &eyeStorage );
	if( faceCascade )     cvReleaseHaarClassifierCascade( &faceCascade );
	if( eyeLeftCascade )  cvReleaseHaarClassifierCascade( &eyeLeftCascade );
	if( eyeRightCascade ) cvReleaseHaarClassifierCascade( &eyeRightCascade );

	cvReleaseImage( &img );
}
// Per-frame ARDrone video pipeline stage: detects faces in the incoming
// frame, mosaics them, and steers the drone to keep the last face centered
// by comparing its position against the previous frame.  Returns C_OK on
// success, -1 when the cascade is missing or after 50 detections (exit).
C_RESULT display_stage_transform (display_stage_cfg_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
	if(count > 1)
	{
		temp_x = cur_x;
		temp_y = cur_y;
	}

	int i = 0;
  	const char *cascade_name = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml";
	// Fix: the original reloaded the cascade and allocated a fresh storage
	// on EVERY frame without releasing either — a large per-frame memory
	// leak.  Cache both across calls instead.
  	static CvHaarClassifierCascade *cascade = 0;
  	static CvMemStorage *storage = 0;
  	CvSeq *faces;

	if(!cascade)
		cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);

	if(!cascade)
	{
		printf("Could not load cascade.\n");
		return -1;
	}
  	
	uint32_t width = 0, height = 0;
    	getPicSizeFromBufferSize (in->size, &width, &height);

	//Obtain image from ARDrone and convert it to OpenCV format.
    	IplImage *img = ipl_image_from_data((uint8_t*)in->buffers[0], 1, 640, 360);

	if(!storage)
		storage = cvCreateMemStorage (0);
    	cvClearMemStorage (storage);

	//DetectFaces
	faces = cvHaarDetectObjects (img, cascade, storage, 1.11, 4, 0, cvSize(30, 30), cvSize (0, 0));

	for (i = 0; i < (faces ? faces->total : 0); i++) 
	{
		CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
      		doMosaic(img, r->x, r->y, r->width, r->height, 10);
		cur_x = r->x;
		cur_y = r->y;
		count++; //increases count when detect faces
 	}
	
	// Steer towards the face: compare current vs previous position and send
	// the corresponding progressive movement command.
	if(count > 2)
	{
		printf("prev_x = %d, prev_y = %d, cur_x = %d, cur_y = %d\n", temp_x, temp_y, cur_x, cur_y);
		if(cur_x - temp_x > 3)
		{
			//Move right.
			ardrone_at_set_progress_cmd( 1, 1.0, 0.0, 0.0, 0.0 );
		}

		if(cur_x - temp_x < -3)
		{
			//Move left.
			ardrone_at_set_progress_cmd( 1, -1.0, 0.0, 0.0, 0.0 );
		}

		if(cur_y - temp_y > 3)
		{
			//Move down.
		        ardrone_at_set_progress_cmd( 1, 0.0, 0.0, -1.0, 0.0 );
		}

		if(cur_y - temp_y < -5)
		{
			//Move up.
		        ardrone_tool_set_progressive_cmd( 1, 0.0, 0.0, 0.3, 0.0 );
		}
	}

	if(count > 50)
	{
		printf("Exit\n");
		ardrone_tool_set_ui_pad_start(0);
		// Fix: the original leaked `img` on this early-return path.
		cvReleaseImage(&img);
		return -1;
	}

	cvNamedWindow("FaceDetect", CV_WINDOW_AUTOSIZE);
	cvShowImage("FaceDetect", img);
	cvWaitKey(1);
	cvReleaseImage(&img);

    return C_OK;
}