コード例 #1
0
/*
    Detects and crops a human face.
    Receives an image in which we will try to detect a face.
    Receives the classifier used for the detection.
    On success the cropped face region is written to 'imRecortada'
    (the crop of the last detection when several faces are found).
    Returns 'true' if a face is detected, 'false' otherwise.
*/
bool detect(Mat& img, CascadeClassifier& cascade, Mat& imRecortada) {
    vector<Rect> faces;
    // NOTE(review): smallImg has the same dimensions as img (no scale factor
    // is applied to rows/cols), so the resize below is effectively a copy.
    Mat gray, smallImg(cvRound (img.rows), cvRound(img.cols), CV_8UC1);
    cvtColor(img, gray, CV_BGR2GRAY);
    resize(gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR);
    equalizeHist(smallImg, smallImg);
    cascade.detectMultiScale(smallImg, faces, 1.1, 1, 0|CV_HAAR_FIND_BIGGEST_OBJECT|CV_HAAR_DO_ROUGH_SEARCH, Size(250, 250));

    if (faces.empty()) {
        return false;
    }

    for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++) {
        // Center and radius of the detected face.
        Point center;
        int radius;
        center.x = cvRound((r->x + r->width*0.5));
        center.y = cvRound((r->y + r->height*0.5));
        radius = cvRound((r->width + r->height)*0.25);

        // Cropping the face, discarding the rest of the image.
        Point x1((center.x - radius + 20), (center.y - radius));
        Point x2((center.x + radius - 20), (center.y + radius));
        Rect myROI(x1.x, x1.y, (x2.x - x1.x), (x2.y - x1.y));

        // BUGFIX: the padded ROI can extend outside the image (e.g. a face
        // near a border), which would make img(myROI) throw. Clamp the ROI
        // to the image bounds and skip it if nothing usable remains.
        myROI &= Rect(0, 0, img.cols, img.rows);
        if (myROI.width <= 0 || myROI.height <= 0)
            continue;
        imRecortada = img(myROI);
    }
    return true;
}
コード例 #2
0
ファイル: reco.cpp プロジェクト: joanzinho/TANIT
/** @brief Detects faces in @p frame, draws an ellipse around each one and
 *  shows the annotated frame in the global window.
 *  @param frame BGR input frame (annotated in place).
 *  Relies on the file-scope globals `face_cascade` and `window_name`.
 */
void detectAndDisplay( Mat frame )
{
  std::vector<Rect> faces;
  Mat frame_gray;

  // Grayscale + histogram equalization: standard cascade preprocessing.
  cvtColor( frame, frame_gray, CV_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  //-- Detect faces
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

  for( size_t i = 0; i < faces.size(); i++ )
  {
    Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
    ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

    Mat faceROI = frame_gray( faces[i] );
    std::vector<Rect> eyes;

    // NOTE(review): `eyes` is never populated -- the usual
    // eyes_cascade.detectMultiScale(faceROI, eyes, ...) call appears to be
    // missing here, so the loop below never executes (dead code). Confirm
    // against the original tutorial this snippet derives from.
    for( size_t j = 0; j < eyes.size(); j++ )
     {
       Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
       int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
       circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
     }
  }
  //-- Show what you got
  imshow( window_name, frame );
}
コード例 #3
0
ファイル: detection.cpp プロジェクト: xoxoj/excel-FIT
// Runs licence-plate detection over every image listed in par.inputList.
// For each image: load, downscale by 2, detect plates with the cascade,
// draw the detections and show the annotated image (waits for a key).
void processList(param par, CascadeClassifier cascade)
{
    // Open the list with image paths.
    ifstream inputList(par.inputList.c_str());
    string line;

    // Loop across all images from the input file.
    while (getline(inputList, line))
    {
        cout << line << endl;
        string path = getPath(line);
        string name = getName(line);

        Mat src = imread(line);
        // BUGFIX/idiom: use Mat::empty() instead of poking at src.data.
        if (src.empty()) {
            cout << "Could not load image." << endl;
            continue;
        }

        // Halve the resolution to speed up detection.
        resize(src, src, cv::Size(src.cols/2, src.rows/2));

        // Convert src image to grayscale.
        Mat grayImg;
        cvtColor(src, grayImg, CV_BGR2GRAY);

        // Vector for the detected objects.
        std::vector<Rect> plates;

        // Detect licence plates.
        cascade.detectMultiScale(grayImg, plates, 1.1, 5, 0, Size(50, 12));

        // Draw a rectangle around each detected plate.
        Mat boundImg, croppedImg;
        src.copyTo(boundImg);

        // NOTE(review): the counter incremented below is POC (presumably a
        // file-scope global, also used by the commented-out imwrite). The
        // old unused local `int poc = 0;` was removed -- confirm POC is
        // really meant to count plates across all images.
        for (size_t i = 0; i < plates.size(); i++)
        {
            rectangle(boundImg, Point(plates[i].x,plates[i].y), Point(plates[i].x+plates[i].width,plates[i].y+plates[i].height), Scalar(0,255,0), 2);
            Mat potImg = src(plates[i]);

            croppedImg = cropLP(potImg);

            // Copy of the plate region at the original (halved) resolution.
            Mat outImg(plates[i].height, plates[i].width, CV_8UC3, 0.0);
            Mat roi(src, plates[i]);
            roi.copyTo(outImg);
            //imwrite("out/"+NumberToString(POC)+".png", outImg);
            POC++;
        }

        imshow("bounded", boundImg);
        waitKey(0);
    }
}
コード例 #4
0
ファイル: facedetect.cpp プロジェクト: JieGuo/facedetect
// Detects faces in the given BGR image after shrinking it by `scale`, and
// prints each detection as coordinates normalised to the shrunk image size.
void detectFaces(Mat &img, CascadeClassifier &cascade, double scale) {

	// Grayscale conversion for the cascade.
	Mat gray;
	cvtColor(img, gray, CV_BGR2GRAY);

	// Shrunk working image: faster detection at reduced resolution.
	Mat shrunk(cvRound(img.rows / scale), cvRound(img.cols / scale), CV_8UC1);
	const Size shrunkSize = shrunk.size();

	resize(gray, shrunk, shrunkSize, 0, 0, INTER_LINEAR);
	equalizeHist(shrunk, shrunk);

	vector<Rect> faces;
	cascade.detectMultiScale(shrunk, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30));

	for (vector<Rect>::const_iterator face = faces.begin(); face != faces.end(); ++face) {
		// Normalised (fractional) position and size relative to the shrunk image.
		printf(
			"face;x=%f&y=%f&width=%f&height=%f\n",
			(float) face->x / shrunkSize.width,
			(float) face->y / shrunkSize.height,
			(float) face->width / shrunkSize.width,
			(float) face->height / shrunkSize.height
		);
	}

}
コード例 #5
0
ファイル: detect_face.cpp プロジェクト: juzejunior/OpenCv
/**
 * @brief Detects the first frontal face in @p frame, marks it and returns
 *        the grayscale face region.
 * @param frame BGR input frame (annotated in place).
 * @return Grayscale ROI of the first detected face, or an empty Mat when no
 *         face was found.
 * Relies on the globals `face_cascade` and `window_name`.
 */
Mat detectAndDisplay(Mat frame)
{
   std::vector<Rect> faces;
   std::vector<Rect> profile_faces;
   Mat frame_gray;
   //convert to gray, to have less colors to worry
   cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
   //remove noise, using equalization
   equalizeHist(frame_gray, frame_gray);
   //-- Detect frontal faces with cascade
   face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30));

   // BUGFIX: the original indexed faces[0] unconditionally, which is
   // undefined behaviour when no face was detected. Bail out early instead.
   if (faces.empty())
   {
      imshow(window_name, frame);
      return Mat();
   }

   Rect face_i = faces[0];

   Point center(faces[0].x + faces[0].width/2, faces[0].y + faces[0].height/2);
   rectangle(frame, face_i, CV_RGB(0, 255, 0), 1);

   Mat faceROI = frame_gray(faces[0]);
   //showing the face - Erase later
   //imshow("Face Detected", faceROI);
   std::vector<Rect> eyes;
   //writing some text
   string box_text = format("Pessoa %d", 0);
   //on top left, put some text (clamped so it stays inside the image)
   int pos_x = max(face_i.tl().x - 10, 0);
   int pos_y = max(face_i.tl().y - 10, 0);
   //put text on image
   //center: putText(frame, box_text, center , FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
   putText(frame, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
   //-- Show what you got
   imshow(window_name, frame);

   return faceROI;
}
コード例 #6
0
ファイル: dart.cpp プロジェクト: jslater8/cvthing
/** @brief Viola-Jones detection on @p frame; each candidate region is
 *  verified with doDetect() and the annotated result is written to
 *  @p outname. Relies on the file-scope `cascade`.
 *  Side effects: saves every candidate region as test<i>.jpg (debug aid)
 *  and draws on @p frame (a pass-by-value copy, so the caller's Mat header
 *  is unchanged but the pixel data is shared).
 */
void detectAndDisplay( Mat frame, const char* outname )
{
	// Mat image = imread("test9.jpg", 1);
	// printf("TEST: %d\n", doDetect(image));
	std::vector<Rect> faces;
	Mat frame_gray;

	// 1. Prepare Image by turning it into Grayscale and normalising lighting
	cvtColor( frame, frame_gray, CV_BGR2GRAY );
	equalizeHist( frame_gray, frame_gray );

	// 2. Perform Viola-Jones Object Detection
	cascade.detectMultiScale( frame_gray, faces, 1.6, 1, 0|CV_HAAR_SCALE_IMAGE, Size(80, 80), Size(500,500) );

       // 3. Print number of Faces found
	// std::cout << faces.size() << std::endl;

       // 4. Draw box around faces found
	
	// doDetect(frame, points);
	

	// for (int i = 0; i < faces.size(); i++){
	// 	for (int j = 0; j < points.size(); j++){
	// 		// make sure points from hough are inside viola jones
	// 		// Rect intersect = faces[i] & points[j];
	// 		// if ((intersect == points[j])){
	// 		// 	rectangle(frame, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar( 255, 0, 0 ), 3);
	// 		// }
	// 		if(faces[i].contains(points[j])){
	// 			rectangle(frame, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar( 0, 0, 255 ), 4);
	// 		}
	// 	}
	// }

	Mat output = frame.clone();

	for(int i = 0; i < faces.size(); i++){
		Mat image;
		// Grow the detection by 50% in each dimension (keeps the top-left
		// corner); fall back to the raw detection if the grown rectangle
		// would run off the bottom/right edge of the frame.
		Rect image2 = faces[i] + cv::Size(faces[i].width*0.5, faces[i].height*0.5);
		if (image2.x + image2.width < frame.cols && image2.y + image2.height < frame.rows){
			image = frame(image2);
		}
		else image = frame(faces[i]);

		// "test%d.jpg" + 10-digit int + NUL fits in 20 bytes, so the
		// fixed buffer is large enough for any non-negative i.
		char buffer[20];
		sprintf(buffer, "test%d.jpg", i);
		imwrite(buffer, image);
		// Only candidates confirmed by doDetect() get a red box on the output.
		int test = doDetect(image);
		if (test) rectangle(output, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar( 0, 0, 255 ), 4);
	}


	imwrite(outname,output);

	// Green boxes on the (shared-pixel) frame for every raw detection.
	for( int i = 0; i < faces.size(); i++ )
	{
		rectangle(frame, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar( 0, 255, 0 ), 2);
	}
}
コード例 #7
0
ファイル: perf_haar.cpp プロジェクト: stylight/opencv
// Performance test: runs Haar-cascade frontal-face detection on a fixed test
// image, either on the CPU (plain implementation) or through the OpenCL GPU
// path, and sanity-checks the detected rectangles against stored results.
PERF_TEST(HaarFixture, Haar)
{
    vector<Rect> faces;

    // Load the test input already in grayscale; the cascade expects a
    // single-channel image.
    Mat img = imread(getDataPath("gpu/haarcascade/basketball1.png"), CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(!img.empty()) << "can't open basketball1.png";
    declare.in(img);

    if (RUN_PLAIN_IMPL)
    {
        CascadeClassifier faceCascade;
        ASSERT_TRUE(faceCascade.load(getDataPath("gpu/haarcascade/haarcascade_frontalface_alt.xml")))
                << "can't load haarcascade_frontalface_alt.xml";

        // Timed section: CPU detection.
        TEST_CYCLE() faceCascade.detectMultiScale(img, faces,
                                                     1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

        // Allow a small epsilon when comparing rectangles across runs.
        SANITY_CHECK(faces, 4 + 1e-4);
    }
    else if (RUN_OCL_IMPL)
    {
        ocl::CascadeClassifier_GPU faceCascade;
        ocl::oclMat oclImg(img);

        ASSERT_TRUE(faceCascade.load(getDataPath("gpu/haarcascade/haarcascade_frontalface_alt.xml")))
                << "can't load haarcascade_frontalface_alt.xml";

        // Timed section: OpenCL detection on the device image.
        OCL_TEST_CYCLE() faceCascade.detectMultiScale(oclImg, faces,
                                     1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

        SANITY_CHECK(faces, 4 + 1e-4);
    }
    else
        OCL_PERF_ELSE
}
コード例 #8
0
// Estimate the face position from detected eyes: returns the average of the
// eye centres (each offset by priorCenter), or priorCenter unchanged when no
// eye is found.
Point faceFromEyes(Point priorCenter, Mat face) {

  std::vector<Rect> eyes;

  // Try to detect eyes, if no face is found
  eyes_cascade.detectMultiScale(face, eyes, 1.1, 2,
				0 | CASCADE_SCALE_IMAGE, Size(30, 30));

  // Accumulate the centre of every detected eye in frame coordinates.
  int sum_x = 0;
  int sum_y = 0;
  for (std::vector<Rect>::const_iterator e = eyes.begin(); e != eyes.end(); ++e) {
    sum_x += priorCenter.x + e->x + e->width / 2;
    sum_y += priorCenter.y + e->y + e->height / 2;
  }

  // Replace the prior with the mean eye centre when at least one eye was seen.
  if (!eyes.empty()) {
    priorCenter.x = sum_x / eyes.size();
    priorCenter.y = sum_y / eyes.size();
  }

  return priorCenter;
}
コード例 #9
0
ファイル: main2.cpp プロジェクト: epodak/FaceTracker-1
/**
 * Detects faces in a frame, draws an ellipse on each, records the position
 * of the last detected face in the globals actualx/actualy (and the scaled
 * valx/valy intended for the Arduino serial link), then shows the frame.
 */
void detectAndDisplay( Mat frame, int fd )
{

   std::vector<Rect> faces;
   Mat frame_gray;
    int rc1, rc2;

   cvtColor( frame, frame_gray, CV_BGR2GRAY );
   equalizeHist( frame_gray, frame_gray );
   //-- Detect faces
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

   for( size_t idx = 0; idx < faces.size(); idx++ )
    {
      const Rect& f = faces[idx];
      Point center( f.x + f.width*0.5, f.y + f.height*0.5 );
      ellipse( frame, center, Size( f.width*0.5, f.height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
	  cout << "X:" << f.x  <<  "  y:" << f.y  << endl;

        actualx = f.x;
        actualy = f.y;

        valx = actualx/5;
        //rc1 = ard->serialport_writebyte(fd, (uint8_t)valx);

        valy = actualy/5;
        //rc2 = ard->serialport_writebyte(fd, (uint8_t)valy);

        // Grayscale ROI of the face (kept for the disabled eye-detection code).
        Mat faceROI = frame_gray( f );
      std::vector<Rect> eyes;
    }

   //-- Show what you got
   imshow( window_name, frame );

}
コード例 #10
0
/** @brief Detects faces in @p frame and sends the x coordinate of each face
 *  centre over the serial port to an Arduino.
 *  Relies on file-scope globals: `face_cascade`, `ss` (stringstream),
 *  `bytes` and `COM`.
 */
void detectAndDisplay(Mat &frame)
{
  static vector<Rect> faces;
  static Mat frame_gray;

  cvtColor( frame, frame_gray, CV_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  //-- Detect faces
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

  size_t tam = faces.size();
  for(size_t i = 0; i < tam; i++){
    Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );

    /// Convert the x coordinate of the face's central pixel (int --> char *)
    // NOTE(review): ss.clear() only resets the stream's error/eof flags; it
    // does NOT discard the buffer, so the stringstream grows on every call.
    // Presumably ss.str("") was intended as well -- confirm with the owner
    // of the global `ss` before changing.
    ss.clear();
    ss << center.x;
    ss >> bytes;

    cout << bytes << endl;

    /// Send the x coordinate of the face centre to the Arduino serial port
    RS232_cputs(COM, bytes);

    /// Draw an ellipse around the face's central pixel to highlight it
    //ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 255, 255 ), 4, 8, 0 );
  }

  /// Display the video frame that has just been processed
  //imshow( window_name, frame );
 }
コード例 #11
0
ファイル: main.cpp プロジェクト: huurneman/tobcat
// Real-time webcam face-detection demo.
// argv[1] = webcam device number, argv[2] = cascade model xml,
// argv[3] = minimum number of detection overlaps (optional, default 15).
int main(int argc, const char** argv)
{
	// Standard interface for executing the program
	if( argc == 1 ){
		cout << "Simple real time face detection demo." << endl;
		cout << "real_time_face_detection.exe <webcam number - default = 0> <face_model.xml> <# of overlaps - default = 15>" << endl;
		return 0;
	}

	// BUGFIX: argv[2]/argv[3] were read with only `argc == 1` checked, so
	// running with one or two arguments crashed. The model file is required;
	// the overlap count gets the default promised by the help text.
	if( argc < 3 ){
		cout << "Missing arguments: expected <webcam number> <face_model.xml> [# of overlaps]." << endl;
		return -1;
	}

	// Retrieve the correct capturing device
	int device_number = atoi(argv[1]);
	VideoCapture capture( device_number );

	if(!capture.isOpened()){
		cout << "Could not open webcam input, make sure a webcam is connected to the system." << endl;
		return -1;
	}

	Mat frame, grayscale;
	string window_name = "Face detection demo, please smile";

	// BUGFIX: the load result was previously ignored; a bad model path made
	// detectMultiScale throw on the first frame.
	CascadeClassifier cascade;
	if(!cascade.load(argv[2])){
		cout << "Could not load the cascade model." << endl;
		return -1;
	}

	int number_overlaps = (argc > 3) ? atoi(argv[3]) : 15;

	while( true ){
		capture >> frame;
		if( frame.empty() ){
			cout << "Bad frame received, closing down application!";
			break;
		}

		// Process frame and perform detection
		cvtColor(frame, grayscale, COLOR_BGR2GRAY);
		equalizeHist(grayscale, grayscale);

		vector<Rect> faces;

		cascade.detectMultiScale(grayscale, faces, 1.05, number_overlaps);

		// Add rectangle around each result and display
		for( size_t i = 0; i < faces.size(); i++ ){
			rectangle(frame, faces[i], Scalar(0,0,255), 2);
			stringstream face;
			face << "Face " << (i + 1);
			putText(frame, face.str(), Point(faces[i].x, faces[i].y - 15), 1, 2, Scalar(0, 0, 255), 1 );
		}

		imshow(window_name, frame);

		// ESC quits the demo.
		int key = waitKey(25);
		if(key == 27 ) { break;}
	}

	return 0;
}
コード例 #12
0
ファイル: detectObject.cpp プロジェクト: Renferal/SociAll
// Search for objects such as faces in the image using the given parameters, storing the multiple cv::Rects into 'objects'.
// Can use Haar cascades or LBP cascades for Face Detection, or even eye, mouth, or car detection.
// Input is temporarily shrunk to 'scaledWidth' for much faster detection, since 200 is enough to find faces.
void detectObjectsCustom(const Mat &img, CascadeClassifier &cascade, vector<Rect> &objects, int scaledWidth, int flags, Size minFeatureSize, float searchScaleFactor, int minNeighbors)
{
    // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
    Mat gray;
    if (img.channels() == 3) {
        cvtColor(img, gray, CV_BGR2GRAY);
    }
    else if (img.channels() == 4) {
        cvtColor(img, gray, CV_BGRA2GRAY);
    }
    else {
        // Access the input image directly, since it is already grayscale.
        gray = img;
    }

    // Possibly shrink the image, to run much faster.
    Mat inputImg;
    float scale = img.cols / (float)scaledWidth;
    if (img.cols > scaledWidth) {
        // Shrink the image while keeping the same aspect ratio.
        int scaledHeight = cvRound(img.rows / scale);
        resize(gray, inputImg, Size(scaledWidth, scaledHeight));
    }
    else {
        // Access the input image directly, since it is already small.
        inputImg = gray;
    }

    // Standardize the brightness and contrast to improve dark images.
    Mat equalizedImg;
    equalizeHist(inputImg, equalizedImg);

    // Detect objects in the small grayscale image.
    cascade.detectMultiScale(equalizedImg, objects, searchScaleFactor, minNeighbors, flags, minFeatureSize);

    // Enlarge the results if the image was temporarily shrunk before detection.
    if (img.cols > scaledWidth) {
        for (int i = 0; i < (int)objects.size(); i++ ) {
            objects[i].x = cvRound(objects[i].x * scale);
            objects[i].y = cvRound(objects[i].y * scale);
            objects[i].width = cvRound(objects[i].width * scale);
            objects[i].height = cvRound(objects[i].height * scale);
        }
    }

    // Make sure the object is completely within the image, in case it was on a border.
    for (int i = 0; i < (int)objects.size(); i++ ) {
        if (objects[i].x < 0)
            objects[i].x = 0;
        if (objects[i].y < 0)
            objects[i].y = 0;
        if (objects[i].x + objects[i].width > img.cols)
            objects[i].x = img.cols - objects[i].width;
        if (objects[i].y + objects[i].height > img.rows)
            objects[i].y = img.rows - objects[i].height;
    }

    // Return with the detected face rectangles stored in "objects".
}
コード例 #13
0
// Detects faces in the image at the given path, predicts an identity for
// each face with the trained model, annotates the image with rectangles and
// prediction labels, and writes the result to a fixed output path.
// Uses the members: images (training set, defines the model face size),
// haar_cascade and model.
void FaceDetection::detect(char* image) 
{
    //TODO : change below "out.png" to correct output file path.
    string image_to_scan(image), outout_image("C:\\ImageProcessing\\Images\\Detected\\out.jpg");
    Mat frame;
    Mat img = imread(image_to_scan);
    // BUGFIX: imread returns an empty Mat on failure (bad path, unsupported
    // format); cvtColor would then throw. Bail out early instead.
    if (img.empty()) {
        return;
    }
    Mat gray;
    cvtColor(img, gray, CV_BGR2GRAY);

    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size AND we need to reshape incoming faces to this size:
    int im_width = images[0].cols;
    int im_height = images[0].rows;

    // Find the faces in the picture:
    vector< Rect_<int> > faces;
    haar_cascade.detectMultiScale(gray, faces);

    // At this point you have the position of the faces in
    // faces. Now we'll get the faces, make a prediction and
    // annotate it in the video.
    for(size_t i = 0; i < faces.size(); i++) {
        // Process face by face:
        Rect face_i = faces[i];
        // Crop the face from the image.
        Mat face = gray(face_i);
        // Resizing the face is necessary for Eigenfaces and Fisherfaces;
        // it is NOT needed for Local Binary Patterns Histograms, so the
        // input preparation really depends on the algorithm used.
        Mat face_resized;
        cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
        // Now perform the prediction:
        int prediction = model->predict(face_resized);
        // Draw a green rectangle around the detected face:
        rectangle(img, face_i, CV_RGB(0, 255,0), 1);
        // Create the text we will annotate the box with:
        string box_text = format("Prediction = %d", prediction);
        // Calculate the position for the annotated text, clamped so we
        // never pass negative coordinates to putText:
        int pos_x = std::max(face_i.tl().x - 10, 0);
        int pos_y = std::max(face_i.tl().y - 10, 0);
        // And now put it into the image:
        putText(img, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
    }

    // Write the result of the scan to the output file.
    imwrite(outout_image, img);
    return ;
}
コード例 #14
0
// Returns the bounding boxes of all faces found in the given BGR frame,
// using the file-scope face_cascade.
vector<Rect> detectFaces(Mat frame) {
    Mat gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    equalizeHist(gray, gray);

    vector<Rect> detections;
    face_cascade.detectMultiScale(gray, detections, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    return detections;
}
コード例 #15
0
ファイル: facedetector.cpp プロジェクト: nangege/robotCute
// Runs the classifier over the whole input image (image-pyramid scaling)
// and stores every match in `objects`.
void faceDetector::detectObject(CascadeClassifier &classifier, const Mat &input,vector<Rect> & objects)
{
    const float scaleStep = 1.1f;          // pyramid scale factor
    const int requiredNeighbors = 4;       // min neighbours to keep a hit
    const Size smallestFeature(10, 10);    // min object size
    classifier.detectMultiScale(input, objects, scaleStep, requiredNeighbors,
                                0 | CV_HAAR_SCALE_IMAGE, smallestFeature);
}
コード例 #16
0
// Face detection function: finds faces in the frame, then eyes and nose
// inside each face, draws the annotations and shows the result.
void detectAndDisplay( Mat frame )
{
   std::vector<Rect> faces;   // container for the detected face rectangles
   Mat frame_gray;

   cvtColor( frame, frame_gray, COLOR_BGR2GRAY ); // convert to grayscale
   equalizeHist( frame_gray, frame_gray );   // histogram equalization
   // Multi-scale detection with the face classifier
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
   // For each face, compute its centre point and draw an ellipse
   for( size_t i = 0; i < faces.size(); i++ )
    {
      Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
      ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 1, 8, 0 );
      Mat faceROI = frame_gray( faces[i] );  

      // Search for eyes inside each face
	  std::vector<Rect> eyes;
      eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );   // multi-scale eye detection
      for( size_t j = 0; j < eyes.size(); j++ )
      {
         // Eye centre in full-frame coordinates (ROI offset by the face origin)
         Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
         int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
         circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 1, 8, 0 );
      }

	  // Search for the nose inside each face
	  std::vector<Rect> nose;
	  nose_cascade.detectMultiScale( faceROI, nose, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
	  for( size_t k = 0; k < nose.size(); k++)
	  {
		 Point nose_center( faces[i].x + nose[k].x + nose[k].width/2, faces[i].y + nose[k].y + nose[k].height/2 );   // nose centre in full-frame coordinates
		 ellipse( frame, nose_center, Size( nose[k].width/2, nose[k].height/2), 0, 0, 360, Scalar( 200, 0, 100 ), 1, 8, 0 );// draw ellipse
	  }
		// Search for the mouth inside each face (disabled)
	    //std::vector<Rect> mouth;
		//mouth_cascade.detectMultiScale( faceROI, mouth, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
		//for( size_t l = 0; l < mouth.size(); l++)
		//{
		//	Point mouth_center( faces[i].x + mouth[l].x + mouth[l].width/2, faces[i].y + mouth[l].y + mouth[l].height/2 );
		//	ellipse( frame, mouth_center, Size( mouth[l].width/2, mouth[l].height/2), 0, 0, 360, Scalar( 255, 255, 255 ), 1, 8, 0 );
		//}
    }
    // Show the result in the window
    imshow( window_name, frame );
}
コード例 #17
0
ファイル: cvface.cpp プロジェクト: codedhead/facerec
// Detects faces in the frame, normalizes each face crop and runs it through
// the given FaceRecognizer; draws an ellipse per face and overlays the
// predicted person's avatar and name next to the face.
// Relies on globals: face_cascade, g_trainer, NORM_IMG_WIDTH/HEIGHT,
// CAPTURE_WND_NAME, drawAonB().
void detectAndRecognize( Mat frame, Ptr<FaceRecognizer> model )
{
	std::vector<Rect> faces;
	Mat frame_gray;

	cvtColor( frame, frame_gray, CV_BGR2GRAY );
	//equalizeHist( frame_gray, frame_gray );

	//-- Detect faces
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
	//faces.push_back(Rect(Point(0,0),Size(frame.cols,frame.rows)));
	for( size_t i = 0; i < faces.size(); i++ )
	{
		Point bottom_right(faces[i].x + faces[i].width,faces[i].y + faces[i].height);
		Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
		ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

		// Normalize the face crop to the size the recognizer was trained on.
		Mat to_rec = frame_gray( faces[i] );
		cv::resize(to_rec,to_rec,cv::Size(NORM_IMG_WIDTH,NORM_IMG_HEIGHT));
#ifdef DO_EQUALIZE
		equalizeHist(to_rec,to_rec);
#endif

#ifdef FACE_DEBUG
		// Debug build: paste the normalized crop itself into the frame.
		Mat rgb_to_rec;
		cvtColor( to_rec, rgb_to_rec, CV_GRAY2BGR );
		drawAonB(rgb_to_rec,frame,bottom_right/*+Point(0,NORM_IMG_HEIGHT)*/);
#endif

		int predictLabel=-1;
		double confidence=0.;
		predictLabel=model->predict(to_rec);
		//printf("confidence: %lf\n",confidence);

		// -1 means no identity; skip the overlay for this face.
		if(predictLabel==-1) continue;
		
 		string class_name=g_trainer.label(predictLabel);

		
				
		// Avatar image looked up by the predicted person's name.
		Mat avatar=imread(string("data/")+class_name+"/"+class_name+".avatar");
#ifndef FACE_DEBUG
		drawAonB(avatar,frame,bottom_right);
#endif
// 		if(!avatar.empty())
// 		{
// 			int w=min(max(frame.cols-1-bottom_right.x,0),avatar.cols),
// 				h=min(max(frame.rows-1-bottom_right.y,0),avatar.rows);
//  			cv::Rect avatar_roi( bottom_right, cv::Size(w,h));
//  			avatar(Rect(Point(0,0),Size(w,h))).copyTo( frame(avatar_roi) );
// 		}		
		
		putText(frame,class_name,bottom_right,FONT_HERSHEY_SIMPLEX, 1.5, cvScalar(250,20,10),3);
	}

	//-- Show what you got
	imshow( CAPTURE_WND_NAME, frame );
}
コード例 #18
0
ファイル: capture.cpp プロジェクト: cstucker/facerec
// Detects faces in `img` (shrunk by `scale` for speed). When a key was
// pressed, or sImg is set, each detected face is cropped from the small
// image, normalized to 100x100, blurred and saved as a numbered .pgm into
// captureDir; otherwise a rectangle is drawn around it. Uses the globals
// `captures` and `captureDir`. `nestedCascade` is accepted but unused here.
void detectAndDraw( Mat& img,
                   CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
                   double scale, int sImg)
{
    int i = 0, saveFace = 0;
    double t = 0;
    vector<Rect> faces;
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );
    // eqImg shares pixel data with smallImg (Mat copy is a shallow header).
    Mat eqImg(smallImg);

    // Time the detection pass for the printout below.
    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        |CV_HAAR_SCALE_IMAGE
        ,
        Size(30, 30) );
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    // Any key press arms face-saving for this frame.
    int key = waitKey(10);
    if(key >= 0)
        saveFace = 1;
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        CvPoint pt1, pt2;
        // NOTE(review): pt1/pt2 are scaled up to original-image coordinates,
        // but the rectangle below is drawn on eqImg, which is the SMALL
        // image -- so for scale > 1 the box lands in the wrong place (or off
        // the image). Presumably it was meant to be drawn on `img`; confirm.
        pt1.x = r->x*scale;
        pt1.y = r->y*scale;
        pt2.x = pt1.x + r->width*scale;
        pt2.y = pt1.y + r->height*scale;
        if(saveFace||sImg){
            captures++;
            // Crop from the small image, normalize to 100x100 and smooth.
            Mat faceRect = smallImg(*r);
            Mat resFace( 100, 100, CV_8UC1 );
            resize(faceRect, resFace, resFace.size(), 0, 0, INTER_LINEAR);
            GaussianBlur( resFace, resFace, Size(7,7), 3 );
            imshow("result",resFace);
            // Build "<captureDir><n>.pgm" and save after a confirming key.
            String imageFName = captureDir;
            stringstream out;
            out << captures;
            imageFName += out.str();
            imageFName += ".pgm";
            cerr << "Saving image " << imageFName << endl;
            waitKey(0);
            imwrite(imageFName, resFace);
            saveFace = 0;
        }else{
            rectangle( eqImg, pt1, pt2, CV_RGB(255,255,255), 2, 8, 0);
        }
    }
    cv::imshow( "result", eqImg );
}
コード例 #19
0
ファイル: facedetector.cpp プロジェクト: nangege/robotCute
// Runs the classifier configured to return only the single biggest match
// in the input image.
void faceDetector::detectLargestObject(CascadeClassifier & classifier, const Mat & input,vector<Rect> & outPut)
{
    const float scaleStep = 1.1f;          // pyramid scale factor
    const int requiredNeighbors = 4;       // min neighbours to keep a hit
    const Size smallestFeature(10, 10);    // min object size
    classifier.detectMultiScale(input, outPut, scaleStep, requiredNeighbors,
                                0 | CV_HAAR_FIND_BIGGEST_OBJECT, smallestFeature);
}
コード例 #20
0
// Returns true when at least one face is present in the given frame.
bool detect_face(Mat &frame) {
	vector<Rect> detections;

	face_cascade.detectMultiScale(frame, detections, 1.5, 1,
	                              CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING,
	                              Size(30, 30));
	return !detections.empty();
}
コード例 #21
0
ファイル: FaceRecDaemon.cpp プロジェクト: savaul/opencv
// Detects the single biggest face in `img` and returns its rectangle in
// original-image coordinates, or (-1,-1,-1,-1) when nothing is found.
// Uses the file-scope globals `small_img` (lazily-created downscaled buffer,
// cached across calls) and `scale`.
CvRect detectFaceInImage(
  IplImage* img, CascadeClassifier &cascade
)
{
  //syslog(LOG_INFO, "detectFaceInImage begins");

  CvRect found_face;
  
  // Create the shared downscaled buffer once; it is reused on later calls
  // (and intentionally never released while the process runs).
  if (!small_img)
  {
    small_img =
      cvCreateImage(
        cvSize(
          cvRound(img->width / scale),
          cvRound(img->height / scale)
        ),
        8, 1
      );
  }
 
  // NOTE(review): small_img is single-channel, so cvResize requires `img`
  // to already be grayscale -- a color input would fail here. Confirm the
  // caller always passes a 1-channel image.
  cvResize(img, small_img, CV_INTER_LINEAR);
  cvEqualizeHist(small_img, small_img);

  // Wrap the IplImage in a Mat header (no pixel copy) for the C++ API.
  Mat imgMat(small_img);

  // Detect objects in the small grayscale image.
  
  vector<Rect> objects;
  cascade.detectMultiScale(
    imgMat,
    objects,
    1.1f,
    2,
    CASCADE_FIND_BIGGEST_OBJECT,
    Size(20, 20)
  );

  if (objects.size() > 0)
  {
    // Found at least one face

    // Scale the detection back up to original-image coordinates.
    Rect r = (Rect)objects.at(0);

    found_face.x = (int)((double)r.x * scale);
    found_face.y = (int)((double)r.y * scale);
    found_face.width = (int)((double)r.width * scale);
    found_face.height = (int)((double)r.height * scale);
  }
  else
  {
    // Couldn't find the face

    found_face = cvRect(-1,-1,-1,-1);
  }
    
  return found_face;
}
コード例 #22
0
ファイル: clandmark.cpp プロジェクト: rafaelvareto/Clandmark
/** @brief Detects faces in @p frame, runs flandmark on each detected
 *  bounding box and draws the facial landmarks into the frame.
 *  @param frame       BGR frame, annotated in place.
 *  @param flandmark   landmark detector (detect_optimized is used).
 *  @param featurePool unused here; kept for interface compatibility.
 */
void detectAndDisplay( Mat &frame, Flandmark *flandmark, CFeaturePool *featurePool)
{
	std::vector<Rect> faces;
	Mat frame_gray;
	int bbox[8];
	fl_double_t *landmarks;

	cvtColor( frame, frame_gray, CV_BGR2GRAY );
//    cvtColor( frame, frame_gray, COLOR_BGR2GRAY );  // <- OpenCV 3.0
	//equalizeHist( frame_gray, frame_gray );

	//-- Detect faces
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
//    face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );

	for( uint32_t i = 0; i < faces.size(); i++ )
	{
		// Get detected face bounding box (four corners, clockwise from top-left)
		bbox[0] = faces[i].x;
		bbox[1] = faces[i].y;
		bbox[2] = faces[i].x+faces[i].width;
		bbox[3] = faces[i].y;
		bbox[4] = faces[i].x+faces[i].width;
		bbox[5] = faces[i].y+faces[i].height;
		bbox[6] = faces[i].x;
		bbox[7] = faces[i].y+faces[i].height;

		// Detect facial landmarks on the grayscale frame
		cimg_library::CImg<unsigned char>* frm_gray = cvImgToCImg(frame_gray);
//		flandmark->detect_from_nf(frm_gray, bbox);
//		flandmark->detect( frm_gray, bbox );
		flandmark->detect_optimized(frm_gray, bbox);

		delete frm_gray;

		// Get detected landmarks (interleaved x,y pairs)
		landmarks = flandmark->getLandmarks();

		// Draw bounding box and detected landmarks
//		rectangle(frame, Point(bbox[0], bbox[1]), Point(bbox[2], bbox[3]), Scalar(255, 0, 0));
		circle(frame, Point(int(landmarks[0]), int(landmarks[1])), 2, Scalar(255, 0, 0), -1);
		// FIX: the inner loop previously re-declared `i`, shadowing the face
		// index above; renamed to `k` (behaviour unchanged, intent clearer).
		for (int k = 2; k < 2*flandmark->getLandmarksCount(); k += 2)
		{
			circle(frame, Point(int(landmarks[k]), int(landmarks[k+1])), 2, Scalar(0, 0, 255), -1);
		}

		// Textual output
		printTimingStats(flandmark->timings);
		//printLandmarks(landmarks, flandmark->getLandmarksCount());
		printLandmarks(flandmark->getLandmarksNF(), flandmark->getLandmarksCount());
	}

	//-- Show what you got
#ifdef SHOW_WINDOWS
	imshow( window_name, frame );
#endif
}
コード例 #23
0
ファイル: objectdetect.hpp プロジェクト: melmoumni/CT
	/** @brief Detects logos in @p frame with the given cascade classifier.
	 *  @param frame        BGR input frame (not modified).
	 *  @param logo_cascade classifier to run.
	 *  @param logos        output: bounding boxes of the detections.
	 */
	void detectAndMark( Mat frame, CascadeClassifier& logo_cascade, vector<Rect>& logos )
	{
		// Standard preprocessing: grayscale + histogram equalization.
		// (The unused local `Mat ROI` was removed.)
		Mat frame_gray;
		cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
		equalizeHist( frame_gray, frame_gray );	
		//-- Detect logos
		logo_cascade.detectMultiScale( frame_gray, logos, 1.05, 1, 0|CASCADE_SCALE_IMAGE);
	}
コード例 #24
0
ファイル: main.cpp プロジェクト: bassaer/PersonDetection
// Detects humans in the image, draws a box around each detection and
// returns the annotated image (same underlying pixel data as the input).
Mat detectHumanInImage(Mat &image,CascadeClassifier cascade){
    vector<Rect> detections;
    cascade.detectMultiScale(image, detections, 1.1, 2);

    for (size_t i = 0; i < detections.size(); i++){
        const Rect& r = detections[i];
        rectangle(image,
                  Point(r.x, r.y),
                  Point(r.x + r.width, r.y + r.height),
                  Scalar(200.0,100), 3, CV_AA);
    }
    return image;
}
コード例 #25
0
ファイル: main.cpp プロジェクト: JuarezASF/Code
int main(int argc, char **arg)
{
    cout << "Hello World do classificador em cascada!" << endl;

    //LÊ IMAGEM DE ENRTADA E CONVERTE PARA GRAYSCALE
    img = imread(arg[1]);
    cvtColor(img, gray, COLOR_BGR2GRAY);

    //INICIALIZA CLASSIFICADORES
    CascadeClassifier *faceDetector;
    CascadeClassifier *eyeDetector;
    faceDetector = new CascadeClassifier("/usr/include/opencv/data/haarcascade_frontalface_default.xml");
    eyeDetector = new CascadeClassifier("/usr/include/opencv/data/haarcascade_eye.xml");
    // na minha instação do opencv esses arquivos não existiam, tive que baixá-los e colocá-los nessa pasta
    // baixei os .xml dessa página https://github.com/Itseez/opencv/tree/master/data/haarcascades

    //OBJETOS SÃO DETECTADOS COMO UM VETOR DE RETÂNGULOS
    vector<Rect> faces;
    faceDetector->detectMultiScale(gray, faces, 1.1);
    cout << "Número de faces detectadas:" << faces.size() << endl;
    //PARA CADA FACE DETECTADA, PROCURAR POR OLHOS
    for(unsigned int i = 0; i < faces.size(); i++){
        rectangle(img, faces[i], Scalar(255, 0 ,0));
        Mat ROI(gray, faces[i]);
        Mat ROI_color(img, faces[i]);

        vector<Rect> eyes;
        eyeDetector->detectMultiScale(ROI, eyes);
        for(unsigned int j = 0; j < eyes.size(); j++)
            rectangle(ROI_color, eyes[j], Scalar(0, 255, 0));

        cout << "\t \t Número de olhos detectados na face " << i << ": " << eyes.size() << endl;
    }


    // Create windows
    namedWindow( image_window, WINDOW_AUTOSIZE );
    //MOSTRA IMAGEM FINAL
    imshow(image_window, img);
    //ESPERA COMANDO PARA ENCERRAR EXECUÇÃO
    waitKey(0);

    return 0;
}
コード例 #26
0
ファイル: face.cpp プロジェクト: cwvh/cvstart
/**
 * @brief Webcam face-detection loop with running timing statistics.
 *
 * Opens camera 0 (optionally sized by argv[1] x argv[2]), detects faces in
 * each frame with a Haar cascade, draws a circle around each hit, and prints
 * the average per-frame detection time every 25 frames. 'q' or ESC quits.
 */
int main(int argc, char** argv) {
    using namespace std;
    using namespace cv;

    VideoCapture cap(0);
    if (!cap.isOpened())
        exit(1);

    // Optional capture resolution override: <width> <height>.
    if (argc > 2) {
        cap.set(CV_CAP_PROP_FRAME_WIDTH, atoi(argv[1]));
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, atoi(argv[2]));
    }

    CascadeClassifier cascade;
    if (!cascade.load("haarcascade_frontalface_default.xml"))
        exit(2);

    const char* name = basename(argv[0]);
    namedWindow(name);
    for (int frame = 1;; frame++) {
        static double mean = 0;   // accumulated detect time over the current 25-frame window
        TickMeter tm;
        Mat img, gray;

        tm.start();
        cap >> img;
        // A dropped/empty frame would make cvtColor assert; stop cleanly instead.
        if (img.empty())
            break;

        cvtColor(img, gray, CV_BGR2GRAY);
        equalizeHist(gray, gray);

        vector<Rect> objects;
        // High minNeighbors (9) + Canny pruning trades recall for fewer false positives.
        cascade.detectMultiScale(gray, objects, 1.2, 9,
                CV_HAAR_DO_CANNY_PRUNING);
        typedef vector<Rect>::const_iterator RCI;
        for (RCI i = objects.begin(); i != objects.end(); ++i) {
            Point center(cvRound(i->x+i->width/2),cvRound(i->y+i->height/2));
            int radius = cvRound(i->width / 2);
            circle(img, center, radius, Scalar(128,255,128), 2, 8, 0);
        }

        imshow(name, img);

        tm.stop();
        mean += tm.getTimeMilli();
        if (frame % 25 == 0) {
            printf("avg detect time: %.2f ms\n", mean / 25);
            mean = 0;
        }

        switch (waitKey(10)) {
        case 'q': case 27:
            exit(0);
            break;
        }
    }
}
コード例 #27
0
void PreProcessingFilter::detectObjectsCustom(const Mat &image, CascadeClassifier &cascade, vector<Rect> &objects, int scaledWidth, int flags, Size minFeatureSize, float searchScaleFactor, int minNeighbors)
{
    Mat gray;
    if (image.channels() == 3)
    {
        cvtColor(image, gray, CV_BGR2GRAY);
    }
    else if (image.channels() == 4)
    {
        cvtColor(image, gray, CV_BGRA2GRAY);
    }
    else
    {
        gray = image;
    }

    Mat inputImg;
    float scale = image.cols / (float)scaledWidth;
    if (image.cols > scaledWidth)
    {
        int scaledHeight = cvRound(image.rows / scale);
        resize(gray, inputImg, Size(scaledWidth, scaledHeight));
    }
    else
    {
        inputImg = gray;
    }

    Mat equalizedImg;
    equalizeHist(inputImg, equalizedImg);

    cascade.detectMultiScale(equalizedImg, objects, searchScaleFactor, minNeighbors, flags, minFeatureSize);

    if (image.cols > scaledWidth)
    {
        for (int i = 0; i < (int)objects.size(); i++)
        {
            objects[i].x = cvRound(objects[i].x * scale);
            objects[i].y = cvRound(objects[i].y * scale);
            objects[i].width = cvRound(objects[i].width * scale);
            objects[i].height = cvRound(objects[i].height * scale);
        }
    }

    for (int i = 0; i < (int)objects.size(); i++ )
    {
        if (objects[i].x < 0)
            objects[i].x = 0;
        if (objects[i].y < 0)
            objects[i].y = 0;
        if (objects[i].x + objects[i].width > image.cols)
            objects[i].x = image.cols - objects[i].width;
        if (objects[i].y + objects[i].height > image.rows)
            objects[i].y = image.rows - objects[i].height;
    }
}
コード例 #28
0
ファイル: cvLib.hpp プロジェクト: jstockhoff3/SHIELD
// Runs the given cascade over a frame and writes detected face rectangles
// into *theFaces. The frame is converted to grayscale and histogram-equalized
// before detection; minimum face size is 30x30 pixels.
void detectFaces(Mat frame, vector<Rect> *theFaces, CascadeClassifier theCascade){
    Mat grayFrame;
    cvtColor(frame, grayFrame, CV_BGR2GRAY);
    equalizeHist(grayFrame, grayFrame);

    theCascade.detectMultiScale(grayFrame, *theFaces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
}
コード例 #29
0
ファイル: test.cpp プロジェクト: ucd-robotics/AR.Drone.2.0
/**
 * @brief Detects faces (and eyes within them) in a frame, annotates and shows it.
 *
 * Each frame containing at least one face is also saved to disk as
 * "<counter> face.png" (imagecounter is a global incremented per save).
 */
void detectAndDisplay( Mat frame )
{
   std::vector<Rect> faces;
   Mat frame_gray;

   cvtColor( frame, frame_gray, CV_BGR2GRAY );
   equalizeHist( frame_gray, frame_gray );
   //-- Detect faces
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(50, 50) );

   // Original printed "No face!" unconditionally before the loop, even when
   // faces were found; only report it when detection actually came up empty.
   if( faces.empty() )
      printf("No face!\n");

   for( size_t i = 0; i < faces.size(); i++ )
    {
      // Save the raw frame before drawing annotations onto it.
	ostringstream os;
	os << imagecounter << " face.png";
	string name = os.str();
      imwrite( name, frame );
      //cvSaveImage(name, &(IplImage(frame)));
      imagecounter += 1;
      printf("We have a face!!\n");
      Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );

      ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );

      Mat faceROI = frame_gray( faces[i] );
      std::vector<Rect> eyes;

      //-- In each face, detect eyes
      eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(50, 50) );

      for( size_t j = 0; j < eyes.size(); j++ )
       {
         // Eye coordinates are relative to the face ROI; offset by the face origin.
         Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
         int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
         circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
       }
    }

   //-- Show what you got
   imshow( window_name, frame );
}
コード例 #30
0
ファイル: finder.cpp プロジェクト: spencerjackson/Stress-Test
// Detects eye pairs in the given image using the big eye-pair Haar cascade.
// Throws FileNotFoundException if the cascade file cannot be loaded.
vector<Rect> Finder::detectEyes (const Mat& image) {
	string filename("resources/haarcascade_mcs_eyepair_big.xml");
	CascadeClassifier eyePairCascade;
	if (!eyePairCascade.load(filename)) throw FileNotFoundException(filename);

	vector<Rect> detections;
	eyePairCascade.detectMultiScale(image, detections);
	return detections;
}