// Detect the requested facial features (eyes / nose / mouth) inside each
// face rectangle previously found on `frame`, then publish the aggregated
// result and notify the controllers.
void DetailedFaceDetector::detectFeatures(const cv::Mat &frame) {
    // Get the face rectangles from the base detector.
    RectVector lastRects = getAreas();

    // Build a fresh detailed result, one entry per detected face.
    DetailedFaces newDetails;

    for (RectVectorItr faceRect = lastRects.begin(); faceRect != lastRects.end(); ++faceRect) {
        // operator[] default-constructs a new entry keyed by the face rect.
        FaceDetails &currentFace = newDetails[*faceRect];

        // BUG FIX: the original tested `detectWhat && EYES` (logical AND),
        // which is true for ANY non-zero detectWhat, so all features were
        // always detected regardless of the requested mask. detectWhat is a
        // bitmask, so test individual flags with bitwise AND.
        if (detectWhat & EYES)
            detectEyes(frame, *faceRect, currentFace);
        if (detectWhat & NOSE)
            detectNose(frame, *faceRect, currentFace);
        if (detectWhat & MOUTH)
            detectMouth(frame, *faceRect, currentFace);
    }

    setDetailedFaceInfo(newDetails);
    notifyControllers();
}
/** * @function redisplay * (Called at each openGL step) * - Processes the webcam frame to detect the eyes with OpenCV, * - Creates a 3D scene with OpenGL, * - Render the scene and the webcam image. */ void redisplay() { if(frame.empty()) return; if(!bPause) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // OPENCV //-- flip frame image cv::Mat tempimage; if(bInvertCam) cv::flip(frame, tempimage, 0); else cv::flip(frame, tempimage, 1); //-- detect eyes tempimage = detectEyes(tempimage); // OPENGL //-- scene setGlCamera(); draw3dScene(); //-- cam if(bDisplayCam) displayCam(tempimage); // RENDER glutSwapBuffers(); } //-- post the next redisplay glutPostRedisplay(); }
// Draw the detected facial features on top of `img`.
// For every face rectangle: the face bounding box is drawn in blue; eyes and
// nose (when their classifier paths are supplied) are marked with filled
// green circles at their centres; mouth candidates are drawn as green
// rectangles. When all three classifiers are supplied ("full detection"),
// mouth candidates lying at or above the nose centre are rejected.
//
// NOTE: the function name keeps its historical typo ("Feaures") so existing
// callers are not broken. Heavy parameters are now taken by const reference
// (call-site compatible) instead of by value, avoiding a vector + three
// string copies per call.
static void detectFacialFeaures(Mat& img, const vector<Rect_<int> >& faces,
                                const string& eye_cascade,
                                const string& nose_cascade,
                                const string& mouth_cascade)
{
    for (unsigned int i = 0; i < faces.size(); ++i) {
        // Mark the bounding box enclosing the face.
        const Rect face = faces[i];
        rectangle(img, Point(face.x, face.y),
                  Point(face.x + face.width, face.y + face.height),
                  Scalar(255, 0, 0), 1, 4);

        // Eyes, nose and mouth are detected inside the face (region of interest).
        Mat ROI = img(Rect(face.x, face.y, face.width, face.height));

        // "Full detection": all three classifiers supplied, so the nose
        // position can be used to validate mouth candidates below.
        const bool is_full_detection =
            !eye_cascade.empty() && !nose_cascade.empty() && !mouth_cascade.empty();

        // Detect eyes if classifier provided by the user.
        if (!eye_cascade.empty()) {
            vector<Rect_<int> > eyes;
            detectEyes(ROI, eyes, eye_cascade);
            // Mark points corresponding to the centre of the eyes.
            for (unsigned int j = 0; j < eyes.size(); ++j) {
                const Rect e = eyes[j];
                circle(ROI, Point(e.x + e.width / 2, e.y + e.height / 2),
                       3, Scalar(0, 255, 0), -1, 8);
            }
        }

        // Detect nose if classifier provided by the user.
        double nose_center_height = 0.0;
        if (!nose_cascade.empty()) {
            vector<Rect_<int> > nose;
            detectNose(ROI, nose, nose_cascade);
            // Mark the centre (tip) of the nose; remember its height so the
            // mouth check below can reject candidates above it. (If several
            // noses are found, the last one wins — as in the original.)
            for (unsigned int j = 0; j < nose.size(); ++j) {
                const Rect n = nose[j];
                circle(ROI, Point(n.x + n.width / 2, n.y + n.height / 2),
                       3, Scalar(0, 255, 0), -1, 8);
                nose_center_height = n.y + n.height / 2;
            }
        }

        // Detect mouth if classifier provided by the user.
        if (!mouth_cascade.empty()) {
            vector<Rect_<int> > mouth;
            detectMouth(ROI, mouth, mouth_cascade);
            for (unsigned int j = 0; j < mouth.size(); ++j) {
                const Rect m = mouth[j];
                const double mouth_center_height = m.y + m.height / 2;
                // The mouth should lie below the nose in full-detection mode;
                // otherwise accept every candidate. (Collapsed from the
                // original three-way branch, which drew in exactly these
                // cases and skipped the rest.)
                if (!is_full_detection || mouth_center_height > nose_center_height)
                    rectangle(ROI, Point(m.x, m.y),
                              Point(m.x + m.width, m.y + m.height),
                              Scalar(0, 255, 0), 1, 4);
            }
        }
    }
}
int main(int argc, char** argv) { IplImage *img; char *file1 = "haarcascade_frontalface_alt.xml"; char *file2 = "haarcascade_eye.xml"; /* usage: eyedetect <image> */ //assert(argc == 2); /* load the face classifier */ cascade_f = (CvHaarClassifierCascade*)cvLoad(file1, 0, 0, 0); /* load the eye classifier */ cascade_e = (CvHaarClassifierCascade*)cvLoad(file2, 0, 0, 0); /* setup memory storage, needed by the object detector */ storage = cvCreateMemStorage(0); cv::VideoCapture cap(0); if(!cap.isOpened()) return -1; //cap.set(CV_CAP_PROP_FRAME_WIDTH, 320); //cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240); /* load image */ //img = cvLoadImage(argv[1], 1); /* always check */ assert(cascade_f && cascade_e && storage && img); cvNamedWindow("Faces", 1); cvNamedWindow("Eyes", 1); cvNamedWindow("Eyes before", 1); //add hough controls cvNamedWindow("Controls", 1); cvCreateTrackbar("Param 1", "Controls", ¶m1, 300); cvCreateTrackbar("Param 2", "Controls", ¶m2, 300); cvCreateTrackbar("ScaleA*10", "Controls", &scaleA, 300); cvCreateTrackbar("-ScaleB*10", "Controls", &scaleB, 300); cv::Mat frame; for(;;) { cap >> frame; IplImage tmp_img = frame; img = &tmp_img; /* detect eyes and display image */ detectEyes(img); cvShowImage("Faces", img); if(cvWaitKey(30) == 'q') break; } cvWaitKey(0); cvDestroyWindow("Faces"); cvReleaseImage(&img); return 0; }