/** * @brief Detects faces from a cv::Mat * @param input_img [const cv::Mat&] The input image * @param fast [bool] True for fast detection -- frontal only * @return [std::vector<cv::Rect>] A vector containing the detected faces. * Each face is represented by a rectangle. */ std::vector<cv::Rect> FaceDetector::detectFaces( const cv::Mat& input_img, bool fast) { std::vector<cv::Rect> front_faces, profile_faces, final_faces; cv::Mat grayscale_img; if( input_img.empty() ) { return final_faces; } cv::cvtColor(input_img, grayscale_img, CV_BGR2GRAY); cv::equalizeHist(grayscale_img, grayscale_img); // Detect Front Faces std::string haar_file_path = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"; front_faces = detectFaces( grayscale_img, haar_file_path ); if(fast) { return front_faces; } // Detect Profile Faces haar_file_path = "/usr/share/opencv/haarcascades/haarcascade_profileface.xml"; profile_faces = detectFaces( grayscale_img, haar_file_path ); // Identify unique faces final_faces = identifyUniqueFaces( front_faces, profile_faces ); return final_faces; }
int facial_features(int argc, char** argv) { cv::CommandLineParser parser(argc, argv, "{eyes||}{nose||}{mouth||}{help h||}"); if (parser.has("help")) { help(); return 0; } input_image_path = parser.get<string>(0); face_cascade_path = parser.get<string>(1); eye_cascade_path = parser.has("eyes") ? parser.get<string>("eyes") : ""; nose_cascade_path = parser.has("nose") ? parser.get<string>("nose") : ""; mouth_cascade_path = parser.has("mouth") ? parser.get<string>("mouth") : ""; if (input_image_path.empty() || face_cascade_path.empty()) { cout << "IMAGE or FACE_CASCADE are not specified"; return 1; } // Load image and cascade classifier files Mat image; image = imread(input_image_path); // Detect faces and facial features vector<Rect_<int> > faces; detectFaces(image, faces, face_cascade_path); detectFacialFeaures(image, faces, eye_cascade_path, nose_cascade_path, mouth_cascade_path); imshow("Result", image); waitKey(0); return 0; }
void FaceDetector::detectProfileFaces(const Mat &image, std::vector<Rect> &faces) { QString face_cascade_profile_file_name = "/home/lang/opencv/opencv-2.4.9/data/" "haarcascades/haarcascade_profileface.xml"; detectFaces( image, faces, face_cascade_profile_file_name ); }
// Wires the face tracker and speech synthesizer onto their own worker
// threads and connects the cross-thread Qt signal/slot plumbing.
// NOTE(review): facedetectorThread/soundThread are members declared
// elsewhere; they do not appear to be started here — confirm start() is
// called elsewhere before signals are emitted.
Brain::Brain(WebCamWrapper& webcam):m_webcam(webcam),m_faceTracker(m_webcam)
{
    // Face detection runs on its own thread; frames are delivered through
    // the queued triggerFaceDetection(cv::Mat) signal rather than direct calls.
    m_faceTracker.moveToThread(&facedetectorThread);
    QObject::connect(this, SIGNAL(triggerFaceDetection(cv::Mat)),
                     &m_faceTracker, SLOT(detectFaces(cv::Mat)));
    // Detection results (rects, labels, points) come back asynchronously.
    QObject::connect(&m_faceTracker,
                     SIGNAL(faceDetection(vector<cv::Rect_<int> >,vector<string>,std::vector<cv::Point>)),
                     this,
                     SLOT(onFacesDetection(vector<cv::Rect_<int> >,vector<string>,std::vector<cv::Point>)));
    // Speech synthesis likewise runs off the main thread.
    speech.moveToThread(&soundThread);
    QObject::connect(this, SIGNAL(say(std::string)),&speech, SLOT(onSay(std::string)));
    synthesizeSound();
}
void FaceDetector::processFrame() { Mat frame = getMostRecentFrame(); if(frame.empty()) { return; } detectFaces(frame); if(hasUpdates) { notifyControllers(); } }
/**
 * @brief Per-frame face-detection callback.
 *
 * Builds a crop rectangle from the frame's dimensions/offsets and forwards
 * the frame buffer to detectFaces(). Skipped entirely while detection is
 * stopped or paused.
 *
 * Idiom fix: the original used `goto EXIT` to skip the body; a plain
 * conditional expresses the same control flow without jumping over the
 * declaration of `crop`.
 */
void OMXCameraAdapter::FDFrameCallback(CameraFrame *cameraFrame)
{
    LOG_FUNCTION_NAME;

    if (mFaceDetectionRunning && !mFaceDetectionPaused)
    {
        VceCropRect crop;
        crop.cropw = cameraFrame->mWidth;
        crop.croph = cameraFrame->mHeight;
        crop.cropx = cameraFrame->mXOff;
        crop.cropy = cameraFrame->mYOff;
        detectFaces(cameraFrame->mBuffer, &crop);
    }

    LOG_FUNCTION_NAME_EXIT;
}
int main( int argc, char** argv ) { CvCapture *capture; IplImage *frame; int key; //NamNguyen comment and add(2lines below) //char *filename = "haarcascade_frontalface_alt.xml"; char filename[] = "haarcascade_frontalface_alt.xml"; cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 ); storage = cvCreateMemStorage( 0 ); capture = cvCaptureFromCAM( 0 ); assert( cascade && storage && capture ); cvNamedWindow( "video", 1 ); while( key != 'q' ) { frame = cvQueryFrame( capture ); if( !frame ) { fprintf( stderr, "Cannot query frame!\n" ); break; } cvFlip( frame, frame, -1 ); frame->origin = 0; detectFaces( frame ); key = cvWaitKey( 10 ); } cvReleaseCapture( &capture ); cvDestroyWindow( "video" ); cvReleaseHaarClassifierCascade( &cascade ); cvReleaseMemStorage( &storage ); return 0; }
void DetailedFaceDetector::processFrame() { cv::Mat frame = getMostRecentFrame(); if(frame.empty()) return; detectFaces(frame); //only detect features if the FaceDetector has updates! if(hasUpdates) { //only detect other features if the face detector had updates if(detectWhat > 0) detectFeatures(frame); //since this object disables autoNotify, do it manually now! //this is to avoid double updates to the controllers and //only update when necessary notifyControllers(); } }
/**
 * @brief Finds faces in an image retrieved from a file URL
 * @param file_name [std::string] The image file's URL
 * @param fast [bool] True for fast detection -- frontal only
 * @return [std::vector<cv::Rect>] A vector containing the detected faces.
 * Each face is represented by a rectangle.
 */
std::vector<cv::Rect> FaceDetector::findFaces(std::string file_name, bool fast)
{
  // Load from disk, then delegate to the in-memory detector.
  return detectFaces(loadImage(file_name), fast);
}
/**
 * @brief Corrects one video frame: skin-segments it, estimates head tilt from
 *        image moments, rotates the frame upright, and crops around the face.
 * @param inputFrame    frame to process; resized in place to CAMERA_RESOLUTION
 * @param outputFrame   receives the corrected frame (or a debug mosaic)
 * @param developerMode when true, outputFrame is a side-by-side debug view:
 *                      corrected | input | skin mask | best-pose snapshot
 *
 * NOTE(review): mutates many members (img, ycbcr, bw, face, lastFace, head,
 * ring buffers, counters) — not reentrant; use one instance per stream.
 */
void VideoCorrect::correctImage(Mat& inputFrame, Mat& outputFrame, bool developerMode){
	resize(inputFrame, inputFrame, CAMERA_RESOLUTION);
	inputFrame.copyTo(img);
	//Convert to YCbCr color space
	cvtColor(img, ycbcr, CV_BGR2YCrCb);
	//Skin color thresholding
	inRange(ycbcr, Scalar(0, 150 - Cr, 100 - Cb), Scalar(255, 150 + Cr, 100 + Cb), bw);
	if(IS_INITIAL_FRAME){
		face = detectFaces(img);
		// face.x == 0 is used throughout as the "no face found" sentinel.
		if(face.x != 0){
			lastFace = face;
		}
		else{
			outputFrame = img;
			return;
		}
		prevSize = Size(face.width/2, face.height/2);
		// Elliptical mask around the face: keeps only head pixels in the
		// skin mask so the moments below aren't skewed by hands/background.
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x + face.width/2, face.y + face.height/2),
			prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);
		// Capture the "best pose" snapshot only if the face rect lies fully
		// inside the image.
		if(face.x > 0 && face.y > 0 && face.width > 0 && face.height > 0 &&
		   (face.x + face.width) < img.cols && (face.y + face.height) < img.rows){
			img(face).copyTo(bestImg);
		}
		putText(img, "Give your best pose!", Point(face.x, face.y), CV_FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255,255,255,0), 1, CV_AA);
	}
	firstFrameCounter--;
	if(face.x == 0) //missing face prevention
		face = lastFace;
	//Mask the background out
	bw &= head;
	//Compute more accurate image moments after background removal
	m = moments(bw, true);
	// Orientation of the skin blob from central moments, in degrees.
	angle = (atan((2*m.nu11)/(m.nu20-m.nu02))/2)*180/PI;
	center = Point(m.m10/m.m00,m.m01/m.m00);
	//Smooth rotation (running average)
	bufferCounter++;
	rotationBuffer[ bufferCounter % SMOOTHER_SIZE ] = angle;
	smoothAngle += (angle - rotationBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;
	//Expand borders
	copyMakeBorder( img, img, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND,
		BORDER_REPLICATE, Scalar(255,255,255,0));
	if(!IS_INITIAL_FRAME){
		//Rotate the image to correct the leaning angle
		rotateImage(img, smoothAngle);
		//After rotation detect faces
		face = detectFaces(img);
		if(face.x != 0)
			lastFace = face;
		//Create background mask around the face
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x - BORDER_EXPAND + face.width/2, face.y -BORDER_EXPAND + face.height/2),
			prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);
		//Draw a rectangle around the face
		//rectangle(img, face, Scalar(255,255,255,0), 1, 8, 0);
		//Overlay the ideal pose
		if(replaceFace && center.x > 0 && center.y > 0){
			// NOTE(review): face.width is used for the y offset as well —
			// presumably assumes a square face rect; confirm intentional.
			center = Point(face.x + face.width/2, face.y + face.width/2);
			overlayImage(img, bestImg, center, smoothSize);
		}
	}
	else{
		face.x += BORDER_EXPAND; //position alignment after border expansion (not necessary if we detect the face after expansion)
		face.y += BORDER_EXPAND;
	}
	//Smooth ideal image size (running average)
	sizeBuffer[ bufferCounter % SMOOTHER_SIZE ] = face.width;
	smoothSize += (face.width - sizeBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;
	//Get ROI
	center = Point(face.x + face.width/2, face.y + face.width/2);
	roi = getROI(img, center);
	// Crop only if the ROI is strictly inside the image.
	if(roi.x > 0 && roi.y > 0 && roi.width > 0 && roi.height > 0 &&
	   (roi.x + roi.width) < img.cols && (roi.y + roi.height) < img.rows){
		img = img(roi);
	}
	//Resize the final image
	resize(img, img, CAMERA_RESOLUTION);
	if(developerMode){
		// Debug mosaic laid out left-to-right:
		// corrected frame | raw input | skin mask | best-pose snapshot.
		Mat developerScreen(img.rows,
							img.cols + inputFrame.cols + bw.cols,
							CV_8UC3);
		Mat left(developerScreen, Rect(0, 0, img.size().width, img.size().height));
		img.copyTo(left);
		Mat center(developerScreen, Rect(img.cols, 0, inputFrame.cols, inputFrame.rows));
		inputFrame.copyTo(center);
		cvtColor(bw, bw, CV_GRAY2BGR);
		Mat right(developerScreen, Rect(img.size().width + inputFrame.size().width, 0, bw.size().width, bw.size().height));
		bw.copyTo(right);
		Mat rightmost(developerScreen, Rect(img.size().width + inputFrame.size().width + bw.size().width - bestImg.size().width, 0, bestImg.size().width, bestImg.size().height));
		bestImg.copyTo(rightmost);
		outputFrame = developerScreen;
	}
	else{
		outputFrame = img;
	}
}
/// Convenience overload: forwards to the full detectFaces() with the
/// sentinel scale value -1.0 and a zero final argument.
/// NOTE(review): semantics of -1.0 / 0 are defined by the full overload —
/// presumably "use defaults"; confirm against its declaration.
int Detector::detectFaces(ClassifierInterface* ci, const std::string img_path, int draw)
{
    const double default_scale = -1.0;
    return detectFaces(ci, img_path, default_scale, draw, 0);
}
//////////////////////////////////
// main()
//
// Entry point. Phase 1: capture live video, detect faces per frame, draw
// rectangles, and accumulate a detection history. Phase 2: replay the
// recorded history and write two AVI files — the full annotated playback
// and a version cropped to the "consensus" face rectangle.
int _tmain(int argc, _TCHAR* argv[])
{
	// try_conv();
	if( !initAll() )
		exitProgram(-1);

	// Capture and display video frames until a face
	// is detected
	int frame_count = 0;
	// Phase 1: capture loop; ESC (27) exits early.
	while( (char)27!=cvWaitKey(1) )
	{
		//Retrieve next image and
		// Look for a face in the next video frame
		//read into pfd_pVideoFrameCopy
		if (!captureVideoFrame()){
			if (frame_count==0)
				throw exception("Failed before reading anything");
			break; //end of video..
		}
		++frame_count;

		CvSeq* pSeq = 0;
		detectFaces(pfd_pVideoFrameCopy,&pSeq);

		//Do some filtration of pSeq into pSeqOut, based on history etc,
		//update data structures (history ,face threads etc.)s
		list<Face> & faces_in_this_frame = FdProcessFaces(pfd_pVideoFrameCopy,pSeq);

		//== draw rectrangle for each detected face ==
		if (!faces_in_this_frame.empty()){ //faces detected (??)
			int i = 0;
			for(list<Face>::iterator face_itr = faces_in_this_frame.begin();
				face_itr != faces_in_this_frame.end(); ++face_itr)
			{
				CvPoint pt1 = cvPoint(face_itr->x,face_itr->y);
				CvPoint pt2 = cvPoint(face_itr->x + face_itr->width,face_itr->y + face_itr->height);
				// Thick solid rectangle for faces detected in this frame,
				// thin one for faces carried over from history.
				if (face_itr->frame_id == frame_count) //detected for this frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],3,8,0);
				else //from a previous frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],1,4,0);
			}
		}else{ //no faces detected
			Sleep(100);
		}
		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvReleaseImage(&pfd_pVideoFrameCopy);
	} //end input while

	cout << "==========================================================" << endl;
	cout << "========== Input finished ================================" << endl;
	cout << "==========================================================" << endl << endl;
	cout << "Press a key to continue with history playback" <<endl;
	char cc = fgetc(stdin);

	cout << "==========================================================" << endl;
	cout << "==== Playback history + rectangles + =====" << endl;
	cout << "==== create output video(s) =====" << endl;
	cout << "==========================================================" << endl << endl;

	list<FDHistoryEntry> & pHistory = FdGetHistorySeq();

	//== VIDEO WRITER START =====================
	int isColor = 1;
	int fps     = 12;//30;//25;  // or 30
	int frameW  = 640; // 744 for firewire cameras
	int frameH  = 480; // 480 for firewire cameras
	CvVideoWriter * playbackVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\playback.avi").c_str(),
								PFD_VIDEO_OUTPUT_FORMAT,
								fps,cvSize(frameW,frameH),isColor);
	CvVideoWriter * croppedVidWriter = 0;
	if (!playbackVidWriter) {
		cerr << "can't create vid writer" << endl;
		exitProgram(-1);
	}
	bool wasWrittenToVideo = false;
	//== VIDEO WRITER END =====================

	int index = 0;
	// play recorded sequence----------------------------
	// i.e. just what's in the history
	int playback_counter = 0;

	cout << "start finding consensus rect " << endl;
	//find min max
	// "Consensus" rect: the bounding box around the FIRST face rect of
	// every history entry; used as the fixed crop window for the cropped
	// output video.
	bool found =false;
	int min_x = INT_MAX,//pFaceRect->x,
		max_x = 0,//pFaceRect->x+pFaceRect->width,
		min_y = INT_MAX,//pFaceRect->y,
		max_y = 0;//pFaceRect->y+pFaceRect->height;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		CvSeq* pFacesSeq = itr->pFacesSeq;
		assert(pFacesSeq);
		//TODO Might want to convert to Face here
		CvRect * pFaceRect = (CvRect*)cvGetSeqElem(pFacesSeq, 0); //works only on first rec series
		if (pFaceRect){
			found = true;
			if (pFaceRect->x < min_x) min_x = pFaceRect->x;
			if (pFaceRect->x+pFaceRect->width > max_x) max_x = pFaceRect->x + pFaceRect->width;
			if (pFaceRect->y < min_y) min_y = pFaceRect->y;
			if (pFaceRect->y+pFaceRect->height > max_y) max_y = pFaceRect->y+pFaceRect->height;
		}
	}
	//assert(found); //some rect in history..
	CvRect consensus_rect;
	consensus_rect.x = min_x;
	consensus_rect.y = min_y;
	consensus_rect.width  = max_x - min_x;
	consensus_rect.height = max_y - min_y;

	Sleep(3000); //just to make sure that pruneHistory isn't modifying..

	cout << "start playback loop " << endl;
	int k = 0;
	// Phase 2: replay history, annotate, and write the output video(s).
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		cout << ++k << endl;
		//cvResetImageROI(history_itr->pFrame); //now reset by FDFaceThread
		pfd_pVideoFrameCopy = cvCreateImage( cvGetSize(itr->pFrame ), 8, 3 ); //TODO query image for its properties
		cvCopy( itr->pFrame , pfd_pVideoFrameCopy, 0 );
		CvSeq* pFacesSeq = itr->pFacesSeq;
#ifndef NO_RECTS_ON_PLAYBACK
		for(int i = 0 ;i < pFacesSeq->total ;i++){
			Face * pFaceRect = (Face*)cvGetSeqElem(pFacesSeq, i);
			assert(pFaceRect != NULL);
			CvPoint pt1 = cvPoint(pFaceRect->x,pFaceRect->y);
			CvPoint pt2 = cvPoint(pFaceRect->x + pFaceRect->width,pFaceRect->y + pFaceRect->height);
			if (itr->frame_id == pFaceRect->frame_id)
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i%3],3,8,0);
			else
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i%3],1,4,0);
		}
#endif
		if (pFacesSeq->total > 0)
		{
			assert(found); //write 1st sequence if exists to cropped vid
			// Lazily create the cropped writer on the first frame that
			// actually contains a face, sized to the consensus rect.
			if (!croppedVidWriter)
				croppedVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\cropped_playback.avi").c_str(),
							PFD_VIDEO_OUTPUT_FORMAT, fps,cvSize(max_x-min_x,max_y-min_y),isColor);
			assert(croppedVidWriter);

			cvResetImageROI(pfd_pVideoFrameCopy);
			cvSetImageROI(pfd_pVideoFrameCopy,consensus_rect);
			//write cropped image to video file
			IplImage *croppedImg = cvCreateImage(cvGetSize(pfd_pVideoFrameCopy),
								pfd_pVideoFrameCopy->depth,
								pfd_pVideoFrameCopy->nChannels);
			assert(croppedImg);
			cvCopy(pfd_pVideoFrameCopy, croppedImg, NULL);
			assert(croppedVidWriter);
			cvWriteFrame(croppedVidWriter,croppedImg);
			cvReleaseImage(&croppedImg);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvResetImageROI(pfd_pVideoFrameCopy); //CROP_PLAYBACK_FACE
		cvWriteFrame(playbackVidWriter,pfd_pVideoFrameCopy);
		if( (char)27==cvWaitKey(1) )
			break;//exitProgram(0);
		Sleep(50);
		++playback_counter;
	}

	cvReleaseVideoWriter(&playbackVidWriter);
	cvReleaseVideoWriter(&croppedVidWriter);
	exitProgram(0);
	//-----------------------------------------------------------
	//-----------------------------------------------------------
	//-----------------------------------------------------------
}