/**
 * Run flandmark landmark detection on @p inputImage and log the results.
 *
 * @param inputImage  face image (color or grayscale)
 * @return the input image unchanged — landmarks are currently only logged;
 *         no alignment transform is applied yet (TODO).
 */
cv::Mat FlandmarkAligner::align(const cv::Mat& inputImage)
{
    // No-op when the flandmark model failed to load.
    if (!d->isLoaded())
    {
        return inputImage;
    }

    cv::Mat image;

    // flandmark operates on a single-channel image; convert when needed.
    // NOTE(review): CV_RGB2GRAY assumes RGB channel order — images loaded via
    // OpenCV are usually BGR; confirm the channel order of inputImage.
    if (inputImage.channels() > 1)
    {
        cvtColor(inputImage, image, CV_RGB2GRAY);
    }
    else
    {
        image = inputImage;
    }

    qCDebug(LIBKFACE_LOG) << "Detecting" << d->model->data.options.M << "landmarks";

    // flandmark writes 2*M doubles: (x,y) pairs for M landmarks.
    QVector<double> landmarks(2*d->model->data.options.M);

    // bbox with detected face (format: top_left_col top_left_row bottom_right_col bottom_right_row)
    // Fixed: a hard-coded debug bbox {30,30,120,120} was left in, so detection
    // only ever searched a fixed 90x90 region. Use the whole image, as the
    // caller is expected to pass an already-cropped face.
    int bbox[] = { 0, 0, image.cols, image.rows };

    IplImage iplImage = image;
    flandmark_detect(&iplImage, bbox, d->model, landmarks.data());

    for (int i = 0; i < d->model->data.options.M; i++)
    {
        qCDebug(LIBKFACE_LOG) << "Landmark" << i << landmarks.at(2*i) << ", " << landmarks.at(2*i+1);
    }

    return inputImage;
}
void detectFaceInImage(IplImage *orig, IplImage* input, CvHaarClassifierCascade* cascade, FLANDMARK_Model *model, int *bbox, double *landmarks) { // Smallest face size. CvSize minFeatureSize = cvSize(40, 40); int flags = CV_HAAR_DO_CANNY_PRUNING; // How detailed should the search be. float search_scale_factor = 1.1f; CvMemStorage* storage; CvSeq* rects; int nFaces; storage = cvCreateMemStorage(0); cvClearMemStorage(storage); // Detect all the faces in the greyscale image. rects = cvHaarDetectObjects(input, cascade, storage, search_scale_factor, 2, flags, minFeatureSize); nFaces = rects->total; double t = (double)cvGetTickCount(); for (int iface = 0; iface < (rects ? nFaces : 0); ++iface) { CvRect *r = (CvRect*)cvGetSeqElem(rects, iface); bbox[0] = r->x; bbox[1] = r->y; bbox[2] = r->x + r->width; bbox[3] = r->y + r->height; flandmark_detect(input, bbox, model, landmarks); // display landmarks cvRectangle(orig, cvPoint(bbox[0], bbox[1]), cvPoint(bbox[2], bbox[3]), CV_RGB(255,0,0) ); cvRectangle(orig, cvPoint(model->bb[0], model->bb[1]), cvPoint(model->bb[2], model->bb[3]), CV_RGB(0,0,255) ); cvCircle(orig, cvPoint((int)landmarks[0], (int)landmarks[1]), 3, CV_RGB(0, 0,255), CV_FILLED); for (int i = 2; i < 2*model->data.options.M; i += 2) { cvCircle(orig, cvPoint(int(landmarks[i]), int(landmarks[i+1])), 3, CV_RGB(255,0,0), CV_FILLED); } } t = (double)cvGetTickCount() - t; int ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) ); if (nFaces > 0) { printf("Faces detected: %d; Detection of facial landmark on all faces took %d ms\n", nFaces, ms); } else { printf("NO Face\n"); } cvReleaseMemStorage(&storage); }
int main( int argc, char** argv ) { char flandmark_window[] = "flandmark_simple_example"; double t; int ms; int * bbox = (int*)malloc(4*sizeof(int)); if (argc < 6) { fprintf(stderr, "Usage: flandmark_1 <path_to_input_image> <face_bbox - 4int> [<path_to_output_image>]\n"); exit(1); } //cvNamedWindow(flandmark_window, 0 ); t = (double)cvGetTickCount(); FLANDMARK_Model * model = flandmark_init("flandmark_model.dat"); if (model == 0) { printf("Structure model wasn't created. Corrupted file flandmark_model.dat?\n"); exit(1); } t = (double)cvGetTickCount() - t; ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) ); printf("Structure model loaded in %d ms.\n", ms); // input image IplImage *img = cvLoadImage(argv[1]); if (img == NULL) { //fprintf(stderr, "Wrong path to image. Exiting...\n"); fprintf(stderr, "Cannot open image %s. Exiting...\n", argv[1]); exit(1); } // convert image to grayscale IplImage *img_grayscale = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_8U, 1); cvCvtColor(img, img_grayscale, CV_BGR2GRAY); // face bbox bbox[0] = ::atoi(argv[2]); bbox[1] = ::atoi(argv[3]); bbox[2] = ::atoi(argv[4]); bbox[3] = ::atoi(argv[5]); // call flandmark_detect t = (double)cvGetTickCount(); float * landmarks = (float*)malloc(2*model->data.options.M*sizeof(float)); if(flandmark_detect(img_grayscale, bbox, model, landmarks)) { printf("Error during detection.\n"); } t = (double)cvGetTickCount() - t; ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) ); printf("Landmarks detected in %d ms.\n", ms); // cvRectangle(img, cvPoint(bbox[0], bbox[1]), cvPoint(bbox[2], bbox[3]), CV_RGB(255,0,0) ); // cvRectangle(img, cvPoint(model->bb[0], model->bb[1]), cvPoint(model->bb[2], model->bb[3]), CV_RGB(0,0,255) ); cvCircle(img, cvPoint((int)landmarks[0], (int)landmarks[1]), 3, CV_RGB(0, 0,255), CV_FILLED); for (int i = 2; i < 2*model->data.options.M; i += 2) { cvCircle(img, cvPoint(int(landmarks[i]), int(landmarks[i+1])), 3, CV_RGB(255,0,0), CV_FILLED); } 
printf("detection = \t["); for (int ii = 0; ii < 2*model->data.options.M; ii+=2) { printf("%.2f ", landmarks[ii]); } printf("]\n"); printf("\t\t["); for (int ii = 1; ii < 2*model->data.options.M; ii+=2) { printf("%.2f ", landmarks[ii]); } printf("]\n"); cvShowImage(flandmark_window, img); cvWaitKey(0); if (argc == 3) { printf("Saving image to file %s...\n", argv[2]); cvSaveImage(argv[2], img); } // cleanup cvDestroyWindow(flandmark_window); cvReleaseImage(&img); cvReleaseImage(&img_grayscale); free(landmarks); free(bbox); flandmark_free(model); }
/**
 * Detect exactly one face in the image file @p imgname, estimate its in-plane
 * rotation from flandmark eye/nose landmark pairs, and return the cropped,
 * de-rotated face resized to 100x100.
 *
 * @param imgname path of the image to process
 * @return 100x100 aligned face, or an empty Mat when the image cannot be
 *         opened, is out of size bounds, or does not contain exactly one face.
 */
Mat Detector::detect(string imgname){
    //cout<<"Debug: "<<debug<<endl;
    Mat resized;

    // flags 2|4 = ANYDEPTH|ANYCOLOR so unusual bit depths still load
    IplImage *frame = cvLoadImage(imgname.data(), 2|4);
    if (frame == NULL) {
        fprintf(stderr, "Cannot open image %s.Returning empty Mat...\n", imgname.data());
        return resized;
    }
    else if (frame->width < 100 || frame->height < 100) {
        fprintf(stderr, "image %s too small.Returning empty Mat...\n", imgname.data());
        cvReleaseImage(&frame);
        return resized;
    }
    else if (frame->width > 100000 || frame->height > 100000) {
        fprintf(stderr, "image %s too large.Returning empty Mat...\n", imgname.data());
        cvReleaseImage(&frame);
        return resized;
    }

    // convert image to grayscale for detection
    IplImage *frame_bw = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    cvConvertImage(frame, frame_bw);

    // deep copy (copyData=1) so frame_mat stays valid after cvReleaseImage(&frame)
    Mat frame_mat(frame, 1);

    // Smallest face size.
    CvSize minFeatureSize = cvSize(100, 100);
    int flags = CV_HAAR_DO_CANNY_PRUNING;
    // How detailed should the search be.
    float search_scale_factor = 1.1f;

    CvMemStorage* storage = cvCreateMemStorage(0);
    cvClearMemStorage(storage);

    // Detect all the faces in the greyscale image.
    CvSeq* rects = cvHaarDetectObjects(frame_bw, faceCascade, storage, search_scale_factor, 2, flags, minFeatureSize);
    //rects = MBLBPDetectMultiScale(frame_bw, faceCascade, storage, 1229, 1, 50, 500);
    int nFaces = rects->total;
    if (nFaces != 1){
        if (debug)
            printf("%d faces detected\n", nFaces);
        // Fixed: the original re-assigned storage = cvCreateMemStorage(0)
        // immediately before this release, leaking the storage that actually
        // holds the detection results and freeing a fresh empty one instead.
        cvReleaseMemStorage(&storage);
        cvReleaseImage(&frame_bw);
        cvReleaseImage(&frame);
        return resized;
    }

    int iface = 0;
    CvRect *r = (CvRect*)cvGetSeqElem(rects, iface);
    double* landmarks = new double[2*fmodel->data.options.M];
    int bbox[4];
    bbox[0] = r->x;
    bbox[1] = r->y;
    bbox[2] = r->x + r->width;
    bbox[3] = r->y + r->height;

    // Detect landmarks
    flandmark_detect(frame_bw, bbox, fmodel, landmarks);

    // Three independent rotation estimates from landmark (x,y) pairs
    // (presumably eye-corner and nose pairs per the flandmark layout —
    // confirm against the model documentation).
    double angle[3];
    angle[0] = atan((landmarks[7]-landmarks[9])/(landmarks[6]-landmarks[8]));
    angle[1] = atan((landmarks[11]-landmarks[13])/(landmarks[10]-landmarks[12]));
    angle[2] = atan((landmarks[3]-landmarks[5])/(landmarks[2]-landmarks[4]));
    //cout<<angle[0]*180<<" "<<angle[1]*180<<" "<<angle[2]*180<<endl;

    // median of the three estimates, robust to one outlier
    double angle_rotate = 0;
    if (angle[0] > angle[1]){
        if (angle[1] > angle[2])
            angle_rotate = angle[1];
        else if (angle[0] > angle[2])
            angle_rotate = angle[2];
        else
            angle_rotate = angle[0];
    }
    else{
        if (angle[1] < angle[2])
            angle_rotate = angle[1];
        else if (angle[2] < angle[0])
            angle_rotate = angle[0];
        else
            angle_rotate = angle[2];
    }

    Rect faceRect(r->x, r->y,r->width, r->height);

    // save annotated face to tmp if debug
    if (debug){
        cvRectangle(frame, cvPoint(bbox[0], bbox[1]), cvPoint(bbox[2], bbox[3]), CV_RGB(255,0,0) );
        cvRectangle(frame, cvPoint(fmodel->bb[0], fmodel->bb[1]), cvPoint(fmodel->bb[2], fmodel->bb[3]), CV_RGB(0,0,255) );
        cvCircle(frame, cvPoint((int)landmarks[0], (int)landmarks[1]), 3, CV_RGB(0,0,255), CV_FILLED);
        // remaining landmarks in red (loop replaces the original's eight
        // hand-unrolled cvCircle calls for indices 2..15; identical for M == 8)
        for (int i = 2; i < 2*fmodel->data.options.M; i += 2){
            cvCircle(frame, cvPoint(int(landmarks[i]), int(landmarks[i+1])), 3, CV_RGB(255,0,0), CV_FILLED);
        }
        Mat face(frame, 0);   // shallow view; frame is still alive here
        Mat croppedFaceImage = face(faceRect).clone();
        Mat rotated = rotateImage(croppedFaceImage, angle_rotate * 180 / PI);
        resize(rotated, resized, Size(100, 100));
        imwrite( "./tmp/face.jpg" , resized );
    }

    delete [] landmarks;
    // Fixed: same leak as above — release the real storage, do not create a
    // new one first.
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&frame_bw);
    cvReleaseImage(&frame);

    if (faceRect.height < 50 && faceRect.width < 50){
        printf("Face too small: %d x %d\n", faceRect.height, faceRect.width);
        return resized;
    }

    Mat croppedFaceImage = frame_mat(faceRect).clone();
    Mat rotated = rotateImage(croppedFaceImage, angle_rotate * 180 / PI);
    resize(rotated, resized, Size(100, 100));
    return resized;
}
//Compute Landmark void FaceVue::my_Landmark(IplImage* input,double* value,int *bbox2) { flandmark_detect(input, value ,bbox2, landmark_Model, landmarks); }
//Return region of detected face and target_Face is filled CvRect FaceVue::detect_FaceROI(const Mat&frame) { is_Face_Found = false; Mat input; cv::cvtColor (frame, input, CV_RGB2GRAY); cv::equalizeHist (input, input); std::vector<Rect> faces; /// TO DO What is 1.25,2, Size(30,30) detection_Model->detectMultiScale( input, faces, 1.25, 2, Size(30, 30) ); double value; int maxArea = 0; target_Face->index = -1; // stores detected face ROI CvRect rect; for (unsigned int iface = 0; iface < faces.size(); ++iface) { bbox[0] = faces[iface].x; bbox[1] = faces[iface].y; bbox[2] = faces[iface].x + faces[iface].width; bbox[3] = faces[iface].y + faces[iface].height; IplImage ipl_input = input; flandmark_detect(&ipl_input, &value ,bbox, landmark_Model, landmarks); if(value > detection_threshold) { if(maxArea <= faces[iface].width*faces[iface].height) { is_Face_Found = true; maxArea = faces[iface].width*faces[iface].height; target_Face->index = iface; target_Face->left_eye_x = (landmarks[4]+landmarks[12])/2; target_Face->left_eye_y = (landmarks[5]+landmarks[13])/2; target_Face->right_eye_x = (landmarks[2]+landmarks[10])/2; target_Face->right_eye_y = (landmarks[3]+landmarks[11])/2; target_Face->mouth_x = (landmarks[6]+landmarks[8])/2; target_Face->mouth_y = (landmarks[7]+landmarks[9])/2; rect = cvRect(faces[iface].x - faces[iface].width/4,faces[iface].y - faces[iface].height/4,3*faces[iface].width/2,3*faces[iface].height/2); if(rect.x + rect.width > input.size().width) { rect.width -= 2*(rect.width + rect.x - input.size().width); rect.x += rect.width + rect.x -input.size().width; } if(rect.y + rect.height > input.size().height) { rect.height -= 2*(rect.height + rect.y - input.size().height); rect.y += rect.height + rect.y - input.size().height; } if(rect.x < 0) { rect.width += 2*rect.x; rect.x=0; } if(rect.y < 0) { rect.height += 2*rect.y; rect.y=0; } } } } if(target_Face->index != -1) { target_Face->p1_x = faces[target_Face->index].x; target_Face->p1_y = 
faces[target_Face->index].y; target_Face->p2_x = faces[target_Face->index].x + faces[target_Face->index].width; target_Face->p2_y = faces[target_Face->index].y +faces[target_Face->index].height; } return rect; }