void VideoFaceDetector::detectFacesTemplateMatching(const cv::Mat &frame) { // Calculate duration of template matching m_templateMatchingCurrentTime = cv::getTickCount(); double duration = (double)(m_templateMatchingCurrentTime - m_templateMatchingStartTime) / TICK_FREQUENCY; // If template matching lasts for more than 2 seconds face is possibly lost // so disable it and redetect using cascades if (duration > m_templateMatchingMaxDuration) { m_foundFace = false; m_templateMatchingRunning = false; m_templateMatchingStartTime = m_templateMatchingCurrentTime = 0; m_facePosition.x = m_facePosition.y = 0; m_trackedFace.x = m_trackedFace.y = m_trackedFace.width = m_trackedFace.height = 0; return; } // Edge case when face exits frame while if (m_faceTemplate.rows * m_faceTemplate.cols == 0 || m_faceTemplate.rows <= 1 || m_faceTemplate.cols <= 1) { m_foundFace = false; m_templateMatchingRunning = false; m_templateMatchingStartTime = m_templateMatchingCurrentTime = 0; m_facePosition.x = m_facePosition.y = 0; m_trackedFace.x = m_trackedFace.y = m_trackedFace.width = m_trackedFace.height = 0; return; } // Template matching with last known face //cv::matchTemplate(frame(m_faceRoi), m_faceTemplate, m_matchingResult, CV_TM_CCOEFF); cv::matchTemplate(frame(m_faceRoi), m_faceTemplate, m_matchingResult, CV_TM_SQDIFF_NORMED); cv::normalize(m_matchingResult, m_matchingResult, 0, 1, cv::NORM_MINMAX, -1, cv::Mat()); double min, max; cv::Point minLoc, maxLoc; cv::minMaxLoc(m_matchingResult, &min, &max, &minLoc, &maxLoc); // Add roi offset to face position minLoc.x += m_faceRoi.x; minLoc.y += m_faceRoi.y; // Get detected face //m_trackedFace = cv::Rect(maxLoc.x, maxLoc.y, m_trackedFace.width, m_trackedFace.height); m_trackedFace = cv::Rect(minLoc.x, minLoc.y, m_faceTemplate.cols, m_faceTemplate.rows); m_trackedFace = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows)); // Get new face template m_faceTemplate = getFaceTemplate(frame, m_trackedFace); // Calculate face roi 
m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows)); // Update face position m_facePosition = centerOfRect(m_trackedFace); }
// Track the face by matching the last known face template inside the current
// ROI. If matching has been running for more than 2 seconds the face is
// considered lost: tracking state is reset so cascade re-detection takes over.
void detectFacesTemplateMatching(cv::Mat &frame)
{
    // Duration of the current template-matching run, in seconds.
    template_matching_current_time = cv::getTickCount();
    double duration = (double)(template_matching_current_time - template_matching_start_time) / TICK_FREQUENCY;

    // If template matching lasts for more than 2 seconds the face is possibly
    // lost, so disable it and redetect using cascades.
    if (duration > 2) {
        found_face = false;
        template_matching_running = false;
        template_matching_start_time = template_matching_current_time = 0;
        // BUG FIX: bail out here. Previously the function fell through and
        // kept matching a stale template, overwriting face/face_roi even
        // though the tracking state had just been reset.
        return;
    }

    // Template matching with the last known face.
    // CCOEFF: the best match is the MAXIMUM of the result map.
    cv::matchTemplate(frame(face_roi), face_template, matching_result, CV_TM_CCOEFF);
    cv::normalize(matching_result, matching_result, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());

    double min, max;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(matching_result, &min, &max, &min_loc, &max_loc);

    // Convert from ROI-local to full-frame coordinates.
    max_loc.x += face_roi.x;
    max_loc.y += face_roi.y;

    // Re-seat the detected face at the best-match position (size unchanged).
    face = cv::Rect(max_loc.x, max_loc.y, face.width, face.height);

    // Refresh the template and the search ROI from the new position.
    face_template = frame(face).clone();
    face_roi = doubleRectSize(face, cv::Rect(0, 0, frame.cols, frame.rows));
}
// Re-detect the face inside the ROI around its last known position, looking
// only for faces sized +/-20% of the previous detection. On a miss, start the
// template-matching fallback timer; on a hit, reset it and refresh the
// tracked face, its template, and the search ROI.
void detectFaceAroundRoi(cv::Mat &frame)
{
    // Detect faces sized +/-20% off the biggest face found in the previous search.
    // BUG FIX: the max-size height previously used face.width (copy-paste typo);
    // it now mirrors the min size and uses face.height.
    cascade_classifier->detectMultiScale(frame(face_roi), faces, 1.1, 3, 0,
        cv::Size(face.width * 8 / 10, face.height * 8 / 10),
        cv::Size(face.width * 12 / 10, face.height * 12 / 10));

    if (faces.empty()) {
        // Activate template matching if not already started and start the timer.
        template_matching_running = true;
        if (template_matching_start_time == 0)
            template_matching_start_time = cv::getTickCount();
        return;
    }

    // Turn off template matching if running and reset the timer.
    template_matching_running = false;
    template_matching_current_time = template_matching_start_time = 0;

    // Keep the biggest detection, converted from ROI-local to frame coordinates.
    face = biggestFace(faces);
    face.x += face_roi.x;
    face.y += face_roi.y;

    // Refresh the face template and the search ROI.
    face_template = frame(face).clone();
    face_roi = doubleRectSize(face, cv::Rect(0, 0, frame.cols, frame.rows));
}
void VideoFaceDetector::detectFaceAroundRoi(const cv::Mat &frame) { // Detect faces sized +/-20% off biggest face in previous search //m_faceCascade->detectMultiScale(frame(m_faceRoi), m_allFaces, 1.1, 3, 0, // cv::Size(m_trackedFace.width * 8 / 10, m_trackedFace.height * 8 / 10), // cv::Size(m_trackedFace.width * 12 / 10, m_trackedFace.width * 12 / 10)); cv::Mat gray; cv::cvtColor(frame(m_faceRoi), gray, cv::COLOR_BGR2GRAY); int * pResults = NULL; pResults = facedetect_multiview_reinforce((unsigned char*)(gray.ptr(0)), gray.cols, gray.rows, gray.step, 1.2f, 5, m_trackedFace.width * 8 / 10, m_trackedFace.width * 12 / 10); //printf("%d reinforced multiview faces detected.\n", (pResults ? *pResults : 0)); //print the detection results m_allFaces.clear(); for (int i = 0; i < (pResults ? *pResults : 0); i++) { short * p = ((short*)(pResults + 1)) + 6 * i; int x = p[0]; int y = p[1]; int w = p[2]; int h = p[3]; int neighbors = p[4]; int angle = p[5]; cv::Rect face_rect = cv::Rect(p[0], p[1], p[2], p[3]); m_allFaces.push_back(cv::Rect(p[0], p[1], p[2], p[3])); //cv::rectangle(frame, face_rect, cv::Scalar(0, 0, 255)); } if (m_allFaces.empty()) { // Activate template matching if not already started and start timer m_templateMatchingRunning = true; if (m_templateMatchingStartTime == 0) m_templateMatchingStartTime = cv::getTickCount(); return; } // Turn off template matching if running and reset timer m_templateMatchingRunning = false; m_templateMatchingCurrentTime = m_templateMatchingStartTime = 0; // Get detected face m_trackedFace = biggestFace(m_allFaces); // Add roi offset to face m_trackedFace.x += m_faceRoi.x; m_trackedFace.y += m_faceRoi.y; // Get face template m_faceTemplate = getFaceTemplate(frame, m_trackedFace); // Calculate roi m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows)); // Update face position m_facePosition = centerOfRect(m_trackedFace); }
// Full-frame face detection using the libfacedetect multiview detector.
// Minimum face size is 1/5th of screen height; maximum is 2/3rds of it.
// On success, initialises the tracked face, its template, and the search ROI.
void VideoFaceDetector::detectFaceAllSizes(const cv::Mat &frame)
{
    // The detector expects a single-channel image; assumes `frame` is BGR.
    cv::Mat gray;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

    int *pResults = facedetect_multiview_reinforce(
        (unsigned char *)(gray.ptr(0)), gray.cols, gray.rows, gray.step,
        1.2f, 5,
        frame.rows / 5,
        frame.rows * 2 / 3);

    // Result layout: pResults[0] = count, followed by 6 shorts per face:
    // x, y, w, h, neighbors, angle. Only the rectangle is used here.
    // BUG FIX: a local `std::vector<cv::Rect> m_allFaces` previously shadowed
    // the class member, leaving the member stale; the member is now used,
    // matching detectFaceAroundRoi.
    m_allFaces.clear();
    const int faceCount = pResults ? *pResults : 0;
    for (int i = 0; i < faceCount; i++) {
        const short *p = ((const short *)(pResults + 1)) + 6 * i;
        m_allFaces.push_back(cv::Rect(p[0], p[1], p[2], p[3]));
    }

    if (m_allFaces.empty())
        return;

    m_foundFace = true;

    // Locate the biggest face and initialise tracking state from it.
    m_trackedFace = biggestFace(m_allFaces);
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));
    m_facePosition = centerOfRect(m_trackedFace);
}
// Full-frame cascade detection pass.
// Accepts faces between 1/5th and 2/3rds of the frame height; on success,
// initialises the tracked face, its template, and the search ROI.
void detectFaceAllSizes(cv::Mat &frame)
{
    const int minSide = frame.rows / 5;
    const int maxSide = frame.rows * 2 / 3;
    cascade_classifier->detectMultiScale(frame, faces, 1.1, 3, 0,
                                         cv::Size(minSide, minSide),
                                         cv::Size(maxSide, maxSide));

    if (faces.empty())
        return;

    found_face = true;

    // Track the largest detection and seed the template matcher from it.
    face = biggestFace(faces);
    face_template = frame(face).clone();
    face_roi = doubleRectSize(face, cv::Rect(0, 0, frame.cols, frame.rows));
}
// Re-detect the face inside the ROI around its last known position, looking
// only for faces sized +/-20% of the previous detection. On a miss, start the
// template-matching fallback timer; on a hit, reset it and refresh the
// tracked face, its template, and the search ROI.
void VideoFaceDetector::detectFaceAroundRoi(const cv::Mat &frame)
{
    // Detect faces sized +/-20% off the biggest face found in the previous search.
    // BUG FIX: the max-size height previously used m_trackedFace.width
    // (copy-paste typo); it now mirrors the min size and uses height.
    m_faceCascade->detectMultiScale(frame(m_faceRoi), m_allFaces, 1.1, 3, 0,
        cv::Size(m_trackedFace.width * 8 / 10, m_trackedFace.height * 8 / 10),
        cv::Size(m_trackedFace.width * 12 / 10, m_trackedFace.height * 12 / 10));

    if (m_allFaces.empty()) {
        // Activate template matching if not already started and start the timer.
        m_templateMatchingRunning = true;
        if (m_templateMatchingStartTime == 0)
            m_templateMatchingStartTime = cv::getTickCount();
        return;
    }

    // Turn off template matching if running and reset the timer.
    m_templateMatchingRunning = false;
    m_templateMatchingCurrentTime = m_templateMatchingStartTime = 0;

    // Keep the biggest detection, converted from ROI-local to frame coordinates.
    m_trackedFace = biggestFace(m_allFaces);
    m_trackedFace.x += m_faceRoi.x;
    m_trackedFace.y += m_faceRoi.y;

    // Refresh the face template and the search ROI.
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));

    // Publish the updated face centre.
    m_facePosition = centerOfRect(m_trackedFace);
}
// Full-frame cascade detection pass.
// Accepts faces between 1/5th and 2/3rds of the frame height; on success,
// initialises the tracked face, its template, the search ROI, and position.
void VideoFaceDetector::detectFaceAllSizes(const cv::Mat &frame)
{
    const int minSide = frame.rows / 5;
    const int maxSide = frame.rows * 2 / 3;
    m_faceCascade->detectMultiScale(frame, m_allFaces, 1.1, 3, 0,
                                    cv::Size(minSide, minSide),
                                    cv::Size(maxSide, maxSide));

    if (m_allFaces.empty())
        return;

    m_foundFace = true;

    // Track the largest detection and seed the template matcher from it.
    m_trackedFace = biggestFace(m_allFaces);
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));
    m_facePosition = centerOfRect(m_trackedFace);
}