Code example #1
File: main.cpp  Project: jquinchi/MVE
void detectFaceAroundRoi(cv::Mat &frame)
{
    // Detect faces sized +/-20% off biggest face in previous search
    cascade_classifier->detectMultiScale(frame(face_roi), faces, 1.1, 3, 0,
        cv::Size(face.width * 8 / 10, face.height * 8 / 10),
        cv::Size(face.width * 12 / 10, face.height * 12 / 10));

    if (faces.empty())
    {
        // Activate template matching if not already started and start timer
        template_matching_running = true;
        if (template_matching_start_time == 0)
            template_matching_start_time = cv::getTickCount();
        return;
    }

    // Turn off template matching if running and reset timer
    template_matching_running = false;
    template_matching_current_time = template_matching_start_time = 0;

    // Get detected face
    face = biggestFace(faces);

    // Add roi offset to face
    face.x += face_roi.x;
    face.y += face_roi.y;

    // Get face template
    face_template = frame(face).clone();

    // Calculate roi
    face_roi = doubleRectSize(face, cv::Rect(0, 0, frame.cols, frame.rows));
}
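
The helper functions biggestFace and doubleRectSize are called throughout this listing but never shown. The sketch below is inferred from how they are used (pick the largest detection; expand the face rectangle into a search ROI clamped to the frame); the originals in each project may differ in detail.

// Hypothetical sketch of the helpers used above, not taken from the listed projects.
cv::Rect biggestFace(const std::vector<cv::Rect> &faces)
{
    // Pick the detection with the largest area
    cv::Rect biggest;
    for (const auto &f : faces)
        if (f.area() > biggest.area())
            biggest = f;
    return biggest;
}

cv::Rect doubleRectSize(const cv::Rect &input, const cv::Rect &boundary)
{
    // Grow the rectangle to twice its size around its center,
    // then clamp it to the boundary (the full frame)
    cv::Rect output(input.x - input.width / 2, input.y - input.height / 2,
                    input.width * 2, input.height * 2);
    return output & boundary;
}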
Code example #2
void VideoFaceDetector::detectFaceAroundRoi(const cv::Mat &frame)
{
    // Detect faces sized +/-20% off biggest face in previous search
    //m_faceCascade->detectMultiScale(frame(m_faceRoi), m_allFaces, 1.1, 3, 0,
    //    cv::Size(m_trackedFace.width * 8 / 10, m_trackedFace.height * 8 / 10),
    //    cv::Size(m_trackedFace.width * 12 / 10, m_trackedFace.width * 12 / 10));

    cv::Mat gray;
    cv::cvtColor(frame(m_faceRoi), gray, cv::COLOR_BGR2GRAY);

    // Run the facedetect_multiview_reinforce detector on the ROI instead of the cascade
    int *pResults = facedetect_multiview_reinforce((unsigned char *)gray.ptr(0), gray.cols, gray.rows, gray.step,
        1.2f, 5, m_trackedFace.width * 8 / 10, m_trackedFace.width * 12 / 10);

    // Collect the detections: each result is six shorts (x, y, w, h, neighbors, angle)
    m_allFaces.clear();
    for (int i = 0; i < (pResults ? *pResults : 0); i++)
    {
        short *p = ((short *)(pResults + 1)) + 6 * i;
        m_allFaces.push_back(cv::Rect(p[0], p[1], p[2], p[3]));
    }
    if (m_allFaces.empty())
    {
        // Activate template matching if not already started and start timer
        m_templateMatchingRunning = true;
        if (m_templateMatchingStartTime == 0)
            m_templateMatchingStartTime = cv::getTickCount();
        return;
    }

    // Turn off template matching if running and reset timer
    m_templateMatchingRunning = false;
    m_templateMatchingCurrentTime = m_templateMatchingStartTime = 0;

    // Get detected face
    m_trackedFace = biggestFace(m_allFaces);

    // Add roi offset to face
    m_trackedFace.x += m_faceRoi.x;
    m_trackedFace.y += m_faceRoi.y;

    // Get face template
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);

    // Calculate roi
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));

    // Update face position
    m_facePosition = centerOfRect(m_trackedFace);
}
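
getFaceTemplate is not included in this listing. Example #1 clones the face region inline (face_template = frame(face).clone()), so a minimal member-function equivalent would be the sketch below; the actual implementation in these projects may crop or resize the region differently.

cv::Mat VideoFaceDetector::getFaceTemplate(const cv::Mat &frame, cv::Rect face)
{
    // Clone so the template stays valid after the frame buffer is reused
    return frame(face).clone();
}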
Code example #3
void VideoFaceDetector::detectFaceAllSizes(const cv::Mat &frame)
{
	// Minimum face size is 1/5th of screen height
	// Maximum face size is 2/3rds of screen height
	/*   m_faceCascade->detectMultiScale(frame, m_allFaces, 1.1, 3, 0,
	cv::Size(frame.rows / 5, frame.rows / 5),
	cv::Size(frame.rows * 2 / 3, frame.rows * 2 / 3));*/

	cv::Mat gray;
	cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

	// Run the facedetect_multiview_reinforce detector over the whole frame
	int *pResults = facedetect_multiview_reinforce((unsigned char *)gray.ptr(0), gray.cols, gray.rows, gray.step,
		1.2f, 5, frame.rows / 5, frame.rows * 2 / 3);

	// Collect the detections: each result is six shorts (x, y, w, h, neighbors, angle)
	m_allFaces.clear();
	for (int i = 0; i < (pResults ? *pResults : 0); i++)
	{
		short *p = ((short *)(pResults + 1)) + 6 * i;
		m_allFaces.push_back(cv::Rect(p[0], p[1], p[2], p[3]));
	}

	if (m_allFaces.empty()) return;

	m_foundFace = true;

	// Locate biggest face
	m_trackedFace = biggestFace(m_allFaces);

	// Copy face template
	m_faceTemplate = getFaceTemplate(frame, m_trackedFace);

	// Calculate roi
	m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));

	// Update face position
	m_facePosition = centerOfRect(m_trackedFace);
}
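
centerOfRect, used to update m_facePosition, is also absent from the listing; given its name and how it is called, it presumably reduces to the sketch below.

cv::Point centerOfRect(const cv::Rect &rect)
{
    // Center of the rectangle in frame coordinates
    return cv::Point(rect.x + rect.width / 2, rect.y + rect.height / 2);
}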
Code example #4
File: main.cpp  Project: jquinchi/MVE
void detectFaceAllSizes(cv::Mat &frame)
{
    // Minimum face size is 1/5th of screen height
    // Maximum face size is 2/3rds of screen height
    cascade_classifier->detectMultiScale(frame, faces, 1.1, 3, 0,
        cv::Size(frame.rows / 5, frame.rows / 5),
        cv::Size(frame.rows * 2 / 3, frame.rows * 2 / 3));

    if (faces.empty()) return;

    found_face = true;

    // Locate biggest face
    face = biggestFace(faces);

    // Copy face template
    face_template = frame(face).clone();

    // Calculate roi
    face_roi = doubleRectSize(face, cv::Rect(0, 0, frame.cols, frame.rows));
}
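
Examples #1 and #4 operate on globals (cascade_classifier, faces, face, face_roi, face_template, found_face) that main.cpp declares elsewhere. A hypothetical capture loop tying the two functions together, assuming those globals exist and the cascade has already been loaded, could look like this:

int main()
{
    cv::VideoCapture cap(0);
    cv::Mat frame;
    while (cap.read(frame))
    {
        if (!found_face)
            detectFaceAllSizes(frame);   // full-frame scan until the first hit
        else
            detectFaceAroundRoi(frame);  // afterwards, search only around the last face
        if (found_face)
            cv::rectangle(frame, face, cv::Scalar(0, 255, 0));
        cv::imshow("face", frame);
        if (cv::waitKey(1) == 27)        // ESC quits
            break;
    }
    return 0;
}

Restricting the second call to a small ROI is what keeps per-frame detection cheap once a face has been located.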
Code example #5
File: VideoFaceDetector.cpp  Project: hgl888/dlib
void VideoFaceDetector::detectFaceAroundRoi(const cv::Mat &frame)
{
    // Detect faces sized +/-20% off biggest face in previous search
    m_faceCascade->detectMultiScale(frame(m_faceRoi), m_allFaces, 1.1, 3, 0,
        cv::Size(m_trackedFace.width * 8 / 10, m_trackedFace.height * 8 / 10),
        cv::Size(m_trackedFace.width * 12 / 10, m_trackedFace.height * 12 / 10));

    if (m_allFaces.empty())
    {
        // Activate template matching if not already started and start timer
        m_templateMatchingRunning = true;
        if (m_templateMatchingStartTime == 0)
            m_templateMatchingStartTime = cv::getTickCount();
        return;
    }

    // Turn off template matching if running and reset timer
    m_templateMatchingRunning = false;
    m_templateMatchingCurrentTime = m_templateMatchingStartTime = 0;

    // Get detected face
    m_trackedFace = biggestFace(m_allFaces);

    // Add roi offset to face
    m_trackedFace.x += m_faceRoi.x;
    m_trackedFace.y += m_faceRoi.y;

    // Get face template
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);

    // Calculate roi
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));

    // Update face position
    m_facePosition = centerOfRect(m_trackedFace);
}
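
The m_templateMatchingRunning / m_templateMatchingStartTime members imply a template-matching fallback that runs while the cascade misses inside the ROI, but that function is not part of this listing. The following is only a sketch of what such a fallback typically looks like with cv::matchTemplate; the function name, the two-second budget, and the exact bookkeeping are assumptions, not the projects' actual code.

// Hypothetical sketch of the fallback implied by the m_templateMatching* members:
// search the ROI for the stored face template and give up after a fixed time budget.
void VideoFaceDetector::detectFacesTemplateMatching(const cv::Mat &frame)
{
    m_templateMatchingCurrentTime = cv::getTickCount();
    double elapsed = (m_templateMatchingCurrentTime - m_templateMatchingStartTime)
                     / cv::getTickFrequency();
    if (elapsed > 2.0)   // assumed time budget in seconds
    {
        m_templateMatchingRunning = false;
        m_templateMatchingStartTime = m_templateMatchingCurrentTime = 0;
        m_foundFace = false;   // fall back to a full-frame search
        return;
    }

    cv::Mat result;
    cv::matchTemplate(frame(m_faceRoi), m_faceTemplate, result, cv::TM_SQDIFF_NORMED);
    double minVal;
    cv::Point minLoc;
    cv::minMaxLoc(result, &minVal, nullptr, &minLoc, nullptr);

    // Best match becomes the new tracked face (converted to frame coordinates)
    m_trackedFace = cv::Rect(minLoc.x + m_faceRoi.x, minLoc.y + m_faceRoi.y,
                             m_faceTemplate.cols, m_faceTemplate.rows);
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));
    m_facePosition = centerOfRect(m_trackedFace);
}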
Code example #6
File: VideoFaceDetector.cpp  Project: hgl888/dlib
void VideoFaceDetector::detectFaceAllSizes(const cv::Mat &frame)
{
    // Minimum face size is 1/5th of screen height
    // Maximum face size is 2/3rds of screen height
    m_faceCascade->detectMultiScale(frame, m_allFaces, 1.1, 3, 0,
        cv::Size(frame.rows / 5, frame.rows / 5),
        cv::Size(frame.rows * 2 / 3, frame.rows * 2 / 3));

    if (m_allFaces.empty()) return;

    m_foundFace = true;

    // Locate biggest face
    m_trackedFace = biggestFace(m_allFaces);

    // Copy face template
    m_faceTemplate = getFaceTemplate(frame, m_trackedFace);

    // Calculate roi
    m_faceRoi = doubleRectSize(m_trackedFace, cv::Rect(0, 0, frame.cols, frame.rows));

    // Update face position
    m_facePosition = centerOfRect(m_trackedFace);
}
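
Taken together, the two member functions (plus the implied template-matching fallback) suggest a per-frame dispatcher that the listing does not include. A hypothetical entry point, with the name and control flow assumed rather than taken from the projects (detectFacesTemplateMatching refers to the fallback sketched after example #5), might read:

void VideoFaceDetector::detectFace(const cv::Mat &frame)
{
    // 1) No face yet: scan the whole frame at all sizes.
    // 2) Face known: re-detect only inside the ROI around the last face.
    // 3) Cascade misses inside the ROI: bridge the gap with template matching.
    if (!m_foundFace)
        detectFaceAllSizes(frame);
    else
    {
        detectFaceAroundRoi(frame);
        if (m_templateMatchingRunning)
            detectFacesTemplateMatching(frame);
    }
}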