Code example #1
	void DetailedFaceDetector::detectFeatures(const cv::Mat &frame)
	{
		//get the face rectangles
		RectVector lastRects = getAreas();

		//create a new detailed result
		DetailedFaces newDetails;
	
		for(RectVectorItr faceRect = lastRects.begin(); faceRect != lastRects.end(); ++faceRect)
		{
			//create a new entry
			FaceDetails &currentFace = newDetails[*faceRect];

			//run only the detectors that were requested (bit-flag test, not logical AND)
			if(detectWhat & EYES)
				detectEyes(frame, *faceRect, currentFace);

			if(detectWhat & NOSE)
				detectNose(frame, *faceRect, currentFace);

			if(detectWhat & MOUTH)
				detectMouth(frame, *faceRect, currentFace);
		}

		setDetailedFaceInfo(newDetails);

		notifyControllers();
	}
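
These flag tests only behave as intended if EYES, NOSE and MOUTH are distinct bit flags OR-ed together into detectWhat. The class definition is not shown here, so the following is only a minimal sketch of the assumed flag layout (the names are kept, the values are illustrative):

enum DetectionFlags
{
	EYES  = 1 << 0,
	NOSE  = 1 << 1,
	MOUTH = 1 << 2
};

// Request eyes and mouth only; (detectWhat & NOSE) then evaluates to 0
// and the nose detector is skipped.
unsigned int detectWhat = EYES | MOUTH;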
Code example #2
File: facedetector.cpp  Project: nangege/robotCute
void faceDetector::detectAllFeatures()
{
    detectFace();
    detectLeftEye();
    detectRightEye();
    detectNose();
    detectMouth();
}
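
This example wraps the individual detectors behind a single call; where the input frame comes from is not shown. Below is a minimal driver sketch under the assumption that the class exposes a setFrame() method — that method name is hypothetical and not taken from the nangege/robotCute project:

#include <opencv2/opencv.hpp>
#include "facedetector.h"   // assumed header for the faceDetector class

int main()
{
    cv::VideoCapture cap(0);            // open the default camera
    if (!cap.isOpened())
        return 1;

    faceDetector detector;
    cv::Mat frame;

    while (cap.read(frame))
    {
        // setFrame() is hypothetical; the real class may receive the frame
        // through its constructor or from another component instead.
        detector.setFrame(frame);
        detector.detectAllFeatures();   // face, eyes, nose, mouth in sequence
    }
    return 0;
}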
Code example #3
void AnchorPointSelector::choosePoints() {
	try {
		cv::Point2f eyes[2];
		cv::Point2f nose[2];
		cv::Point2f mouth[2];
		cv::Point2f eyebrows[2];

		if(!detectEyeCorners(Application::Components::videoInput->frame, Application::Components::videoInput->getResolution(), eyes)) {
			std::cout << "EYE CORNERS NOT DETECTED" << std::endl;
			return;
		}

		cv::Rect noseRect = cv::Rect(eyes[0].x, eyes[0].y, fabs(eyes[0].x - eyes[1].x), fabs(eyes[0].x - eyes[1].x));
		checkRectSize(Application::Components::videoInput->frame, &noseRect);

		if (!detectNose(Application::Components::videoInput->frame, Application::Components::videoInput->getResolution(), noseRect, nose)) {
			std::cout << "NO NOSE" << std::endl;
			return;
		}

		cv::Rect mouthRect = cv::Rect(eyes[0].x, nose[0].y, fabs(eyes[0].x - eyes[1].x), 0.8 * fabs(eyes[0].x - eyes[1].x));
		checkRectSize(Application::Components::videoInput->frame, &mouthRect);

		if (!detectMouth(Application::Components::videoInput->frame, Application::Components::videoInput->getResolution(), mouthRect, mouth)) {
			std::cout << "NO MOUTH" << std::endl;
			return;
		}

		cv::Rect eyebrowRect = cv::Rect(eyes[0].x + fabs(eyes[0].x - eyes[1].x) * 0.25, eyes[0].y - fabs(eyes[0].x - eyes[1].x) * 0.40, fabs(eyes[0].x - eyes[1].x) * 0.5, fabs(eyes[0].x - eyes[1].x) * 0.25);
		checkRectSize(Application::Components::videoInput->frame, &eyebrowRect);
		detectEyebrowCorners(Application::Components::videoInput->frame, Application::Components::videoInput->getResolution(), eyebrowRect, eyebrows);

		//cvSaveImage("cframe.jpg", Application::Components::videoInput->frame);

		_pointTracker->clearTrackers();

		_pointTracker->addTracker(eyes[0]);
		_pointTracker->addTracker(eyes[1]);
		_pointTracker->addTracker(nose[0]);
		_pointTracker->addTracker(nose[1]);
		_pointTracker->addTracker(mouth[0]);
		_pointTracker->addTracker(mouth[1]);
		_pointTracker->addTracker(eyebrows[0]);
		_pointTracker->addTracker(eyebrows[1]);
	}
	catch (std::ios_base::failure &e) {
		std::cout << e.what() << std::endl;
	}
	catch (std::exception &e) {
		std::cout << e.what() << std::endl;
	}
}
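
The nose, mouth and eyebrow ROIs above are derived purely from the eye-corner distance, so they can spill outside the image; checkRectSize() presumably clips them back into the frame. Its implementation is not shown, so this is only a sketch of what such a helper might look like:

// Hypothetical clamping helper: keep *rect inside the frame bounds.
static void checkRectSize(const cv::Mat &frame, cv::Rect *rect)
{
	// Intersecting with the full-frame rectangle clips any overhang.
	*rect &= cv::Rect(0, 0, frame.cols, frame.rows);
}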
Code example #4
	static void detectFacialFeaures(Mat& img, const vector<Rect_<int> > faces, string eye_cascade,
	        string nose_cascade, string mouth_cascade)
	{
	    for(unsigned int i = 0; i < faces.size(); ++i)
	    {
	        // Mark the bounding box enclosing the face
	        Rect face = faces[i];
	        rectangle(img, Point(face.x, face.y), Point(face.x+face.width, face.y+face.height),
	                Scalar(255, 0, 0), 1, 4);
	
	        // Eyes, nose and mouth will be detected inside the face (region of interest)
	        Mat ROI = img(Rect(face.x, face.y, face.width, face.height));
	
	        // Check if all features (eyes, nose and mouth) are being detected
	        bool is_full_detection = false;
	        if( (!eye_cascade.empty()) && (!nose_cascade.empty()) && (!mouth_cascade.empty()) )
	            is_full_detection = true;
	
	        // Detect eyes if classifier provided by the user
	        if(!eye_cascade.empty())
	        {
	            vector<Rect_<int> > eyes;
	            detectEyes(ROI, eyes, eye_cascade);
	
	            // Mark points corresponding to the centre of the eyes
	            for(unsigned int j = 0; j < eyes.size(); ++j)
	            {
	                Rect e = eyes[j];
	                circle(ROI, Point(e.x+e.width/2, e.y+e.height/2), 3, Scalar(0, 255, 0), -1, 8);
	                /* rectangle(ROI, Point(e.x, e.y), Point(e.x+e.width, e.y+e.height),
	                    Scalar(0, 255, 0), 1, 4); */
	            }
	        }
	
	        // Detect nose if classifier provided by the user
	        double nose_center_height = 0.0;
	        if(!nose_cascade.empty())
	        {
	            vector<Rect_<int> > nose;
	            detectNose(ROI, nose, nose_cascade);
	
	            // Mark points corresponding to the centre (tip) of the nose
	            for(unsigned int j = 0; j < nose.size(); ++j)
	            {
	                Rect n = nose[j];
	                circle(ROI, Point(n.x+n.width/2, n.y+n.height/2), 3, Scalar(0, 255, 0), -1, 8);
	                nose_center_height = (n.y + n.height/2);
	            }
	        }
	
	        // Detect mouth if classifier provided by the user
	        double mouth_center_height = 0.0;
	        if(!mouth_cascade.empty())
	        {
	            vector<Rect_<int> > mouth;
	            detectMouth(ROI, mouth, mouth_cascade);
	
	            for(unsigned int j = 0; j < mouth.size(); ++j)
	            {
	                Rect m = mouth[j];
	                mouth_center_height = (m.y + m.height/2);
	
	                // The mouth should lie below the nose
	                if( (is_full_detection) && (mouth_center_height > nose_center_height) )
	                {
	                    rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);
	                }
	                else if( (is_full_detection) && (mouth_center_height <= nose_center_height) )
	                    continue;
	                else
	                    rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);
	            }
	        }
	
	    }
	
	    return;
	}
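
detectFacialFeaures() expects the caller to supply the face rectangles and the cascade file paths. A minimal driver sketch using the standard cv::CascadeClassifier API follows; the image name and cascade paths are placeholders, and the call assumes the function is visible as a free function in the same translation unit:

#include <opencv2/opencv.hpp>
#include <vector>
#include <string>
using namespace cv;
using namespace std;

int main()
{
    // Placeholder paths; point these at the XML cascades shipped with OpenCV.
    string face_cascade_path  = "haarcascade_frontalface_alt.xml";
    string eye_cascade_path   = "haarcascade_eye.xml";
    string nose_cascade_path  = "haarcascade_mcs_nose.xml";
    string mouth_cascade_path = "haarcascade_mcs_mouth.xml";

    Mat img = imread("face.jpg");   // placeholder input image
    if (img.empty())
        return 1;

    // Detect the faces first; detectFacialFeaures() only marks features inside them.
    CascadeClassifier face_cascade(face_cascade_path);
    vector<Rect_<int> > faces;
    face_cascade.detectMultiScale(img, faces, 1.15, 3, 0, Size(30, 30));

    detectFacialFeaures(img, faces, eye_cascade_path, nose_cascade_path, mouth_cascade_path);

    imshow("Facial features", img);
    waitKey(0);
    return 0;
}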