Code Example #1
File: main.cpp Project: AdriGe/FacialRecognition
void scanWebcam()
{
	//Check that the cascade file was loaded
	if(!face_cascade1.load( face_cascade_name1 ))
	{
		cout << "Error while loading cascade files" << endl;
		return; //No point continuing without the cascade
	}

	CvCapture* capture;
	cv::Mat frame;

	//Connect to the video stream
	capture = cvCaptureFromFile(PATH_TO_CAM);

	//If the connection was successful 
	if(capture)
	{
		//Create a FaceRecognizer object that uses the Fisherfaces algorithm (also works with the eigenfaces and LBPH algorithms)
		cv::Ptr<cv::FaceRecognizer> fisherfaces = cv::createFisherFaceRecognizer();

		//Load the database that was previously created during the training phase
		cv::FileStorage fs_fisher(PATH_TO_XML_FISHERFACES, cv::FileStorage::READ);
		fisherfaces->load(fs_fisher);

		//Create the display window once, before the loop
		cv::namedWindow("test");

		//Infinite loop to detect the faces continuously
		while(true)
		{
			//Get one picture from the video stream (the facial recognition is done on images extracted from the video, not directly on the stream)
			frame = cvQueryFrame( capture );

			//Check that one image was successfully extracted from the video
			if(!frame.empty())
			{
				//Variables used for the id process
				int predictedLabel = -1;
				double predictedConfidence = 0.0;

				std::vector<cv::Rect> faces; //Contains the rectangle coordinates in which the face will be included
				cv::Mat frame_gray; //Greyscale image
				cvtColor( frame, frame_gray, CV_BGR2GRAY ); //Converts the image from BGR (OpenCV's channel order) to greyscale
				equalizeHist( frame_gray, frame_gray ); //Histogram equalization
				
				//We perform a face detection
				face_cascade1.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
				

				//If at least one face was detected then we can perform an identification
				for(size_t i=0; i<faces.size(); i++)
				{
					//Crop the face region (greyscale)
					cv::Mat croppedFace = frame_gray(faces[i]);
					//Resize the image
					cv::resize(croppedFace, croppedFace, sizeOfImage);
					
					//Start the identification
					fisherfaces->predict(croppedFace, predictedLabel, predictedConfidence);
					
					//Print the result in the console
					cout << "##### ID " << predictedLabel << "    confidence : " << predictedConfidence;

					int id=predictedLabel;
					const int THRESHOLD = 1000; //Threshold for the facial recognition. Used to make sure that the face was properly recognized.

					string printedName;

					cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );

					//Print the ID result on the video (doing it this way is bad practice; a helper function should be used, see the sketch after this example)
					if(id==1 && predictedConfidence>THRESHOLD)
					{
						printedName="Adrien";
						//Print the circle around the face
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						//Print the person's name
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==2 && predictedConfidence>THRESHOLD)
					{
						printedName="Ofir";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==3 && predictedConfidence>THRESHOLD)
					{
						printedName="Jeremie";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else
					{
						printedName="UNKNOWN";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,0,255), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,0,255), 2, 8, false );
					}
					
				}
				cout << endl;

				//Show each image in turn to recreate the video
				cv::imshow("test", frame);
			}	
			else
			{
				cout << " --(!) No captured frame -- Break!" << endl;
				break;
			}

			cv::waitKey(10); //Required for imshow to actually refresh the window
		}

	}
}
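The loop above repeats the same ellipse/putText block for every known person, which the inline comment itself flags. A minimal sketch of the helper it calls for (the function name and the 'known' flag are illustrative, not part of the project):

//Hypothetical helper that consolidates the repeated drawing code above
static void drawFaceLabel(cv::Mat& frame, const cv::Rect& face, const string& name, bool known)
{
	//Green for a recognized face, red for an unknown one
	const cv::Scalar color = known ? cv::Scalar(0,255,0) : cv::Scalar(0,0,255);
	cv::Point center( face.x + face.width*0.5, face.y + face.height*0.5 );
	//Draw the circle around the face
	ellipse( frame, center, cv::Size( face.width*0.5, face.height*0.5 ), 0, 0, 360, color, 4, 8, 0);
	//Print the person's name
	cv::putText( frame, name, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, color, 2, 8, false );
}

Each branch then reduces to a single call, e.g. drawFaceLabel(frame, faces[i], "Adrien", true); for a match and drawFaceLabel(frame, faces[i], "UNKNOWN", false); otherwise.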
Code Example #2
File: eyedet.cpp Project: Keerecles/libra
namespace stasm
{
static cv::CascadeClassifier leye_det_g;  // left eye detector
static cv::CascadeClassifier reye_det_g;  // right eye detector
static cv::CascadeClassifier mouth_det_g; // mouth detector

//-----------------------------------------------------------------------------

// Return the region of the face in which we search for the left or right eye.
// Return a rect of width 0 if the eye must not be searched for (outer eyes in side views).
// We reduce false positives and save time by searching in only part of the face.
// The entire eye box must fall in this region, not just the center of the eye.
// The magic numbers below were found empirically to give good
// results in informal tests.  They reduce the number of false positives
// in the forehead, eyebrows, nostrils, and mouth.

static Rect EyeSearchRect(
    EYAW        eyaw,         // in
    const Rect& facerect,     // in
    const bool  is_right_eye) // in: true for right eye, false for left eye
{
    Rect rect = facerect;
    int width = facerect.width;
    switch (eyaw)
    {
        case EYAW00:                        // frontal model
            if (is_right_eye)
                rect.x += width / 3; // don't search left third of face
            rect.width -= width / 3; // or right third
            rect.height = cvRound(.6 * facerect.height); // don't search lower part of face
            break;
        case EYAW_22:                       // left facing three-quarter model
            if (is_right_eye)               // inner eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
            }
            else                            // outer eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
            }
            rect.height = cvRound(.5 * facerect.height);
            break;
        case EYAW22:                        // right facing three-quarter model
            if (is_right_eye)               // outer eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
            }
            else                            // inner eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
            }
            rect.height = cvRound(.5 * facerect.height);
            break;
        case EYAW_45:                       // left facing three-quarter model
            if (is_right_eye)               // inner eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
                rect.height = cvRound(.5 * facerect.height);
            }
            else                            // outer eye
                rect.width = rect.height = 0;
            break;
        case EYAW45:                        // right facing three-quarter model
            if (is_right_eye)               // outer eye
                rect.width = rect.height = 0;
            else                            // inner eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
                rect.height = cvRound(.5 * facerect.height);
            }
            break;
        default:
            Err("EyeSearchRect: Invalid eyaw %d", eyaw);
            break;
    }
    rect.width  = MAX(0, rect.width);
    rect.height = MAX(0, rect.height);
    return rect;
}

// Get adjustment for position of mouth, based on model type and eye angle.

static void MouthRectShift(
    int&            ixshift,         // out
    int&            iyshift,         // out
    EYAW            eyaw,            // in
    int             facerect_width,  // in
    int             facerect_height, // in
    int             ileft_best,      // in
    int             iright_best,     // in
    const vec_Rect& leyes,           // in
    const vec_Rect& reyes)           // in
{
    double xshift = 0, yshift = 0;
    switch (eyaw)
    {
        case EYAW00: // frontal model
            break;
        case EYAW_45: // left facing three-quarter model
            xshift -= .04 * facerect_width;
            break;
        case EYAW_22: // left facing three-quarter model
            xshift -= .03 * facerect_width;
            break;
        case EYAW22: // right facing three-quarter model
            xshift += .03 * facerect_width;
            break;
        case EYAW45: // right facing three-quarter model
            xshift += .04 * facerect_width;
            break;
        default:
            Err("MouthRectShift: Invalid eyaw %d", eyaw);
            break;
    }

    if (ileft_best != -1 && iright_best != -1)   // got both eyes?
    {
        // get center of eye boxes to get eye angle
        const int xleft  = leyes[ileft_best].x  + leyes[ileft_best].width/2;
        const int yleft  = leyes[ileft_best].y  + leyes[ileft_best].height/2;
        const int xright = reyes[iright_best].x + reyes[iright_best].width/2;
        const int yright = reyes[iright_best].y + reyes[iright_best].height/2;
        double theta = -atan2(double(yright - yleft), double(xright - xleft));
        // move the mouth in the direction of rotation
        xshift += .3 * facerect_height * tan(theta);
        // as the face rotates, the mouth moves up the page
        yshift -= .1 * facerect_height * ABS(tan(theta));
    }
    ixshift = cvRound(xshift);
    iyshift = cvRound(yshift);
}

static Rect MouthRect(           // will search for mouth in this rectangle
    const Rect&     facerect,    // in
    EYAW            eyaw,        // in
    int             ileft_best,  // in: index of best left eye, -1 if none
    int             iright_best, // in: index of best right eye, -1 if none
    const vec_Rect& leyes,       // in: left eyes found by eye detector
    const vec_Rect& reyes)       // in: right eyes found by eye detector
{
    Rect rect = facerect;

    int ixshift, iyshift;
    MouthRectShift(ixshift, iyshift,
                   eyaw, facerect.width, facerect.height,
                   ileft_best, iright_best, leyes, reyes);

    rect.x += cvRound(.2  * facerect.width) + ixshift;

    rect.width = MAX(1, cvRound(.6  * facerect.width));

    switch (eyaw)
    {
        case EYAW00: // frontal model
            rect.y += cvRound(.64 * facerect.height);
            break;
        case EYAW_45: // left facing three-quarter model
            rect.y += cvRound(.55 * facerect.height);
            break;
        case EYAW_22: // left facing three-quarter model
            rect.y += cvRound(.55 * facerect.height);
            break;
        case EYAW22: // right facing three-quarter model
            rect.y += cvRound(.55 * facerect.height);
            break;
        case EYAW45: // right facing three-quarter model
            rect.y += cvRound(.55 * facerect.height);
            break;
        default:
            Err("MouthRect: Invalid eyaw %d", eyaw);
            break;
    }
    rect.y += iyshift;
    rect.height = cvRound(.42 * facerect.height);
    rect.width  = MAX(0, rect.width);
    rect.height = MAX(0, rect.height);
    return rect;
}

bool NeedMouth(
    const vec_Mod& mods) // in: the ASM model(s)
{
    for (int imod = 0; imod < NSIZE(mods); imod++)
        if (mods[imod]->Estart_() == ESTART_EYE_AND_MOUTH)
            return true;
    return false;
}

// Possibly open the OpenCV eye detectors and mouth detector.  We say "possibly"
// because the detectors will only be opened if some model in mods actually
// needs them.  That is determined by the model's estart field.

void OpenEyeMouthDetectors(
    const vec_Mod& mods,    // in: the ASM models (used to see if we need eyes or mouth)
    const char*    datadir) // in
{
    static bool needeyes = true; // static for efficiency
    if (needeyes && leye_det_g.empty()) // not yet opened?
    {
        // we need the eyes if the estart field of any model
        // is ESTART_EYES or ESTART_EYE_AND_MOUTH
        needeyes = false;
        for (int imod = 0; imod < NSIZE(mods); imod++)
            if (mods[imod]->Estart_() == ESTART_EYES ||
                    mods[imod]->Estart_() == ESTART_EYE_AND_MOUTH)
                needeyes = true;
        if (needeyes)
        {
            // I tried all the eye XML files that come with OpenCV 2.1 and found that
            // the files used below give the best results.  The other eye XML files
            // often failed to detect eyes, even with EYE_MIN_NEIGHBORS=1.
            //
            // In the XML filenames, "left" was verified empirically by me to respond
            // to the image left (not the subject's left).  I tested this on
            // the MUCT and BioID sets: haarcascade_mcs_lefteye.xml finds more eyes
            // on the viewer's left than it finds on the right (milbo Lusaka Dec 2011).

            OpenDetector(leye_det_g,  "haarcascade_mcs_lefteye.xml",  datadir);
            OpenDetector(reye_det_g,  "haarcascade_mcs_righteye.xml", datadir);
        }
    }
    static bool needmouth = true; // static for efficiency
    if (needmouth && mouth_det_g.empty()) // not yet opened?
    {
        // we need the mouth if the estart field of any model is ESTART_EYE_AND_MOUTH
        needmouth = false;
        for (int imod = 0; imod < NSIZE(mods); imod++)
            if (mods[imod]->Estart_() == ESTART_EYE_AND_MOUTH)
                needmouth = true;
        if (needmouth)
            OpenDetector(mouth_det_g, "haarcascade_mcs_mouth.xml", datadir);
    }
}

static void DetectAllEyes(
    vec_Rect&    leyes,    // out
    vec_Rect&    reyes,    // out
    const Image& img,      // in
    EYAW         eyaw,     // in
    const Rect&  facerect) // in
{
    CV_Assert(!leye_det_g.empty()); // detector initialized?
    CV_Assert(!reye_det_g.empty());

    // 1.2 is 40ms faster than 1.1 but finds slightly fewer eyes
    static const double EYE_SCALE_FACTOR   = 1.2;
    static const int    EYE_MIN_NEIGHBORS  = 3;
    static const int    EYE_DETECTOR_FLAGS = 0;

    Rect leftrect(EyeSearchRect(eyaw, facerect, false));

    if (leftrect.width)
        leyes = Detect(img, &leye_det_g, &leftrect,
                       EYE_SCALE_FACTOR, EYE_MIN_NEIGHBORS, EYE_DETECTOR_FLAGS,
                       facerect.width / 10);

    Rect rightrect(EyeSearchRect(eyaw, facerect, true));

    if (rightrect.width)
        reyes = Detect(img, &reye_det_g, &rightrect,
                       EYE_SCALE_FACTOR, EYE_MIN_NEIGHBORS, EYE_DETECTOR_FLAGS,
                       facerect.width / 10);
}

static void DetectAllMouths(
    vec_Rect&       mouths,      // out
    const Image&    img,         // in
    EYAW            eyaw,        // in
    const Rect&     facerect,    // in
    int             ileft_best,  // in
    int             iright_best, // in
    const vec_Rect& leyes,       // in
    const vec_Rect& reyes)       // in
{
    CV_Assert(!mouth_det_g.empty()); // detector initialized?

    static const double MOUTH_SCALE_FACTOR   = 1.2; // less false pos with 1.2 than 1.1
    static const int    MOUTH_MIN_NEIGHBORS  = 5;   // less false pos with 5 than 3
    static const int    MOUTH_DETECTOR_FLAGS = 0;

    Rect mouth_rect(MouthRect(facerect,
                              eyaw, ileft_best, iright_best, leyes, reyes));

    mouths =
        Detect(img, &mouth_det_g, &mouth_rect,
               MOUTH_SCALE_FACTOR, MOUTH_MIN_NEIGHBORS, MOUTH_DETECTOR_FLAGS,
               facerect.width / 10);
}

// Return the region of the face in which the _center_ of an eye must fall for
// the eye to be considered valid.  This is a subset of the region we
// search for eyes (as returned by EyeSearchRect, which must be big
// enough to enclose the _entire_ eye box).

static Rect EyeInnerRect(
    EYAW        eyaw,        // in
    const Rect& facerect)    // in
{
    Rect rect = facerect;
    switch (eyaw)
    {
        case EYAW00: // frontal model
            rect.x     += cvRound(.1 * facerect.width);
            rect.width  = cvRound(.8 * facerect.width);
            rect.y     += cvRound(.2 * facerect.height);
            rect.height = cvRound(.28 * facerect.height);
            break;
        case EYAW_45: // left facing three-quarter model
            rect.x     += cvRound(.4 * facerect.width);
            rect.width  = cvRound(.5 * facerect.width);
            rect.y     += cvRound(.20 * facerect.height);
            rect.height = cvRound(.25 * facerect.height);
            break;
        case EYAW_22: // left facing three-quarter model
            rect.x     += cvRound(.1 * facerect.width);
            rect.width  = cvRound(.8 * facerect.width);
            rect.y     += cvRound(.20 * facerect.height);
            rect.height = cvRound(.25 * facerect.height);
            break;
        case EYAW22: // right facing three-quarter model
            rect.x     += cvRound(.1 * facerect.width);
            rect.width  = cvRound(.8 * facerect.width);
            rect.y     += cvRound(.20 * facerect.height);
            rect.height = cvRound(.25 * facerect.height);
            break;
        case EYAW45: // right facing three-quarter model
            rect.x     += cvRound(.1 * facerect.width);
            rect.width  = cvRound(.5 * facerect.width);
            rect.y     += cvRound(.20 * facerect.height);
            rect.height = cvRound(.25 * facerect.height);
            break;
        default:
            Err("EyeInnerRect: Invalid eyaw %d", eyaw);
            break;
    }
    rect.width  = MAX(0, rect.width);
    rect.height = MAX(0, rect.height);
    return rect;
}

// Is the horizontal overlap between the LeftEye and RightEye rectangles no
// more than 10% of the left eye width, and is the horizontal distance between
// the edges of the eyes no more than the left eye width?

static bool IsEyeHorizOk(
    const Rect& left,         // in
    const Rect& right)        // in
{
    return left.x + left.width - right.x   <= .1 * left.width &&
           right.x - (left.x + left.width) <= left.width;
}

static bool VerticalOverlap( // do the two eyes overlap vertically?
    const Rect& left,        // in
    const Rect& right)       // in
{
    const int botleft  = left.y + left.height;   // bottom edge (y grows downward)
    const int botright = right.y + right.height;

    return (left.y   >= right.y && left.y   <= right.y + right.height) ||
           (botleft  >= right.y && botleft  <= right.y + right.height) ||
           (right.y  >= left.y  && right.y  <= left.y  + left.height)  ||
           (botright >= left.y  && botright <= left.y  + left.height);
}


// Is the center of rect within the enclosing rect?

static bool InRect(
    const Rect& rect,      // in
    const Rect& enclosing) // in
{
    int x = rect.x + rect.width / 2;  // center of rectangle
    int y = rect.y + rect.height / 2;

    return x >= enclosing.x &&
           x <= enclosing.x + enclosing.width &&
           y >= enclosing.y &&
           y <= enclosing.y + enclosing.height;
}

// Return the indices of the best left and right eye in the list of eyes
// returned by the feature detectors.
// The heuristic in detail (based on inspection of the images produced):
// Find the left and right eye that
//  (i)   are both in eye_inner_rect
//  (ii)  don't overlap horizontally by more than 10%
//  (iii) overlap vertically
//  (iv)  have the largest total width
//  (v)   if frontal, have an intereye distance of at least .25 * eye_inner_rect width

static void SelectEyes(
    int&            ileft_best,     // out: index into leyes, -1 if none
    int&            iright_best,    // out: index into reyes, -1 if none
    EYAW            eyaw,           // in
    const vec_Rect& leyes,          // in: left eyes found by detectMultiScale
    const vec_Rect& reyes,          // in: right eyes found by detectMultiScale
    const Rect&     eye_inner_rect) // in: center of the eye must be in this region
{
    ileft_best = iright_best = -1; // assume will return no eyes
    int min_intereye = eyaw == EYAW00? cvRound(.25 * eye_inner_rect.width): 0;
    int maxwidth = 0; // combined width of both eye boxes
    int ileft, iright;
    Rect left, right;

    // this part of the code will either select both eyes or no eyes

    for (ileft = 0; ileft < NSIZE(leyes); ileft++)
    {
        left = leyes[ileft];
        if (InRect(left, eye_inner_rect))
        {
            for (iright = 0; iright < NSIZE(reyes); iright++)
            {
                right = reyes[iright];
                if (InRect(right, eye_inner_rect) &&
                    IsEyeHorizOk(left, right) &&
                    right.x - left.x >= min_intereye &&
                    VerticalOverlap(left, right))
                {
                    int total_width = left.width + right.width;
                    if (total_width > maxwidth)
                    {
                        maxwidth = total_width;
                        ileft_best = ileft;
                        iright_best = iright;
                    }
                }
            }
        }
    }
    if (ileft_best == -1 && iright_best == -1)
    {
        // The above loops failed to find a left and right eye in correct
        // relationship to each other.  So simply select the largest left eye and
        // the largest right eye (but make sure that they are in the eye_inner_rect).

        int max_left_width = 0;
        for (ileft = 0; ileft < NSIZE(leyes); ileft++)
        {
            left = leyes[ileft];
            if (InRect(left, eye_inner_rect))
            {
                if (left.width > max_left_width)
                {
                    max_left_width = left.width;
                    ileft_best = ileft;
                }
            }
        }
        int max_right_width = 0;
        for (iright = 0; iright < NSIZE(reyes); iright++)
        {
            right = reyes[iright];
            if (InRect(right, eye_inner_rect))
            {
                if (right.width > max_right_width)
                {
                    max_right_width = right.width;
                    iright_best = iright;
                }
            }
        }
        // One final check (for vr08m03.bmp) -- if the two largest eyes overlap
        // too much horizontally then discard the smaller eye.

        if (ileft_best != -1 && iright_best != -1)
        {
            left = leyes[ileft_best];
            right = reyes[iright_best];
            if (!IsEyeHorizOk(left, right) || right.x - left.x < min_intereye)
            {
                if (max_right_width > max_left_width)
                    ileft_best = -1;
                else
                    iright_best = -1;
            }
        }
    }
}

// The values below are fairly conservative: for the ASM start shape,
// it's better to not find a mouth than to find an incorrect mouth.

static Rect MouthInnerRect(
    const Rect&     facerect,    // in
    EYAW            eyaw,        // in
    int             ileft_best,  // in: index of best left eye, -1 if none
    int             iright_best, // in: index of best right eye, -1 if none
    const vec_Rect& leyes,       // in: left eyes found by eye detector
    const vec_Rect& reyes)       // in: right eyes found by eye detector
{
    Rect rect = facerect;
    double width = (eyaw == EYAW00? .12: .20) * facerect.width;
    double height = .30 * facerect.height;

    int ixshift, iyshift;
    MouthRectShift(ixshift, iyshift,
                   eyaw, facerect.width, facerect.height,
                   ileft_best, iright_best, leyes, reyes);

    rect.x += cvRound(.50 * (facerect.width - width)) + ixshift;

    rect.width  =  cvRound(width);

    switch (eyaw)
    {
        case EYAW00: // frontal model
            rect.y += cvRound(.7 * facerect.height);
            break;
        case EYAW_45: // left facing three-quarter model
            rect.y += cvRound(.65 * facerect.height);
            break;
        case EYAW_22: // left facing three-quarter model
            rect.y += cvRound(.65 * facerect.height);
            break;
        case EYAW22: // right facing three-quarter model
            rect.y += cvRound(.65 * facerect.height);
            break;
        case EYAW45: // right facing three-quarter model
            rect.y += cvRound(.65 * facerect.height);
            break;
        default:
            Err("MouthInnerRect: Invalid eyaw %d", eyaw);
            break;
    }
    rect.y += iyshift;
    rect.height = cvRound(height);
    rect.width  = MAX(0, rect.width);
    rect.height = MAX(0, rect.height);
    return rect;
}

// The OpenCV mouth detector biases the position of the mouth downward (wrt the
// center of the mouth determined by manual landmarking).  Correct that here.

static int MouthVerticalShift(
    const int       ileft_best,   // in
    const int       iright_best,  // in
    const int       imouth_best,  // in
    const vec_Rect& leyes,        // in
    const vec_Rect& reyes,        // in
    const vec_Rect& mouths)       // in
{
    double shift = 0;
    if (ileft_best != -1 && iright_best != -1) // got both eyes?
    {
        CV_Assert(imouth_best != -1);
        // get eye mouth distance: first get center of both eyes
        const double xleft  = leyes[ileft_best].x  + leyes[ileft_best].width   / 2;
        const double yleft  = leyes[ileft_best].y  + leyes[ileft_best].height  / 2;
        const double xright = reyes[iright_best].x + reyes[iright_best].width  / 2;
        const double yright = reyes[iright_best].y + reyes[iright_best].height / 2;
        const double eyemouth =
            PointDist((xleft + xright) / 2,(yleft + yright) / 2,
                      mouths[imouth_best].x, mouths[imouth_best].y);
        static const double MOUTH_VERT_ADJUST = -0.050; // neg to shift up
        shift = MOUTH_VERT_ADJUST * eyemouth;
    }
    return cvRound(shift);
}

// Return the index of the best mouth in the list of mouths

static void SelectMouth(
    int&            imouth_best,      // out: index into mouths, -1 if none
    int             ileft_best,       // in: index of best left eye, -1 if none
    int             iright_best,      // in: index of best right eye, -1 if none
    const vec_Rect& leyes,            // in: left eyes found by eye detector
    const vec_Rect& reyes,            // in: right eyes found by eye detector
    const vec_Rect& mouths,           // in: mouths found by the mouth detector
    const Rect&     mouth_inner_rect) // in: center of mouth must be in this region
{
    CV_Assert(!mouths.empty());
    imouth_best = -1;

    // if only one mouth, use it
    if (NSIZE(mouths) == 1 && InRect(mouths[0], mouth_inner_rect))
        imouth_best = 0;
    else
    {
        // More than one mouth: select the lowest mouth to avoid
        // "nostril mouths".  But to avoid "chin mouths", the mouth
        // must also meet the following criteria:
        //   i)  it must be wider than .7 * the smallest eye width
        //   ii) it must not be much narrower than the widest mouth.

        int minwidth = 0;
        if (ileft_best != -1)
            minwidth = leyes[ileft_best].width;
        if (iright_best != -1)
            minwidth = MIN(minwidth, reyes[iright_best].width);
        minwidth = cvRound(.7 * minwidth);

        // find widest mouth
        int maxwidth = minwidth;
        for (int imouth = 0; imouth < NSIZE(mouths); imouth++)
        {
            const Rect mouth = mouths[imouth];
            if (InRect(mouth, mouth_inner_rect) && mouth.width > maxwidth)
            {
                maxwidth = mouth.width;
                imouth_best = imouth;
            }
        }
        // choose lowest mouth that is at least .84 the width of widest
        minwidth = MAX(minwidth, cvRound(.84 * maxwidth));
        int ybest = int(-1e5); // y center of the lowest acceptable mouth so far
        for (int imouth = 0; imouth < NSIZE(mouths); imouth++)
        {
            const Rect mouth = mouths[imouth];
            if (InRect(mouth, mouth_inner_rect) &&
                mouth.y + mouth.height / 2 > ybest &&
                mouth.width > minwidth)
            {
                ybest = mouth.y + mouth.height / 2;
                imouth_best = imouth;
            }
        }
    }
}

static void TweakMouthPosition(
    vec_Rect&       mouths,      // io
    const vec_Rect& leyes,       // in
    const vec_Rect& reyes,       // in
    const int       ileft_best,  // in
    const int       iright_best, // in
    const int       imouth_best, // in
    const DetPar&   detpar)      // in
{
    mouths[imouth_best].y += // move mouth up to counteract OpenCV mouth bias
         MouthVerticalShift(ileft_best, iright_best, imouth_best,
                            leyes, reyes, mouths);

    // If face pose is strong three-quarter, move mouth
    // out to counteract OpenCV mouth detector bias.

    if (detpar.eyaw == EYAW_45)
        mouths[imouth_best].x -= cvRound(.06 * detpar.width);
    else if (detpar.eyaw == EYAW45)
        mouths[imouth_best].x += cvRound(.06 * detpar.width);
}

static void RectToImgFrame(
    double&     x,          // out: center of feature
    double&     y,          // out: center of feature
    const Rect& featrect)   // in
{
    x = featrect.x + featrect.width / 2;
    y = featrect.y + featrect.height / 2;
}

void DetectEyesAndMouth(  // use OpenCV detectors to find the eyes and mouth
    DetPar&       detpar, // io: eye and mouth fields updated, other fields untouched
    const Image&  img)    // in: ROI around face (already rotated if necessary)
{
    Rect facerect(cvRound(detpar.x - detpar.width/2),
                  cvRound(detpar.y - detpar.height/2),
                  cvRound(detpar.width),
                  cvRound(detpar.height));

    // possibly get the eyes

    detpar.lex = detpar.ley = INVALID; // mark eyes as unavailable
    detpar.rex = detpar.rey = INVALID;
    vec_Rect leyes, reyes;
    int ileft_best = -1, iright_best = -1; // indices into leyes and reyes
    if (!leye_det_g.empty()) // do we need the eyes? (depends on model estart field)
    {
        DetectAllEyes(leyes, reyes,
                      img, detpar.eyaw, facerect);

        SelectEyes(ileft_best, iright_best,
                   detpar.eyaw, leyes, reyes, EyeInnerRect(detpar.eyaw, facerect));

        if (ileft_best >= 0)
            RectToImgFrame(detpar.lex, detpar.ley,
                           leyes[ileft_best]);

        if (iright_best >= 0)
            RectToImgFrame(detpar.rex, detpar.rey,
                           reyes[iright_best]);
    }
    // possibly get the mouth

    detpar.mouthx = detpar.mouthy = INVALID;  // mark mouth as unavailable
    if (!mouth_det_g.empty()) // do we need the mouth? (depends on model estart field)
    {
        vec_Rect mouths;
        DetectAllMouths(mouths,
                        img, detpar.eyaw, facerect,
                        ileft_best, iright_best, leyes, reyes);

        if (!mouths.empty())
        {
            int imouth_best = -1;

            SelectMouth(imouth_best,
                        ileft_best, iright_best, leyes, reyes, mouths,
                        MouthInnerRect(facerect, detpar.eyaw,
                                       ileft_best, iright_best, leyes, reyes));

            if (imouth_best >= 0)
            {
                TweakMouthPosition(mouths,
                                   leyes, reyes, ileft_best, iright_best,
                                   imouth_best, detpar);

                RectToImgFrame(detpar.mouthx, detpar.mouthy,
                               mouths[imouth_best]);
            }
        }
    }
}

} // namespace stasm
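A hedged sketch of how these entry points fit together, with the surrounding variables (mods, datadir, detpar, face_roi) assumed to exist as in the rest of the Stasm pipeline:

// Hypothetical driver: open the detectors if any model needs them,
// then refine an already-detected face with eye and mouth positions.
OpenEyeMouthDetectors(mods, datadir);
DetectEyesAndMouth(detpar, face_roi); // fills lex/ley, rex/rey, mouthx/mouthy
if (detpar.lex != INVALID)            // INVALID marks a feature that was not found
    lprintf("left eye at %g %g\n", detpar.lex, detpar.ley);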
Code Example #3
File: main.cpp Project: lingz/eye-see-you
/*
 * @function main
 */
int main( int argc, const char** argv ) {

  // Get the mode
  if (argc > 1)
  {
    const char *inputMode = argv[1];
    if (strcmp(inputMode, "normal") == 0) {
      mode = NORMAL;
    } else if (strcmp(inputMode, "debug") == 0) {
      mode = DEBUG;
    } else if (strcmp(inputMode, "plot") == 0) {
      mode = PLOT;
    } else {
      mode = NORMAL;
    }
  }
  else
  {
    mode = NORMAL;
  }

  if (mode == NORMAL) {
    eventHandler = EventHandler();
  }

  if (mode == DEBUG || mode == NORMAL) {
    printf("Input Mode: %s\n", mode == NORMAL ? "normal" :
        mode == DEBUG ? "debug" :
        mode == PLOT ? "plot" : "none");

    cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 10, 100);
    cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Right Eye", 10, 600);
    cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Left Eye", 10, 800);
  } else if (mode == PLOT) {
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 400, 100);
  }

  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( FACE_CASCADE_FILE ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };


  // Read the video stream
  cv::VideoCapture capture( 0 );
  if( capture.isOpened() ) {
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FPS, 15);
    capture >> frame; // prime the capture (this first frame is discarded)
    while( true ) {
      capture >> frame;

      // mirror it
      cv::flip(frame, frame, 1);
      frame.copyTo(debugImage);

      // Apply the classifier to the frame
      if( !frame.empty() ) {
        detectAndDisplay( frame );
      }
      else {
        printf(" --(!) No captured frame -- Break!");
        break;
      }
      if (mode == DEBUG || mode == NORMAL) {
        imshow(main_window_name, debugImage);
      }

      if (mode == DEBUG || mode == PLOT || mode == NORMAL) {
        int c = cv::waitKey(10);
        if( (char)c == 'c' ) { break; }
        if( (char)c == 'f' ) {
          imwrite("frame.png", frame);
        }
      }
    }
  }

  return 0;
}
Code Example #4
void FaceDetector::classifierDetect(const cv::Mat& image, std::vector<cv::Rect>& detections, cv::CascadeClassifier& classifier, int flag, cv::Size size)
{
	//Thin wrapper around detectMultiScale; the classifier is passed by
	//reference so that it is not copied on every call
	classifier.detectMultiScale(image, detections, 1.1, 2, flag, size);
}
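A hedged usage sketch of this wrapper; the cascade filename, the grayscale input gray_image, and the default-constructed FaceDetector are assumptions for illustration:

// Hypothetical caller: load a cascade once, then reuse it across frames.
cv::CascadeClassifier face_classifier;
if (face_classifier.load("haarcascade_frontalface_alt.xml")) // path is an assumption
{
	std::vector<cv::Rect> detections;
	FaceDetector detector; // assumes FaceDetector is default-constructible
	detector.classifierDetect(gray_image, detections, face_classifier,
	                          CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
}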
Code Example #5
File: splay.cpp Project: yquemener/THS-Geist
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
    cv::VideoCapture capture;
    cv::Mat frame;
    if(!cascade.load(cascade_name)) {
        std::cout << "Error loading cascade file" << std::endl;
        return -1;
    }
    
    // This works on a D-Link CDS-932L
    const std::string videoStreamAddress = "http://192.168.1.253/nphMotionJpeg?Resolution=320x240&Quality=Standard&.mjpg";

    //open the video stream and make sure it's opened
    if(!capture.open(videoStreamAddress)) {
        std::cout << "Error opening video stream or file" << std::endl;
        return -1;
    }

    // Create a named window titled "Result"
    cv::namedWindow( "Result", 1 );

    // Capture from the camera frame by frame.
    for(;;)
    {   
        cv::Mat gray;
        // Drain buffered frames so the frame processed below is recent
        for(int i=0;i<10;i++)
            capture.grab();
        capture >> frame;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);

        std::vector<cv::Rect> results;
        cascade.detectMultiScale(gray, results, 1.1, 3, 0);
        int dx=0;
        int dy=0;
        for(std::vector<cv::Rect>::iterator it=results.begin();
        it!=results.end();
        it++)
        {
          cv::Rect r = *it;
          std::cout << "_" << r.x << "_\t_" << r.y << "_\t" << std::endl;
          dx = r.x + r.width/2;
          dy = r.y + r.height/2;
        }
        std::stringstream ss;
        ss << "wget -q -O /dev/null \"http://192.168.1.253/nphControlCamera?Width=";
        ss << gray.cols;
        ss << "&Height=";
        ss << gray.rows;
        ss << "&Direction=Direct&NewPosition.x=" << dx;
        ss << "&NewPosition.y=" << dy<<"\"";
        if((dx!=0)||(dy!=0))
        {
            std::cout << ss.str() << std::endl;
            system(ss.str().c_str());
                
        }
        imshow("Result",gray);
        // Wait for a while before proceeding to the next frame
        if( cvWaitKey( 10 ) >= 0 )
            break;
        
    }

    return 0;
}
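The camera-control command in the loop above could be factored into a helper built from the same wget/system calls; a minimal sketch (the helper name is hypothetical, the URL is the one already used above):

// Hypothetical helper wrapping the pan/tilt call from the main loop.
static void pointCameraAt(int x, int y, int cols, int rows)
{
    std::stringstream ss;
    ss << "wget -q -O /dev/null \"http://192.168.1.253/nphControlCamera?Width="
       << cols << "&Height=" << rows
       << "&Direction=Direct&NewPosition.x=" << x
       << "&NewPosition.y=" << y << "\"";
    std::cout << ss.str() << std::endl;
    system(ss.str().c_str()); // same approach as the main loop above
}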
Code Example #6
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#ifndef CV_VERSION_EPOCH
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif
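      // Note: CV_VERSION_EPOCH is defined by OpenCV 2.x headers only, so the
      // #ifndef branch above selects the call without the legacy CV_HAAR_*
      // flags, which come from the OpenCV 1.x API.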

      for( size_t i = 0; i < faces.size(); i++ )
      {
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#ifndef CV_VERSION_EPOCH
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        cv::waitKey(1); // give HighGUI a chance to draw the window
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
Code Example #7
File: facedet.cpp Project: Keerecles/libra
namespace stasm
{
typedef vector<DetPar> vec_DetPar;

static cv::CascadeClassifier facedet_g;  // the face detector

static double BORDER_FRAC = 0.1; // fraction of image width or height
                                 // use 0.0 for no border

//-----------------------------------------------------------------------------

void FaceDet::OpenFaceDetector_( // called by stasm_init, init face det from XML file
    const char* datadir,         // in: directory of face detector files
    void*)                       // in: unused (func signature compatibility)
{
    OpenDetector(facedet_g, "haarcascade_frontalface_alt2.xml",  datadir);
}

// If a face is near the edge of the image, the OpenCV detectors tend to
// return a too-small face rectangle.  By adding a border around the edge
// of the image we mitigate this problem.

static Image EnborderImg(    // return the image with a border
    int&         leftborder, // out: border size in pixels
    int&         topborder,  // out: border size in pixels
    const Image& img)        // in
{
    Image bordered_img(img);
    leftborder = cvRound(BORDER_FRAC * bordered_img.cols);
    topborder  = cvRound(BORDER_FRAC * bordered_img.rows);
    copyMakeBorder(bordered_img, bordered_img,
                   topborder, topborder, leftborder, leftborder,
                   cv::BORDER_REPLICATE);
    return bordered_img;
}

void DetectFaces(          // all face rects into detpars
    vec_DetPar&  detpars,  // out
    const Image& img,      // in
    int          minwidth) // in: as percent of img width
{
    int leftborder = 0, topborder = 0; // border size in pixels
    Image bordered_img(BORDER_FRAC == 0?
                       img: EnborderImg(leftborder, topborder, img));

    // Detection results are very slightly better with equalization
    // (tested on the MUCT images, which are not pre-equalized), and
    // it's quick enough to equalize (roughly 10ms on a 1.6 GHz laptop).

    Image equalized_img; cv::equalizeHist(bordered_img, equalized_img);

    CV_Assert(minwidth >= 1 && minwidth <= 100);

    int minpix = MAX(100, cvRound(img.cols * minwidth / 100.));

    // the params below are accurate but slow
    static const double SCALE_FACTOR   = 1.1;
    static const int    MIN_NEIGHBORS  = 3;
    static const int    DETECTOR_FLAGS = 0;

    vec_Rect facerects = // all face rects in image
        Detect(equalized_img, &facedet_g, NULL,
               SCALE_FACTOR, MIN_NEIGHBORS, DETECTOR_FLAGS, minpix);

    // copy face rects into the detpars vector

    detpars.resize(NSIZE(facerects));
    for (int i = 0; i < NSIZE(facerects); i++)
    {
        Rect* facerect = &facerects[i];
        DetPar detpar; // detpar constructor sets all fields INVALID
        // detpar.x and detpar.y are the center of the face rectangle
        detpar.x = facerect->x + facerect->width / 2.;
        detpar.y = facerect->y + facerect->height / 2.;
        detpar.x -= leftborder; // discount the border we added earlier
        detpar.y -= topborder;
        detpar.width  = double(facerect->width);
        detpar.height = double(facerect->height);
        detpar.yaw = 0; // assume face has no yaw in this version of Stasm
        detpar.eyaw = EYAW00;
        detpars[i] = detpar;
    }
}

// order by increasing distance from the left margin, and distance from the top margin within that

static bool IncreasingLeftMargin( // compare predicate for std::sort
    const DetPar& detpar1,        // in
    const DetPar& detpar2)        // in
{
    return 1e5 * detpar2.x + detpar2.y >
           1e5 * detpar1.x + detpar1.y;
}

// order by decreasing width, and distance from the left margin within that

static bool DecreasingWidth( // compare predicate for std::sort
    const DetPar& detpar1,   // in
    const DetPar& detpar2)   // in
{
    return 1e5 * detpar2.width - detpar2.x <
           1e5 * detpar1.width - detpar1.x;

}

// Discard faces that are too big or too small (this helps reduce the number of false positives)

static void DiscardMissizedFaces(
    vec_DetPar& detpars) // io
{
    // constants (TODO These have not yet been rigorously empirically adjusted.)
    static const double MIN_WIDTH = 1.33; // as fraction of median width
    static const double MAX_WIDTH = 1.33; // as fraction of median width

    if (NSIZE(detpars) >= 3) // need at least 3 faces
    {
        // sort the faces on their width (largest first) so we can get the median width
        sort(detpars.begin(), detpars.end(), DecreasingWidth);
        const int median     = cvRound(detpars[NSIZE(detpars) / 2].width);
        const int minallowed = cvRound(median / MIN_WIDTH);
        const int maxallowed = cvRound(MAX_WIDTH * median);
        // keep only faces that are not too big or small
        vec_DetPar all_detpars(detpars);
        detpars.resize(0);
        for (int iface = 0; iface < NSIZE(all_detpars); iface++)
        {
            DetPar* face = &all_detpars[iface];
            if (face->width >= minallowed && face->width <= maxallowed)
                detpars.push_back(*face);
            else if (trace_g || TRACE_IMAGES)
                lprintf("[discard %d of %d]", iface, NSIZE(all_detpars));
        }
    }
}

static void TraceFaces(         // write image showing detected face rects
    const vec_DetPar& detpars,  // in
    const Image&      img,      // in
    const char*       filename) // in
{
#if TRACE_IMAGES // will be 0 unless debugging (defined in stasm.h)

    CImage cimg; cvtColor(img, cimg, CV_GRAY2BGR); // color image
    for (int iface = 0; iface < NSIZE(detpars); iface++)
    {
        const DetPar &detpar = detpars[iface];

        rectangle(cimg,
                  cv::Point(cvRound(detpar.x - detpar.width/2),
                            cvRound(detpar.y - detpar.height/2)),
                  cv::Point(cvRound(detpar.x + detpar.width/2),
                            cvRound(detpar.y + detpar.height/2)),
                  CV_RGB(255,255,0), 2);

        ImgPrintf(cimg, // 10 * iface to minimize overplotting
                  detpar.x + 10 * iface, detpar.y, 0xffff00, 1, ssprintf("%d", iface));
    }
    cv::imwrite(filename, cimg);

#endif
}

void FaceDet::DetectFaces_(  // call once per image to find all the faces
    const Image& img,        // in: the image (grayscale)
    const char*,             // in: unused (match virt func signature)
    bool         multiface,  // in: if false, want only the best face
    int          minwidth,   // in: min face width as percentage of img width
    void*        user)       // in: unused (match virt func signature)
{
    CV_Assert(user == NULL);
    CV_Assert(!facedet_g.empty()); // check that OpenFaceDetector_ was called
    DetectFaces(detpars_, img, minwidth);
    TraceFaces(detpars_, img, "facedet_BeforeDiscardMissizedFaces.bmp");
    DiscardMissizedFaces(detpars_);
    TraceFaces(detpars_, img, "facedet_AfterDiscardMissizedFaces.bmp");
    if (multiface) // order faces on increasing distance from left margin
    {
        sort(detpars_.begin(), detpars_.end(), IncreasingLeftMargin);
        TraceFaces(detpars_, img, "facedet.bmp");
    }
    else
    {
        // order faces on decreasing width, keep only the first (the largest face)
        sort(detpars_.begin(), detpars_.end(), DecreasingWidth);
        TraceFaces(detpars_, img, "facedet.bmp");
        if (NSIZE(detpars_))
            detpars_.resize(1);
    }
    iface_ = 0; // next invocation of NextFace_ must get first face
}

// Get the (next) face from the image.
// If no face is available, return with detpar.x set to INVALID.
// Eyes, mouth, and rot in detpar always returned INVALID.

const DetPar FaceDet::NextFace_(void)
{
    DetPar detpar; // detpar constructor sets all fields INVALID

    if (iface_ < NSIZE(detpars_))
        detpar = detpars_[iface_++];

    return detpar;
}

} // namespace stasm
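A hedged sketch of the detector's lifecycle as implied by the comments above (img, datadir, and the surrounding scaffolding are assumptions):

// Hypothetical usage: initialize once, detect per image, then iterate the faces.
FaceDet facedet;
facedet.OpenFaceDetector_(datadir, NULL);         // load the XML cascade
facedet.DetectFaces_(img, NULL, true, 10, NULL);  // multiface, minwidth 10% of image
DetPar detpar = facedet.NextFace_();
while (detpar.x != INVALID)                       // INVALID means no more faces
{
    // ... use detpar.x, detpar.y, detpar.width, detpar.height ...
    detpar = facedet.NextFace_();
}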
Code Example #8
File: main.cpp Project: hkuhn/EECS481-Pupil-Tracking
/**
 * @function main
 */
int main( int argc, const char** argv ) {
  CvCapture* capture;
  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 100, 600);

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

  cout << "press c to quit program" << endl;
    // Use pre-recorded video
    if (argc > 1) {
        const char* path = argv[1];
        
        capture = cvCreateFileCapture(path);

        if( capture ) {
            while( true ) {
                frame = cvQueryFrame( capture );

                frame.copyTo(debugImage);
                
                // Apply the classifier to the frame
                if( !frame.empty() ) {
                    detectAndDisplay( frame );
                }
                else {
                    printf(" --(!) No captured frame -- Break!");
                    break;
                }
                
                imshow(main_window_name,frame);
                
                int c = cv::waitKey(1);
                if( (char)c == 'c' ) { break; }
                if( (char)c == 'f' ) {
                    imwrite("frame.png",frame);
                }
                
            }
        }
        
        releaseCornerKernels();
        
    }
    else {
    
       // Read the video stream
      capture = cvCaptureFromCAM( -1 );
        
      if( capture ) {
        while( true ) {
          frame = cvQueryFrame( capture );
          // mirror it
          cv::flip(frame, frame, 1);
          frame.copyTo(debugImage);

          // Apply the classifier to the frame
          if( !frame.empty() ) {
            detectAndDisplay( frame );
          }
          else {
            printf(" --(!) No captured frame -- Break!");
            break;
          }

          imshow(main_window_name,debugImage);

          int c = cv::waitKey(1);
          if( (char)c == 'c' ) { break; }
          if( (char)c == 'f' ) {
            imwrite("frame.png",frame);
          }

        }
      }

      releaseCornerKernels();
    }

  return 0;
}
Code Example #9
File: facedetect.cpp Project: ddennedy/frei0r
    std::vector<cv::Rect> detect()
    {
        std::vector<cv::Rect> faces;
        if (cascade.empty()) return faces;
        double scale = this->scale == 0? 1.0 : this->scale;
        cv::Mat image_roi = image;
        cv::Mat gray, small;
        int min = cvRound(smallest * 1000. * scale);
        
        // use a region of interest to improve performance
        // This idea comes from the More than Technical blog:
        // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
        if ( roi.width > 0 && roi.height > 0)
        {
            image_roi = image(roi);
        }

        // use an equalized grayscale to improve detection
        cv::cvtColor(image_roi, gray, CV_BGR2GRAY);

        // use a smaller image to improve performance
        cv::resize(gray, small, cv::Size(cvRound(gray.cols * scale), cvRound(gray.rows * scale)));
        cv::equalizeHist(small, small);
        
        // detect with OpenCV
        cascade.detectMultiScale(small, faces, 1.1, 2, 0, cv::Size(min, min));
        
#ifdef USE_ROI
        if (faces.size() == 0)
        {
            // clear the region of interest
            roi.width = roi.height = 0;
            roi.x = roi.y = 0;
        }
        else
        {
            // determine the region of interest from the detected objects
            int minx = width * scale;
            int miny = height * scale;
            int maxx = 0, maxy = 0; // note: both must be initialized
            for (size_t i = 0; i < faces.size(); i++)
            {
                faces[i].x+= roi.x * scale;
                faces[i].y+= roi.y * scale;
                minx = MIN(faces[i].x, minx);
                miny = MIN(faces[i].y, miny);
                maxx = MAX(faces[i].x + faces[i].width, maxx);
                maxy = MAX(faces[i].y + faces[i].height, maxy);
            }
            minx = MAX(minx - PAD, 0);
            miny = MAX(miny - PAD, 0);
            maxx = MIN(maxx + PAD, width * scale);
            maxy = MIN(maxy + PAD, height * scale);

            // store the region of interest
            roi.x = minx / scale;
            roi.y = miny / scale;
            roi.width = (maxx - minx) / scale;
            roi.height = (maxy - miny) / scale; 
        }
#endif
        return faces;
    }
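Note that the rects returned by detect() are in the coordinates of the downscaled detection image (with the ROI offset added back in scaled units). A hedged sketch of the inverse mapping a caller might apply, under that assumption (the helper name is hypothetical):

// Hypothetical helper: map a rect from the scaled detection image back to
// full-image coordinates by dividing out the scale factor, as the ROI code
// above does for roi.x and roi.y.
static cv::Rect toFullImage(const cv::Rect& r, double scale)
{
    return cv::Rect(cvRound(r.x / scale), cvRound(r.y / scale),
                    cvRound(r.width / scale), cvRound(r.height / scale));
}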
Code Example #10
/*
 * Class:     io_github_melvincabatuan_pedestriandetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_pedestriandetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access the source array data
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat for YUV420sp source and output BGRA 
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "visionary.net_pedestrian_cascade_web_LBP.xml");       
    
      /* Load the pedestrian cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){ 
           LOGE("Error loading pedestrian cascade"); 
           abort(); 
       };

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Detection took cat_face_cascade.detectMultiScale() time = 655.334471 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2 , 0 , Size(30, 30) ); // Scaling factor = 1.1;  minNeighbors = 2 ; flags = 0; minimumSize = 30,30

      // cat_face_cascade.detectMultiScale() time = 120.117185 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3 , 0 , Size(64, 64));

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // minimum object size in pixels (width, height)

      // The scaleFactor parameter determines how much the detection scale is increased after each pass.
      // The minNeighbors parameter specifies how many positive neighbors a positive rectangle needs to be considered a match;
      // if a potential rectangle moved by a pixel no longer triggers the classifier, it is most likely a false positive.
      // Rectangles with fewer positive neighbors than minNeighbors are rejected.
      // If minNeighbors is set to zero, all potential rectangles are returned.
      // The flags parameter is from the OpenCV 1.x API and should always be 0.
      // The minimum size specifies the smallest rectangle we are looking for.

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through the detected bodies and mark each with an ellipse
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Marking the detected bodies took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
   /// Release Java byte buffer and unlock backing bitmap
   pEnv-> ReleasePrimitiveArrayCritical(pSource,source,0);
   if (AndroidBitmap_unlockPixels(pEnv, pTarget) < 0) abort();
}
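The getTickCount()/getTickFrequency() timing pattern above appears three times; a hedged RAII sketch that would factor it out (the struct name is hypothetical; LOGI and DEBUG are the macros already used above):

// Hypothetical scoped timer: logs elapsed milliseconds when it leaves scope.
struct ScopedTimer {
    const char* label;
    double t0;
    explicit ScopedTimer(const char* l) : label(l), t0((double)getTickCount()) {}
    ~ScopedTimer() {
        double ms = 1000*((double)getTickCount() - t0)/getTickFrequency();
        if (DEBUG) LOGI("%s took %lf milliseconds.", label, ms);
    }
};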
Code Example #11
File: main.cpp Project: trevorsenior/playground
void detectAndDisplay(cv::Mat frame) {
  std::vector<cv::Rect> faces;
  cv::Mat frame_gray;

  cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
  cv::equalizeHist(frame_gray, frame_gray);
  
  // Detect Faces
  face_cascade.detectMultiScale(frame_gray, // image
				faces, // objects
				1.1, // scale factor
				2, // min neighbors
				0|cv::CASCADE_SCALE_IMAGE, // flags
				cv::Size(30, 30)); // min size

  for (std::size_t i = 0; i < faces.size(); i++) {
    cv::Point center(faces[i].x + faces[i].width/2,
		     faces[i].y + faces[i].height/2);

    cv::ellipse(frame,
		center,
		cv::Size(faces[i].width/2, faces[i].height/2),
		0,
		0,
		360,
		cv::Scalar(255, 0, 255),
		4,
		8,
		0);

    cv::Mat faceROI = frame_gray(faces[i]);
    std::vector<cv::Rect> eyes;

    // in each face, detect eyes
    eyes_cascade.detectMultiScale(faceROI,
				  eyes,
				  1.1,
				  2,
				  0 | cv::CASCADE_SCALE_IMAGE,
				  cv::Size(30, 30));

    for (std::size_t j = 0; j < eyes.size(); j++) {
      cv::Point eye_center(faces[i].x + eyes[j].x + eyes[j].width/2,
			   faces[i].y + eyes[j].y + eyes[j].height/2);

      int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
      cv::circle(frame, 
		 eye_center,
		 radius, 
		 cv::Scalar(255, 0, 0),
		 4,
		 8,
		 0);
    }

  }

  // Show what you got
  cv::imshow(window_name, frame);

}
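
detectAndDisplay relies on globals (face_cascade, eyes_cascade, window_name) defined elsewhere in this main.cpp. Below is a minimal, hypothetical driver under that assumption; the cascade file names are the stock OpenCV ones and may need adjusting, and in a real file these declarations would need to precede detectAndDisplay.

// Hypothetical driver for detectAndDisplay; globals and paths are assumptions.
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

cv::CascadeClassifier face_cascade;
cv::CascadeClassifier eyes_cascade;
std::string window_name = "Capture - Face detection";

int main()
{
    // Stock OpenCV cascade files; adjust the paths as needed
    if (!face_cascade.load("haarcascade_frontalface_alt.xml") ||
        !eyes_cascade.load("haarcascade_eye_tree_eyeglasses.xml"))
    {
        std::cerr << "Error loading cascade files" << std::endl;
        return -1;
    }

    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    cv::Mat frame;
    while (cap.read(frame) && !frame.empty())
    {
        detectAndDisplay(frame);
        if ((char)cv::waitKey(10) == 'q') break; // press 'q' to quit
    }
    return 0;
}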
Code Example #12
File: main.cpp  Project: 4sskick/Eye-Tracking_robust
/**
 * @function main
 */
int main( int argc, const char** argv ) {

	srand(time(NULL)); int ra; // ra is only used by the commented-out random-walk code below

/*	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 400, 400, CV_8UC3 );
	MyFilledCircle( circ_image, Point( 100, 100) );
	imshow( circ_window, circ_image );
	cv::setWindowProperty( circ_window, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	moveWindow( circ_window, 900, 200 );*/
	//sleep(8);

	CvCapture* capture;
	cv::Mat frame;
	
	// Load the cascades
	if( !face_cascade.load( face_cascade_name ) ){
		printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n");
		return -1;
	};

	cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(main_window_name, 400, 100);
	cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(face_window_name, 10, 100);
	cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Right Eye", 10, 600);
	cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Left Eye", 10, 800);
	cv::namedWindow("aa",CV_WINDOW_NORMAL);
	cv::moveWindow("aa", 10, 800);
	cv::namedWindow("aaa",CV_WINDOW_NORMAL);
	cv::moveWindow("aaa", 10, 800);

	createCornerKernels();
	ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
			43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

	// Read the video stream
	capture = cvCaptureFromCAM( -1 );
	if( capture ) {
		while( true ) {
	
	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 414, 414, CV_8UC3 );
	//ra = rand()%4;
	//if (ra==1) rx+=1; else if(ra==2) rx-=1; else if(ra==3) ry+=1; else ry-=1; rx+=1; if(rx==500) rx=0;

	if(stage1 && !stage2)
	{
		if(rx>=6 && rx<=400 && ry==6)
		{
			// Top edge: move the dot right, accumulating vertical pupil positions
			rx += 10;
			tl += lpy;
			tr += rpy;
			countert++;
		}
		else if(rx>=400 && ry<400)
		{
			// Right edge: move the dot down, accumulating horizontal pupil positions
			ry += 10;
			rl += lpx;
			rr += rpx;
			counterl++;
		}
		else if(ry>=400 && rx>6)
		{
			// Bottom edge: move the dot left
			rx -= 10;
			bl += lpy;
			br += rpy;
		}
		else if(rx<=6 && ry>20)
		{
			// Left edge: move the dot up
			ry -= 10;
			ll += lpx;
			lr += rpx;
		}
		else if(rx<=6 && ry<=20 && ry>6)
		{
			// The dot has completed a full lap: switch to the averaging stage
			stage1 = 0;
			stage2 = 1;
		}
	}
	if(!stage1 && stage2)
	{
		// Average the accumulated pupil positions to get per-edge calibration bounds
		tal = tl / countert;
		tar = tr / countert;
		bar = br / countert;
		bal = bl / countert;
		lal = ll / (counterl-1);
		lar = lr / (counterl-1);
		ral = rl / counterl;
		rar = rr / counterl;
		std::cout<<tal<<" : "<<tar<<" : "<<lal<<" : "<<lar<<std::endl;
		std::cout<<ral<<" : "<<rar<<" : "<<bal<<" : "<<bar<<std::endl;
		stage2 = 0;
		rx=200;ry=200;
	}

	if(!stage1 && !stage2)
	{
		// Check whether the current pupil positions fall inside the calibrated bounds
		if( //lpx >= lal && 
			lpx <= ral &&
			//rpx >= lar &&
			rpx <= rar &&
			//lpy >= tal &&
			lpy <= bal &&
			//rpy >= tar &&
			rpy <= bar )
			std::cout<<"INSIDE\n";
		else
			std::cout<<"OUTSIDE\n";
	}

	/*	if(rx<200 ) {rx++; px=100; py=100;}
		else if(rx<400) {rx++; px=200; py=300;}
		else if(rx < 600) {rx++; px=400; py =200;}
		else rx=0;*/
	arr[rx][ry][0]=lpx1; arr[rx][ry][1]=lpy1; // was [0] twice, which overwrote the x value

	//int px,py;
	MyFilledCircle( circ_image, Point( rx, ry) );
	setWindowProperty( circ_window, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	imshow( circ_window, circ_image );
	moveWindow( circ_window, 0, 0 );




	frame = cvQueryFrame( capture );
	// Optionally downscale the frame to speed up detection
	// (the scale factor here is 1, so the resolution is actually unchanged)
	cv::Mat smallFrame;
	cv::resize(frame, smallFrame, cv::Size(round(1*frame.cols), round(1*frame.rows)), 1, 1, cv::INTER_LANCZOS4);

	// mirror it
	cv::flip(smallFrame, smallFrame, 1);
	smallFrame.copyTo(debugImage);

	// Apply the classifier to the frame
	if( !smallFrame.empty() ) {
		detectAndDisplay( smallFrame );
	}
	else {
		printf(" --(!) No captured frame -- Break!");
		break;
	}

	imshow(main_window_name,debugImage);

	int c = cv::waitKey(10);
	if( (char)c == 'c' ) { break; }
	if( (char)c == 'f' ) {
		imwrite("frame.png",smallFrame);
	}

		}
	}

	releaseCornerKernels();

	return 0;
}
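
In short, the loop above runs a three-stage calibration: a dot laps the screen border while pupil positions are accumulated, the averages become per-edge bounds, and from then on each frame is classified as INSIDE or OUTSIDE those bounds. MyFilledCircle and the calibration globals (rx, ry, lpx, lpy, ...) are defined elsewhere in this project and are not shown in this excerpt. For reference, a plausible MyFilledCircle in the style of the OpenCV basic-drawing tutorial it appears to be borrowed from (an assumption, not the project's actual code):

// Assumed helper, modeled on the OpenCV basic-drawing tutorial; the real
// definition may differ. Draws the filled dot used as the moving gaze target.
void MyFilledCircle( cv::Mat img, cv::Point center )
{
	int thickness = -1; // negative thickness draws a filled circle
	int lineType  = 8;
	cv::circle( img, center, 8, cv::Scalar( 255, 255, 255 ), thickness, lineType );
}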
Code Example #13
File: main.cpp  Project: sarmadm/Eye-Tracking-1
int main(int argc, char** argv)
{
    // Load the cascade classifiers
    // Make sure you point the XML files to the right path, or
    // just copy the files from [OPENCV_DIR]/data/haarcascades directory
    face_cascade.load("haarcascade_frontalface_alt2.xml");
    eye_cascade.load("haarcascade_eye.xml");

    std::cout << "==================\n";

    // Open webcam
    cv::VideoCapture cap(0);
    // Check if everything is ok
    if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
    {
        std::cout << "bad\n";
        return 1;
    }

    cap.set(CV_CAP_PROP_FRAME_WIDTH, WIDTH);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
    cv::Mat frame, eye_tpl;
    cv::Rect eye_bb;
    // Evaluate waitKey once per iteration; calling it twice waited twice and could swallow the key press
    int key;
    while ((key = cv::waitKey(15)) != 'q' && key != 'Q')
    {
        cap >> frame;
        if (frame.empty()) break;
        // Flip the frame horizontally; Windows users might need this
        cv::flip(frame, frame, 1);
        // Convert to greyscale and
        // adjust the image contrast using histogram equalization
        cv::Mat gray;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        if (eye_bb.width == 0 && eye_bb.height == 0)
        {
            // Detection stage
            // Try to detect the face and the eye of the user
            detectEye(gray, eye_tpl, eye_bb);
        }
        else
        {
            // Tracking stage with template matching
            trackEye(gray, eye_tpl, eye_bb);
            // Draw bounding rectangle for the eye
            cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
        }

        {//drawing grids

            struct Line { cv::Point from, to; };
            using Lines = std::vector<Line>;
            Lines lines{
                    { { 213, 0 }, { 213, 480 } },
                    { { 427, 0 }, { 427, 480 } },
                    { { 0, 160 }, { 640, 160 } },
                    { { 0, 320 }, { 640, 320 } }
            };
            for (auto const& l : lines)
                cv::line(frame, l.from, l.to, CV_RGB(0,255,0), 1); // thickness 1, default line type (1 was not a valid lineType)
        }

        {//generate direction command
            std::vector<cv::Rect> direction_boxes{
                    cv::Rect{cv::Point{213,   0}, cv::Point{427, 160}}, //F
                    cv::Rect{cv::Point{  0, 160}, cv::Point{213, 320}}, //L
                    cv::Rect{cv::Point{427, 160}, cv::Point{640, 320}}  //R
            };

            auto draw_direction = [&](std::string const &direction) {
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(70, 130, 180),  5);
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(102, 105, 170), 4);
            };

            // Draw the label of the first direction box that contains the eye center
            static const char* box_labels[3] = { "F", "L", "R" };
            for (int box = 0; box != 3; ++box)
            {
                if (direction_boxes[box].contains(center_of_rect(eye_bb)))
                {
                    draw_direction(box_labels[box]);
                    break;
                }
            }
            std::cout << center_of_rect(eye_bb).x << std::endl;
        }

        cv::imshow("video", frame);
    }
    return 0;
}
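
center_of_rect, detectEye, and trackEye are defined elsewhere in this project's main.cpp and are not part of the excerpt. A plausible sketch of two of them, written to match the comments' stated intent (template matching for the tracking stage); these are assumptions, not the project's actual definitions:

// Assumed helpers; the project's real definitions may differ.
#include <opencv2/imgproc/imgproc.hpp>

// Center point of a rectangle, used to decide which direction box the eye is in.
cv::Point center_of_rect(const cv::Rect& r)
{
    return cv::Point(r.x + r.width / 2, r.y + r.height / 2);
}

// Minimal template-matching tracker matching the "Tracking stage" comment:
// search for the stored eye template near its previous position and update the box.
void trackEye(cv::Mat& gray, cv::Mat& tpl, cv::Rect& bb)
{
    // Search window: twice the size of the current box, centered on it
    cv::Rect window(bb.x - bb.width / 2, bb.y - bb.height / 2,
                    bb.width * 2, bb.height * 2);
    window &= cv::Rect(0, 0, gray.cols, gray.rows); // clamp to the image
    if (window.width < tpl.cols || window.height < tpl.rows)
    {
        bb = cv::Rect(); // lost the eye; fall back to the detection stage
        return;
    }

    cv::Mat result;
    cv::matchTemplate(gray(window), tpl, result, CV_TM_SQDIFF_NORMED);

    double minval;
    cv::Point minloc;
    cv::minMaxLoc(result, &minval, 0, &minloc, 0);

    if (minval <= 0.2) // good match: move the bounding box to the best location
    {
        bb.x = window.x + minloc.x;
        bb.y = window.y + minloc.y;
    }
    else
        bb = cv::Rect(); // poor match: trigger re-detection on the next frame
}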