Code example #1
File: eyedet.cpp  Project: Keerecles/libra
static void DetectAllEyes(
    vec_Rect&    leyes,    // out
    vec_Rect&    reyes,    // out
    const Image& img,      // in
    EYAW         eyaw,     // in
    const Rect&  facerect) // in
{
    CV_Assert(!leye_det_g.empty()); // detector initialized?
    CV_Assert(!reye_det_g.empty());

    // 1.2 is 40ms faster than 1.1 but finds slightly fewer eyes
    static const double EYE_SCALE_FACTOR   = 1.2;
    static const int    EYE_MIN_NEIGHBORS  = 3;
    static const int    EYE_DETECTOR_FLAGS = 0;

    Rect leftrect(EyeSearchRect(eyaw, facerect, false));

    if (leftrect.width)
        leyes = Detect(img, &leye_det_g, &leftrect,
                       EYE_SCALE_FACTOR, EYE_MIN_NEIGHBORS, EYE_DETECTOR_FLAGS,
                       facerect.width / 10);

    Rect rightrect(EyeSearchRect(eyaw, facerect, true));

    if (rightrect.width)
        reyes = Detect(img, &reye_det_g, &rightrect,
                       EYE_SCALE_FACTOR, EYE_MIN_NEIGHBORS, EYE_DETECTOR_FLAGS,
                       facerect.width / 10);
}
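
Detect() is a helper defined elsewhere in stasm and not part of this excerpt. As a rough sketch only, inferred from the calls in code examples #1 and #8 (code example #6 comes from a different project version and passes the detector by reference), it wraps cv::CascadeClassifier::detectMultiScale over an optional search rectangle, assuming Image and vec_Rect are the cv::Mat and std::vector<cv::Rect> typedefs used throughout:

// Sketch of the Detect() helper, inferred from its call sites; the real
// stasm implementation may differ in signature and details.
static vec_Rect Detect(
    const Image&           img,           // in
    cv::CascadeClassifier* det,           // in: an open detector
    const Rect*            searchrect,    // in: NULL means search whole img
    double                 scale_factor,  // in
    int                    min_neighbors, // in
    int                    flags,         // in
    int                    minwidth)      // in: min object width in pixels
{
    Rect rect(searchrect? *searchrect: Rect(0, 0, img.cols, img.rows));
    Image roi(img, rect); // restrict the search to the given rectangle
    vec_Rect objects;
    det->detectMultiScale(roi, objects, scale_factor, min_neighbors, flags,
                          cv::Size(minwidth, minwidth));
    for (int i = 0; i < NSIZE(objects); i++) // back to whole-image coords
    {
        objects[i].x += rect.x;
        objects[i].y += rect.y;
    }
    return objects;
}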
Code example #2
File: eye-tracking.cpp  Project: 0991/opencv-code
int main(int argc, char** argv)
{
	// Load the cascade classifiers
	// Make sure you point the XML files to the right path, or 
	// just copy the files from [OPENCV_DIR]/data/haarcascades directory
	face_cascade.load("haarcascade_frontalface_alt2.xml");
	eye_cascade.load("haarcascade_eye.xml");

	// Open webcam
	cv::VideoCapture cap(0);

	// Check if everything is ok
	if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
		return 1;

	// Set video to 320x240
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

	cv::Mat frame, eye_tpl;
	cv::Rect eye_bb;

	while (cv::waitKey(15) != 'q')
	{
		cap >> frame;
		if (frame.empty())
			break;

		// Flip the frame horizontally; Windows users might need this
		cv::flip(frame, frame, 1);

		// Convert the frame to grayscale
		cv::Mat gray;
		cv::cvtColor(frame, gray, CV_BGR2GRAY);

		if (eye_bb.width == 0 && eye_bb.height == 0)
		{
			// Detection stage
			// Try to detect the face and the eye of the user
			detectEye(gray, eye_tpl, eye_bb);
		}
		else
		{
			// Tracking stage with template matching
			trackEye(gray, eye_tpl, eye_bb);

			// Draw bounding rectangle for the eye
			cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
		}

		// Display video
		cv::imshow("video", frame);
	}

	return 0;
}
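
detectEye() and trackEye() are defined elsewhere in this project. As a hedged sketch, the tracking stage can be implemented with cv::matchTemplate over a search window around the last known eye position, assuming eye_tpl was cropped from the frame during the detection stage (this is not necessarily identical to the project's code):

// Minimal sketch of trackEye(): look for the stored eye template in a
// window around the previous position and update the bounding box.
void trackEye(cv::Mat& im, cv::Mat& tpl, cv::Rect& rect)
{
	// Search window: the previous bounding box, padded on every side
	cv::Size size(rect.width * 2, rect.height * 2);
	cv::Rect window(rect + size - cv::Point(size.width / 2, size.height / 2));
	window &= cv::Rect(0, 0, im.cols, im.rows); // clip to the image

	cv::Mat dst;
	cv::matchTemplate(im(window), tpl, dst, CV_TM_SQDIFF_NORMED);

	double minval, maxval;
	cv::Point minloc, maxloc;
	cv::minMaxLoc(dst, &minval, &maxval, &minloc, &maxloc);

	if (minval <= 0.2) // good match: update the box position
	{
		rect.x = window.x + minloc.x;
		rect.y = window.y + minloc.y;
	}
	else // lost the eye: clear the box so the detection stage runs again
		rect.width = rect.height = 0;
}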
Code example #3
File: facedet.cpp  Project: Keerecles/libra
void FaceDet::DetectFaces_(  // call once per image to find all the faces
    const Image& img,        // in: the image (grayscale)
    const char*,             // in: unused (match virt func signature)
    bool         multiface,  // in: if false, want only the best face
    int          minwidth,   // in: min face width as percentage of img width
    void*        user)       // in: unused (match virt func signature)
{
    CV_Assert(user == NULL);
    CV_Assert(!facedet_g.empty()); // check that OpenFaceDetector_ was called
    DetectFaces(detpars_, img, minwidth);
    TraceFaces(detpars_, img, "facedet_BeforeDiscardMissizedFaces.bmp");
    DiscardMissizedFaces(detpars_);
    TraceFaces(detpars_, img, "facedet_AfterDiscardMissizedFaces.bmp");
    if (multiface) // order faces on increasing distance from left margin
    {
        sort(detpars_.begin(), detpars_.end(), IncreasingLeftMargin);
        TraceFaces(detpars_, img, "facedet.bmp");
    }
    else
    {
        // order faces on decreasing width, keep only the first (the largest face)
        sort(detpars_.begin(), detpars_.end(), DecreasingWidth);
        TraceFaces(detpars_, img, "facedet.bmp");
        if (NSIZE(detpars_))
            detpars_.resize(1);
    }
    iface_ = 0; // next invocation of NextFace_ must get first face
}
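
IncreasingLeftMargin and DecreasingWidth are sort predicates defined elsewhere in stasm. Plausible sketches, assuming DetPar's x field is the face center and width its width as in code example #6 (the originals may compare slightly different expressions):

// Plausible sketches of the sort predicates used above (assumptions).
static bool IncreasingLeftMargin( // order faces left to right
    const DetPar& detpar1,        // in
    const DetPar& detpar2)        // in
{
    return detpar1.x - detpar1.width / 2 < detpar2.x - detpar2.width / 2;
}

static bool DecreasingWidth( // put the widest face first
    const DetPar& detpar1,   // in
    const DetPar& detpar2)   // in
{
    return detpar1.width > detpar2.width;
}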
Code example #4
File: facedetect.cpp  Project: ddennedy/frei0r
    void update(double time,
                uint32_t* out,
                const uint32_t* in)
    {
        if (cascade.empty()) {
            cv::setNumThreads(cvRound(threads * 100));
            if (classifier.length() > 0) {
                if (!cascade.load(classifier.c_str()))
                    fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
            }
            else {
                memcpy(out, in, size * 4);
                return;
            }
        }

        // sanitize parameters
        search_scale = CLAMP(search_scale, 0.11, 1.0);
        neighbors = CLAMP(neighbors, 0.01, 1.0);

        // copy input image to OpenCV
        image = cv::Mat(height, width, CV_8UC4, (void*)in);

        // only re-detect periodically to control performance and reduce shape jitter
        int recheckInt = abs(cvRound(recheck * 1000));
        if ( recheckInt > 0 && count % recheckInt )
        {
            // skip detect
            count++;
//            fprintf(stderr, "draw-only counter %u\n", count);
        }
        else
        {
            count = 1;   // reset the recheck counter
            if (objects.size() > 0) // reset the list of objects
                objects.clear();
            double elapsed = (double) cvGetTickCount();

            objects = detect();

            // use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
            elapsed = cvGetTickCount() - elapsed;
            elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);

            // A negative recheck value enables automatic recheck; negative
            // parameter values are outside the frei0r spec but work in practice.
            if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
                    count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
//            fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
        }
        
        draw();

        // copy filtered OpenCV image to output
        memcpy(out, image.data, size * 4);
    }
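
CLAMP is not defined in this excerpt; frei0r plugins normally pull it in from frei0r_math.h. A minimal equivalent, shown here only as an assumption about what the macro does:

// Assumed equivalent of the CLAMP macro used above (frei0r provides its
// own definition; this is only an illustration).
#ifndef CLAMP
#define CLAMP(value, low, high) \
    ((value) < (low)? (low): ((value) > (high)? (high): (value)))
#endif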
Code example #5
File: eyedet.cpp  Project: Keerecles/libra
void OpenEyeMouthDetectors(
    const vec_Mod& mods,    // in: the ASM models (used to see if we need eyes or mouth)
    const char*    datadir) // in
{
    static bool needeyes = true; // static for efficiency
    if (needeyes && leye_det_g.empty()) // not yet opened?
    {
        // we need the eyes if the estart field of any model
        // is ESTART_EYES or ESTART_EYE_AND_MOUTH
        needeyes = false;
        for (int imod = 0; imod < NSIZE(mods); imod++)
            if (mods[imod]->Estart_() == ESTART_EYES ||
                    mods[imod]->Estart_() == ESTART_EYE_AND_MOUTH)
                needeyes = true;
        if (needeyes)
        {
            // I tried all the eye XML files that come with OpenCV 2.1 and found that
            // the files used below give the best results.  The other eye XML files
            // often failed to detect eyes, even with EYE_MIN_NEIGHBORS=1.
            //
            // In the XML filenames, "left" was verified empirically by me to respond
            // to the image left (not the subject's left).  I tested this on
            // the MUCT and BioID sets: haarcascade_mcs_lefteye.xml finds more eyes
            // on the viewer's left than it finds on the right (milbo Lusaka Dec 2011).

            OpenDetector(leye_det_g,  "haarcascade_mcs_lefteye.xml",  datadir);
            OpenDetector(reye_det_g,  "haarcascade_mcs_righteye.xml", datadir);
        }
    }
    static bool needmouth = true; // static for efficiency
    if (needmouth && mouth_det_g.empty()) // not yet opened?
    {
        // we need the mouth if the estart field of any model is ESTART_EYE_AND_MOUTH
        needmouth = false;
        for (int imod = 0; imod < NSIZE(mods); imod++)
            if (mods[imod]->Estart_() == ESTART_EYE_AND_MOUTH)
                needmouth = true;
        if (needmouth)
            OpenDetector(mouth_det_g, "haarcascade_mcs_mouth.xml", datadir);
    }
}
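
OpenDetector() is another stasm helper not shown here. A hypothetical sketch, assuming it simply builds the path and loads the cascade (the real helper differs at least in its error handling):

// Hypothetical sketch of OpenDetector(): load a cascade XML file from
// datadir into det if not already open.
static void OpenDetector(
    cv::CascadeClassifier& det,      // out
    const char*            filename, // in: e.g. "haarcascade_mcs_mouth.xml"
    const char*            datadir)  // in
{
    if (det.empty())
    {
        char path[1024];
        sprintf(path, "%s/%s", datadir, filename);
        if (!det.load(path))
            CV_Error(CV_StsError, "cannot load cascade file");
    }
}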
Code example #6
File: facedet.cpp  Project: andreajeka/minimal-stasm
void DetectFaces(          // all face rects into detpars
    vec_DetPar&  detpars,  // out
    const Image& img,      // in
    int          minwidth) // in: as percent of img width
{
    CV_Assert(!facedet_g.empty()); // check that OpenFaceDetector_ was called

    int leftborder = 0, topborder = 0; // border size in pixels
    Image bordered_img(BORDER_FRAC == 0?
                       img: EnborderImg(leftborder, topborder, img));

    // Detection results are very slightly better with equalization
    // (tested on the MUCT images, which are not pre-equalized), and
    // it's quick enough to equalize (roughly 10ms on a 1.6 GHz laptop).

    Image equalized_img;
    cv::equalizeHist(bordered_img, equalized_img);

    CV_Assert(minwidth >= 1 && minwidth <= 100);

    // TODO The smallest BioID faces are about 80 pixels wide, hence the 70 below
    const int minpix =
        MAX(minwidth <= 5? 70: 100, cvRound(img.cols * minwidth / 100.));

    // the params below are accurate but slow
    static const double SCALE_FACTOR   = 1.1;
    static const int    MIN_NEIGHBORS  = 3;
    static const int    DETECTOR_FLAGS = 0;

    vec_Rect facerects = // all face rects in image
        Detect(equalized_img, facedet_g, NULL,
               SCALE_FACTOR, MIN_NEIGHBORS, DETECTOR_FLAGS, minpix);

    // copy face rects into the detpars vector

    detpars.resize(NSIZE(facerects));
    for (int i = 0; i < NSIZE(facerects); i++)
    {
        Rect* facerect = &facerects[i];
        DetPar detpar; // detpar constructor sets all fields INVALID
        // detpar.x and detpar.y are the center of the face rectangle
        detpar.x = facerect->x + facerect->width / 2.;
        detpar.y = facerect->y + facerect->height / 2.;
        detpar.x -= leftborder; // discount the border we added earlier
        detpar.y -= topborder;
        detpar.width  = double(facerect->width);
        detpar.height = double(facerect->height);
        detpar.yaw = 0; // assume face has no yaw in this version of Stasm
        detpar.eyaw = EYAW00;
        detpars[i] = detpar;
    }
}
Code example #7
// FLASH: load face detection cascade XML files from Flash byte arrays.
// clib.supplyFile must be called from Flash before invoking this method.
static AS3_Val loadCascade(void* self, AS3_Val args)
{
	//parse parameters
	char * cascadeType;
	char * cascadeFileName;
	AS3_ArrayValue(args,"StrType, StrType", &cascadeType, &cascadeFileName);
	
	FILE * file;
	long fileSize;
	char * fileBuffer;
	file = fopen(cascadeFileName, "rb");
	if (file == NULL) {
		fprintf(stderr, "[OPENCV] loadCascade: cannot open %s\n", cascadeFileName);
		return 0;
	}
 
	//Get file size
	fseek (file, 0, SEEK_END);
	fileSize = ftell(file);
	rewind(file);
 
	//Allocate buffer
	fileBuffer = (char*) malloc(sizeof(char)*fileSize);
 
	//Read file into buffer
	fread(fileBuffer, 1, fileSize, file);
	fprintf(stderr, "[OPENCV] loadCascades: %s : %s", cascadeType, cascadeFileName);
	
	//CV CascadeClassifier instance init
	/*cv::FileStorage cascadeFileStorage;
	cascadeFileStorage.open (cascadeFileName, cv::FileStorage::READ);
	fprintf(stderr, "[OPENCV] loadCascades: cascade files empty : %d", cascadeFileStorage.getFirstTopLevelNode().empty());
	bool success = cascade.read (cascadeFileStorage.getFirstTopLevelNode());
	fprintf(stderr, "[OPENCV] loadCascades: cascade files empty : %d", cascade.empty());
	cascadeFileStorage.release ();*/
	
	/*CvHaarClassifierCascade* cascadeClassifier = (CvHaarClassifierCascade*) cvLoad(cascadeFileName,0,0,0);
	fprintf(stderr, "[OPENCV] loadCascades: cascade files loaded");*/
	
	cv::FileNode cascadeFileNode;
	cascadeFileNode.readRaw ("xml", reinterpret_cast<uchar*>(fileBuffer), fileSize);
	bool success = cascade.read (cascadeFileNode);
	fprintf(stderr, "[OPENCV] loadCascades: cascade files empty : %d", cascade.empty());
	if (success) {
		fprintf(stderr, "[OPENCV] loadCascades: cascade files loaded with success !");
	} else {
		fprintf(stderr, "[OPENCV] loadCascades: cascade files failed to load !");
	}
	
	//close file and free allocated buffer
	fclose (file);
	free (fileBuffer);
	
	return 0;
}
Code example #8
File: eyedet.cpp  Project: Keerecles/libra
static void DetectAllMouths(
    vec_Rect&       mouths,      // out
    const Image&    img,         // in
    EYAW            eyaw,        // in
    const Rect&     facerect,    // in
    int             ileft_best,  // in
    int             iright_best, // in
    const vec_Rect& leyes,       // in
    const vec_Rect& reyes)       // in
{
    CV_Assert(!mouth_det_g.empty()); // detector initialized?

    static const double MOUTH_SCALE_FACTOR   = 1.2; // less false pos with 1.2 than 1.1
    static const int    MOUTH_MIN_NEIGHBORS  = 5;   // less false pos with 5 than 3
    static const int    MOUTH_DETECTOR_FLAGS = 0;

    Rect mouth_rect(MouthRect(facerect,
                              eyaw, ileft_best, iright_best, leyes, reyes));

    mouths =
        Detect(img, &mouth_det_g, &mouth_rect,
               MOUTH_SCALE_FACTOR, MOUTH_MIN_NEIGHBORS, MOUTH_DETECTOR_FLAGS,
               facerect.width / 10);
}
Code example #9
void FaceRecognition::detectAndDraw(cv::Mat image,
                                    cv::CascadeClassifier &cascade,
                                    cv::CascadeClassifier &nested_cascade,
                                    double scale,
                                    bool try_flip)
{
    int    i    = 0;
    double tick = 0.0;
    std::vector<cv::Rect> faces_a, faces_b;
    cv::Scalar colors[] = {
        CV_RGB(0, 0, 255),
        CV_RGB(0, 128, 255),
        CV_RGB(0, 255, 255),
        CV_RGB(0, 255, 0),
        CV_RGB(255, 128, 0),
        CV_RGB(255, 255, 0),
        CV_RGB(255, 0, 0),
        CV_RGB(255, 0, 255)
    };
    cv::Mat image_gray;
    cv::Mat image_small(cvRound(image.rows / scale),
                        cvRound(image.cols / scale),
                        CV_8UC1);
    // Convert to gray image.
    cv::cvtColor(image, image_gray, CV_BGR2GRAY);
    // Convert gray image to small size.
    cv::resize(image_gray, image_small, image_small.size(), 0, 0,
               cv::INTER_LINEAR);
    cv::equalizeHist(image_small, image_small);

    tick = (double)cvGetTickCount();
    cascade.detectMultiScale(image_small, faces_a, 1.1, 2, 0 |
                             CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    if (try_flip) {
        cv::flip(image_small, image_small, 1);
        cascade.detectMultiScale(image_small, faces_b, 1.1, 2, 0 |
                                 CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
        std::vector<cv::Rect>::const_iterator it = faces_b.begin();
        for (; it != faces_b.end(); it++) {
            faces_a.push_back(cv::Rect(image_small.cols - it->x - it->width,
                                       it->y, it->width, it->height));
        }
    }

    // Calculate the detection time.
    tick = (double)cvGetTickCount() - tick;
    std::cout << "Detection time: "
              << tick / ((double)cvGetTickFrequency() * 1000.0)
              << " ms"
              << std::endl;

    std::vector<cv::Rect>::const_iterator it = faces_a.begin();
    for (; it != faces_a.end(); it++, i++) {
        int radius;
        double aspect_ratio = (double)it->width / it->height;
        std::vector<cv::Rect> nested_objects;
        cv::Mat small_image_roi;
        cv::Point center;
        cv::Scalar color = colors[i % 8];

        // Capture detected face and predict it.
        cv::Mat image_gray;
        cv::Mat image_result(cvRound(IMG_HEIGH), cvRound(IMG_WIDTH), CV_8UC1);
        cv::Mat image_temp;
        cv::Rect rect;
        rect.x = cvRound(it->x * scale);
        rect.y = cvRound(it->y * scale);
        rect.height = cvRound(it->height * scale);
        rect.width  = cvRound(it->width  * scale);
        image_temp  = image(rect);
        cv::cvtColor(image_temp, image_gray, CV_BGR2GRAY);
        cv::resize(image_gray, image_result, image_result.size(), 0, 0,
                   cv::INTER_LINEAR);
        int predicted_label = g_model->predict(image_result);

        std::cout << "*************************" << std::endl
                  << "The predicted label: "     << predicted_label
                  << std::endl
                  << "*************************"
                  << std::endl;

        // Recognize specific face for sending character to serial device.
        if (predicted_label == 1) {
            g_face_recognition.writeCharToSerial('Y');
        }
        else {
            g_face_recognition.writeCharToSerial('N');
        }

        // Draw the circle for faces.
        if (0.75 < aspect_ratio && aspect_ratio < 1.3) {
            center.x = cvRound((it->x + it->width * 0.5) * scale);
            center.y = cvRound((it->y + it->height * 0.5) * scale);
            radius = cvRound((it->width + it->height) * 0.25 * scale);
            cv::circle(image, center, radius, color, 3, 8, 0);
        }
        else {
            // Draw the rectangle for faces.
            cv::rectangle(image,
                          cvPoint(cvRound(it->x * scale),
                                  cvRound(it->y * scale)),
                          cvPoint(cvRound((it->x + it->width  - 1) * scale),
                                  cvRound((it->y + it->height - 1) * scale)),
                          color,
                          3,
                          8,
                          0);
            if (nested_cascade.empty()) {
                continue;
            }
            small_image_roi = image_small(*it);
            nested_cascade.detectMultiScale(small_image_roi, nested_objects,
                                            1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
                                            cv::Size(30, 30));
            std::vector<cv::Rect>::const_iterator it_temp =
                nested_objects.begin();
            // Draw the circle for eyes.
            for (; it_temp != nested_objects.end(); it_temp++) {
                center.x = cvRound((it->x + it_temp->x + it_temp->width * 0.5)
                    * scale);
                center.y = cvRound((it->y + it_temp->y + it_temp->height * 0.5)
                    * scale);
                radius = cvRound((it_temp->width + it_temp->height) * 0.25
                    * scale);
                cv::circle(image, center, radius, color, 3, 8, 0);
            }
        }
    }
    // Open camera window.
    cv::imshow("Face Recognition", image);
}
Code example #10
// Function to detect and draw any faces that are present in an image
void FaceDetectModuleExt::detectAndDraw(cv::Mat& img, cv::CascadeClassifier& cascade, cv::CascadeClassifier& nestedCascade, double scale) {
	if (cascade.empty()) {
		return;
	}
	int i = 0;
	double t = 0;
	std::vector<cv::Rect> faces;
	const static cv::Scalar colors[] =  { CV_RGB(0,0,255),
			CV_RGB(0,128,255),
			CV_RGB(0,255,255),
			CV_RGB(0,255,0),
			CV_RGB(255,128,0),
			CV_RGB(255,255,0),
			CV_RGB(255,0,0),
			CV_RGB(255,0,255)} ;
	cv::Mat gray, smallImg(cvRound(img.rows/scale), cvRound(img.cols/scale), CV_8UC1);

	cv::cvtColor( img, gray, CV_BGR2GRAY );
	cv::resize( gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR );
	cv::equalizeHist( smallImg, smallImg );

	t = (double)cvGetTickCount();
	cascade.detectMultiScale( smallImg, faces,
			1.1, 2, 0
			//|CV_HAAR_FIND_BIGGEST_OBJECT
			//|CV_HAAR_DO_ROUGH_SEARCH
			|CV_HAAR_SCALE_IMAGE
			,
			cv::Size(30, 30) );
	t = (double)cvGetTickCount() - t;
	//printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
	std::cout << "[FaceDetect] detection time = " << t/((double)cvGetTickFrequency()*1000.) << " ms" << std::endl;
	for(std::vector<cv::Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
	{
		cv::Mat smallImgROI;
		std::vector<cv::Rect> nestedObjects;
		cv::Point center;
		cv::Scalar color = colors[i%8];
		int radius;
//		center.x = cvRound((r->x + r->width*0.5)*scale);
//		center.y = cvRound((r->y + r->height*0.5)*scale);
//		radius = cvRound((r->width + r->height)*0.25*scale);
//		cv::circle( img, center, radius, color, 3, 8, 0 );

		cv::rectangle(img, *r, color, 3, 8 ,0);
		if( nestedCascade.empty() )
			continue;
		smallImgROI = smallImg(*r);
		nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
				1.1, 2, 0
				//|CV_HAAR_FIND_BIGGEST_OBJECT
				//|CV_HAAR_DO_ROUGH_SEARCH
				//|CV_HAAR_DO_CANNY_PRUNING
				|CV_HAAR_SCALE_IMAGE
				,
				cv::Size(30, 30) );
		for(std::vector<cv::Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
		{
			center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
			center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
			radius = cvRound((nr->width + nr->height)*0.25*scale);
			cv::circle( img, center, radius, color, 3, 8, 0 );
		}
	}
	cv::imshow( "result", img );
	cv::waitKey(10);
}
Code example #11
/*
 * Class:     io_github_melvincabatuan_fullbodydetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_fullbodydetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access source array data... OK
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat wrappers: the grayscale Y plane of the YUV420sp source, and the BGRA output bitmap
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "haarcascade_fullbody.xml");       
    
      /* Load the full body cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){
           LOGE("Error loading full body cascade");
           abort();
       };

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Timings measured earlier with a similar cat face detector:
       // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2, 0, Size(30, 30) ); // 655.334471 ms
       // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3, 0, Size(64, 64) ); // 120.117185 ms

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // Size(double width, double height) 

      // scaleFactor determines how much the detection window is scaled up after each pass.
      // minNeighbors specifies how many positive neighbors a candidate rectangle needs
      // to be considered a match; if moving a candidate by one pixel no longer triggers
      // the classifier, it is most likely a false positive.  Candidates with fewer
      // positive neighbors than minNeighbors are rejected; if minNeighbors is zero,
      // all candidate rectangles are returned.
      // The flags parameter is from the OpenCV 1.x API and should always be 0.
      // minSize specifies the smallest object rectangle we're looking for.

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through the detected bodies and draw an ellipse around each
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Iterate through all faces and detecting eyes took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
   /// Release Java byte buffer and unlock backing bitmap
   pEnv-> ReleasePrimitiveArrayCritical(pSource,source,0);
   if (AndroidBitmap_unlockPixels(pEnv, pTarget) < 0) abort();
}
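
The parameter comments in this snippet apply to cv::CascadeClassifier::detectMultiScale in general. A minimal standalone illustration using the same OpenCV 2.x API; the cascade and image paths are placeholders, not taken from this project:

// Standalone illustration of the detectMultiScale parameters discussed
// above.  File paths are placeholders.
#include <vector>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::CascadeClassifier body;
    if (!body.load("haarcascade_fullbody.xml")) // placeholder path
        return 1;

    cv::Mat gray = cv::imread("people.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (gray.empty())
        return 1;

    std::vector<cv::Rect> hits;
    body.detectMultiScale(gray, hits,
                          1.2,               // scale step between passes
                          2,                 // minNeighbors: reject lone hits
                          0,                 // flags: legacy, keep 0
                          cv::Size(14, 28)); // smallest object searched for
    return hits.empty()? 1: 0;
}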
Code example #12
// Search for both eyes within the given face image. Returns the eye centers in 'leftEye' and 'rightEye',
// or sets them to (-1,-1) if each eye was not found. Note that you can pass a 2nd eyeCascade if you
// want to search eyes using 2 different cascades. For example, you could use a regular eye detector
// as well as an eyeglasses detector, or a left eye detector as well as a right eye detector.
// Or if you don't want a 2nd eye detection, just pass an uninitialized CascadeClassifier.
// Can also store the searched left & right eye regions if desired.
void preprocessFace::detectBothEyes(const cv::Mat &face, cv::CascadeClassifier &eyeCascade1, cv::CascadeClassifier &eyeCascade2, cv::Point &leftEye, cv::Point &rightEye, cv::Rect *searchedLeftEye, cv::Rect *searchedRightEye)
{
	// Skip the borders of the face, since they are usually just hair and ears, which we don't care about.
	/*
	// For "2splits.xml": Finds both eyes in roughly 60% of detected faces, also detects closed eyes.
	const float EYE_SX = 0.12f;
	const float EYE_SY = 0.17f;
	const float EYE_SW = 0.37f;
	const float EYE_SH = 0.36f;
	*/
	/*
	// For mcs.xml: Finds both eyes in roughly 80% of detected faces, also detects closed eyes.
	const float EYE_SX = 0.10f;
	const float EYE_SY = 0.19f;
	const float EYE_SW = 0.40f;
	const float EYE_SH = 0.36f;
	*/

	// For default eye.xml or eyeglasses.xml: Finds both eyes in roughly 40% of detected faces, but does not detect closed eyes.
	const float EYE_SX = 0.16f;
	const float EYE_SY = 0.26f;
	const float EYE_SW = 0.30f;
	const float EYE_SH = 0.28f;

	int leftX = cvRound(face.cols * EYE_SX);
	int topY = cvRound(face.rows * EYE_SY);
	int widthX = cvRound(face.cols * EYE_SW);
	int heightY = cvRound(face.rows * EYE_SH);
	int rightX = cvRound(face.cols * (1.0 - EYE_SX - EYE_SW));  // Start of the right-eye search region

	cv::Mat topLeftOfFace = face(cv::Rect(leftX, topY, widthX, heightY));
	cv::Mat topRightOfFace = face(cv::Rect(rightX, topY, widthX, heightY));
	cv::Rect leftEyeRect, rightEyeRect;

	// Return the search windows to the caller, if desired.
	if (searchedLeftEye)
		*searchedLeftEye = cv::Rect(leftX, topY, widthX, heightY);
	if (searchedRightEye)
		*searchedRightEye = cv::Rect(rightX, topY, widthX, heightY);

	// Search the left region, then the right region using the 1st eye detector.
	detector.detectLargestObject(topLeftOfFace, eyeCascade1, leftEyeRect, topLeftOfFace.cols);
	detector.detectLargestObject(topRightOfFace, eyeCascade1, rightEyeRect, topRightOfFace.cols);

	// If the eye was not detected, try a different cascade classifier.
	if (leftEyeRect.width <= 0 && !eyeCascade2.empty()) {
		detector.detectLargestObject(topLeftOfFace, eyeCascade2, leftEyeRect, topLeftOfFace.cols);
		//if (leftEyeRect.width > 0)
		//    cout << "2nd eye detector LEFT SUCCESS" << endl;
		//else
		//    cout << "2nd eye detector LEFT failed" << endl;
	}
	//else
	//    cout << "1st eye detector LEFT SUCCESS" << endl;

	// If the eye was not detected, try a different cascade classifier.
	if (rightEyeRect.width <= 0 && !eyeCascade2.empty()) {
		detector.detectLargestObject(topRightOfFace, eyeCascade2, rightEyeRect, topRightOfFace.cols);
		//if (rightEyeRect.width > 0)
		//    cout << "2nd eye detector RIGHT SUCCESS" << endl;
		//else
		//    cout << "2nd eye detector RIGHT failed" << endl;
	}
	//else
	//    cout << "1st eye detector RIGHT SUCCESS" << endl;

	if (leftEyeRect.width > 0) {   // Check if the eye was detected.
		leftEyeRect.x += leftX;    // Adjust the left-eye rectangle because the face border was removed.
		leftEyeRect.y += topY;
		leftEye = cv::Point(leftEyeRect.x + leftEyeRect.width / 2, leftEyeRect.y + leftEyeRect.height / 2);
	}
	else {
		leftEye = cv::Point(-1, -1);    // Return an invalid point
	}

	if (rightEyeRect.width > 0) { // Check if the eye was detected.
		rightEyeRect.x += rightX; // Adjust the right-eye rectangle, since it starts on the right side of the image.
		rightEyeRect.y += topY;  // Adjust the right-eye rectangle because the face border was removed.
		rightEye = cv::Point(rightEyeRect.x + rightEyeRect.width / 2, rightEyeRect.y + rightEyeRect.height / 2);
	}
	else {
		rightEye = cv::Point(-1, -1);    // Return an invalid point
	}
}
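
A hypothetical call site for detectBothEyes(), following the header comment above. The cascade file name and the surrounding helper function are illustrative assumptions, not taken from this project; leaving eyeCascade2 empty disables the fallback detector:

// Hypothetical usage of detectBothEyes(); names here are illustrative.
void exampleDetectBothEyes(preprocessFace& pre, const cv::Mat& face)
{
	cv::CascadeClassifier eyeCascade1, eyeCascade2;
	eyeCascade1.load("haarcascade_eye.xml");
	// eyeCascade2 left uninitialized: no 2nd eye detection attempt

	cv::Point leftEye, rightEye;
	cv::Rect searchedLeft, searchedRight;
	pre.detectBothEyes(face, eyeCascade1, eyeCascade2, leftEye, rightEye,
	                   &searchedLeft, &searchedRight);

	if (leftEye.x >= 0 && rightEye.x >= 0)
	{
		// both eyes found, e.g. warp the face so the eyes are level
	}
}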
Code example #13
File: eyedet.cpp  Project: Keerecles/libra
void DetectEyesAndMouth(  // use OpenCV detectors to find the eyes and mouth
    DetPar&       detpar, // io: eye and mouth fields updated, other fields untouched
    const Image&  img)    // in: ROI around face (already rotated if necessary)
{
    Rect facerect(cvRound(detpar.x - detpar.width/2),
                  cvRound(detpar.y - detpar.height/2),
                  cvRound(detpar.width),
                  cvRound(detpar.height));

    // possibly get the eyes

    detpar.lex = detpar.ley = INVALID; // mark eyes as unavailable
    detpar.rex = detpar.rey = INVALID;
    vec_Rect leyes, reyes;
    int ileft_best = -1, iright_best = -1; // indices into leyes and reyes
    if (!leye_det_g.empty()) // do we need the eyes? (depends on model estart field)
    {
        DetectAllEyes(leyes, reyes,
                      img, detpar.eyaw, facerect);

        SelectEyes(ileft_best, iright_best,
                   detpar.eyaw, leyes, reyes, EyeInnerRect(detpar.eyaw, facerect));

        if (ileft_best >= 0)
            RectToImgFrame(detpar.lex, detpar.ley,
                           leyes[ileft_best]);

        if (iright_best >= 0)
            RectToImgFrame(detpar.rex, detpar.rey,
                           reyes[iright_best]);
    }
    // possibly get the mouth

    detpar.mouthx = detpar.mouthy = INVALID;  // mark mouth as unavailable
    if (!mouth_det_g.empty()) // do we need the mouth? (depends on model estart field)
    {
        vec_Rect mouths;
        DetectAllMouths(mouths,
                        img, detpar.eyaw, facerect,
                        ileft_best, iright_best, leyes, reyes);

        if (!mouths.empty())
        {
            int imouth_best = -1;

            SelectMouth(imouth_best,
                        ileft_best, iright_best, leyes, reyes, mouths,
                        MouthInnerRect(facerect, detpar.eyaw,
                                       ileft_best, iright_best, leyes, reyes));

            if (imouth_best >= 0)
            {
                TweakMouthPosition(mouths,
                                   leyes, reyes, ileft_best, iright_best,
                                   imouth_best, detpar);

                RectToImgFrame(detpar.mouthx, detpar.mouthy,
                               mouths[imouth_best]);
            }
        }
    }
}
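
RectToImgFrame() reduces a detected feature rectangle to a single point. A plausible sketch, offered as an assumption (the stasm original may do additional coordinate adjustment):

// Plausible sketch of RectToImgFrame(): take the center of the feature
// rectangle as the feature position.
static void RectToImgFrame(
    double&     x,        // out: center of feature rect
    double&     y,        // out
    const Rect& featrect) // in
{
    x = featrect.x + featrect.width / 2.;
    y = featrect.y + featrect.height / 2.;
}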
Code example #14
File: facedetect.cpp  Project: ddennedy/frei0r
    std::vector<cv::Rect> detect()
    {
        std::vector<cv::Rect> faces;
        if (cascade.empty()) return faces;
        double scale = this->scale == 0? 1.0 : this->scale;
        cv::Mat image_roi = image;
        cv::Mat gray, small;
        int min = cvRound(smallest * 1000. * scale);
        
        // use a region of interest to improve performance
        // This idea comes from the More than Technical blog:
        // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
        if ( roi.width > 0 && roi.height > 0)
        {
            image_roi = image(roi);
        }

        // use an equalized grayscale to improve detection
        cv::cvtColor(image_roi, gray, CV_BGR2GRAY);

        // use a smaller image to improve performance
        cv::resize(gray, small, cv::Size(cvRound(gray.cols * scale), cvRound(gray.rows * scale)));
        cv::equalizeHist(small, small);
        
        // detect with OpenCV
        cascade.detectMultiScale(small, faces, 1.1, 2, 0, cv::Size(min, min));
        
#ifdef USE_ROI
        if (faces.size() == 0)
        {
            // clear the region of interest
            roi.width = roi.height = 0;
            roi.x = roi.y = 0;
        }
        else // at least one face found
        {
            // determine the region of interest from the detected objects
            int minx = width * scale;
            int miny = height * scale;
            int maxx = 0, maxy = 0;
            for (size_t i = 0; i < faces.size(); i++)
            {
                faces[i].x += roi.x * scale;
                faces[i].y += roi.y * scale;
                minx = MIN(faces[i].x, minx);
                miny = MIN(faces[i].y, miny);
                maxx = MAX(faces[i].x + faces[i].width, maxx);
                maxy = MAX(faces[i].y + faces[i].height, maxy);
            }
            minx = MAX(minx - PAD, 0);
            miny = MAX(miny - PAD, 0);
            maxx = MIN(maxx + PAD, width * scale);
            maxy = MIN(maxy + PAD, height * scale);

            // store the region of interest
            roi.x = minx / scale;
            roi.y = miny / scale;
            roi.width = (maxx - minx) / scale;
            roi.height = (maxy - miny) / scale; 
        }
#endif
        return faces;
    }
Code example #15
File: main.cpp  Project: sarmadm/Eye-Tracking-1
int main(int argc, char** argv)
{
    // Load the cascade classifiers
    // Make sure you point the XML files to the right path, or
    // just copy the files from [OPENCV_DIR]/data/haarcascades directory
    face_cascade.load("haarcascade_frontalface_alt2.xml");
    eye_cascade.load("haarcascade_eye.xml");

    std::cout << "==================\n";

    // Open webcam
    cv::VideoCapture cap(0);
    // Check if everything is ok
    if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
    {
        std::cout << "bad\n";
        return 1;
    }

    cap.set(CV_CAP_PROP_FRAME_WIDTH, WIDTH);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
    cv::Mat frame, eye_tpl;
    cv::Rect eye_bb;
    int key = 0;
    while ((key = cv::waitKey(15)) != 'q' && key != 'Q')
    {
        cap >> frame;
        if (frame.empty()) break;
        // Flip the frame horizontally; Windows users might need this
        cv::flip(frame, frame, 1);
        // Convert the frame to greyscale
        cv::Mat gray;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        if (eye_bb.width == 0 && eye_bb.height == 0)
        {
            // Detection stage
            // Try to detect the face and the eye of the user
            detectEye(gray, eye_tpl, eye_bb);
        }
        else
        {
            // Tracking stage with template matching
            trackEye(gray, eye_tpl, eye_bb);
            // Draw bounding rectangle for the eye
            cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
        }

        { // draw a 3x3 grid

            struct Line { cv::Point from, to; };
            using Lines = std::vector<Line>;
            Lines lines{
                    { { 213, 0 }, { 213, 480 } },
                    { { 427, 0 }, { 427, 480 } },
                    { { 0, 160 }, { 640, 160 } },
                    { { 0, 320 }, { 640, 320 } }
            };
            for (auto const& l : lines)
                cv::line(frame, l.from, l.to, CV_RGB(0,255,0), 1, 8);
        }

        { // generate the direction command
            std::vector<cv::Rect> direction_boxes{
                    cv::Rect{cv::Point{213,   0}, cv::Point{427, 160}}, //F
                    cv::Rect{cv::Point{  0, 160}, cv::Point{213, 320}}, //L
                    cv::Rect{cv::Point{427, 160}, cv::Point{640, 320}}  //R
            };

            auto draw_direction = [&](std::string const &direction) {
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(70, 130, 180),  5);
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(102, 105, 170), 4);
            };

            const char* labels[3] = { "F", "L", "R" };
            for (int box = 0; box != 3; ++box)
            {
                if (direction_boxes[box].contains(center_of_rect(eye_bb)))
                {
                    draw_direction(labels[box]);
                    break;
                }
            }
            std::cout << center_of_rect(eye_bb).x << std::endl;
        }

        cv::imshow("video", frame);
    }
    return 0;
}
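
center_of_rect() is not shown in this snippet. A plausible one-line helper, offered as an assumption consistent with how it is used above:

// Plausible definition of the center_of_rect() helper used above
// (an assumption; the project's version may differ).
static cv::Point center_of_rect(const cv::Rect& r)
{
    return cv::Point(r.x + r.width / 2, r.y + r.height / 2);
}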