/**
 * @function detectFace
 */
vector<Rect> detectFace(IplImage *frame, bool detectall)
{
   std::vector<Rect> faces, rects;
   Mat frame_gray;
   Mat frame1 = cvarrToMat(frame); // wrap the IplImage without copying pixel data
   cvtColor( frame1, frame_gray, COLOR_BGR2GRAY );

   equalizeHist( frame_gray, frame_gray );

   //-- Detect faces
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
   if (faces.size()>=1)
    {
      Mat faceROI = frame_gray( faces[0] );
      //-- Draw the face
      Point center( faces[0].x + faces[0].width/2, faces[0].y + faces[0].height/2 );
//      ellipse( frame1, center, Size( faces[0].width/2, faces[0].height/2), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
      Point face_topleft(faces[0].x+5, faces[0].y);
      Point face_bottomright(faces[0].x+faces[0].width-5, faces[0].y+faces[0].height+25);
      rectangle(frame1, face_topleft,face_bottomright, Scalar(0,255,0),3, 8, 0);
      rects.push_back(faces[0]);
      if (detectall)
      {
          std::vector<Rect> eyes;
          //-- In each face, detect eyes
          eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30) );
          if( eyes.size() == 2)
          {
              for( size_t j = 0; j < eyes.size(); j++ )
              { //-- Draw the eyes
                  Point eye_center( faces[0].x + eyes[j].x + eyes[j].width/2, faces[0].y + eyes[j].y + eyes[j].height/2 );
                  int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
                  circle( frame1, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
                  //            rectangle(frame1,Rect(faces[0].x+eyes[j].x, faces[0].y+eyes[j].y,eyes[j].width,eyes[j].height),Scalar(255,0,255),3,8,0);
                  rects.push_back(eyes[j]);
              }

          }
          std::vector<Rect> mouths;
          mouth_cascade.detectMultiScale(faceROI, mouths, 1.1, 2, 0, Size(30, 30));
          if (mouths.size()>=1)
          {
              rectangle(frame1, Rect(faces[0].x+mouths[0].x, faces[0].y+mouths[0].y,mouths[0].width,mouths[0].height),Scalar(0,255,255),3,8,0);
              rects.push_back(mouths[0]);
          }

          std::vector<Rect> noses;
          nose_cascade.detectMultiScale(faceROI, noses, 1.1, 2, 0, Size(30, 30));
          if (noses.size()==1)
          {
              Point nose_center( faces[0].x + noses[0].x + noses[0].width/2, faces[0].y + noses[0].y + noses[0].height/2 );
              int radius = cvRound( (noses[0].width + noses[0].height)*0.15 );
              circle( frame1, nose_center, radius, Scalar( 255, 0, 255), 3, 8, 0 );
              //          rectangle(frame1, Rect(faces[0].x+noses[0].x, faces[0].y+noses[0].y,noses[0].width,noses[0].height),Scalar(0,255,255),3,8,0);
          }
      }
    }
   // frame1 wraps the original buffer, so the annotations are already in *frame
   return rects;
}
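For context, a minimal capture loop that could drive detectFace. This is a sketch, not part of the original snippet: the camera index and window name are assumptions, and the cascades are presumed to be loaded into the globals beforehand.

int main()
{
   // Legacy C capture API, matching detectFace's IplImage* parameter
   CvCapture* capture = cvCaptureFromCAM(0);
   if (!capture) return -1;
   while (true)
   {
      IplImage* frame = cvQueryFrame(capture); // owned by capture; do not release
      if (!frame) break;
      std::vector<Rect> rects = detectFace(frame, true); // face + feature boxes
      cvShowImage("detect", frame);
      if (cvWaitKey(10) == 27) break; // Esc quits
   }
   cvReleaseCapture(&capture);
   return 0;
}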
Example #2
void detectAndDisplay(Mat frame)
{
	std::vector<Rect> faces;
	Mat frame_gray;

	cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
	equalizeHist(frame_gray, frame_gray);

	//-- Detect faces
	face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

	for (size_t i = 0; i < faces.size(); i++)
	{
		Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
		ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);

		Mat faceROI = frame_gray(faces[i]);
		std::vector<Rect> eyes;

		//-- In each face, detect eyes
		eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));


		for (size_t j = 0; j < eyes.size(); j++)
		{
			Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
			int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
			circle(frame, eye_center, radius, Scalar(255, 0, 0), 4, 8, 0);
		}
	}
	//-- Show what you got
	imshow(window_name, frame);
}
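This function follows the stock OpenCV object-detection tutorial, which keeps the classifiers and window name in globals. A minimal sketch of that setup; the cascade file paths are assumptions that depend on the local OpenCV install:

CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";

bool loadCascades()
{
	if (!face_cascade.load("haarcascade_frontalface_alt.xml")) return false;
	if (!eyes_cascade.load("haarcascade_eye_tree_eyeglasses.xml")) return false;
	return true;
}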
Example #3
void detectAndRecognize( Mat frame, Ptr<FaceRecognizer> model )
{
	std::vector<Rect> faces;
	Mat frame_gray;

	cvtColor( frame, frame_gray, CV_BGR2GRAY );
	//equalizeHist( frame_gray, frame_gray );

	//-- Detect faces
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
	//faces.push_back(Rect(Point(0,0),Size(frame.cols,frame.rows)));
	for( size_t i = 0; i < faces.size(); i++ )
	{
		Point bottom_right(faces[i].x + faces[i].width,faces[i].y + faces[i].height);
		Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
		ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

		Mat to_rec = frame_gray( faces[i] );
		cv::resize(to_rec,to_rec,cv::Size(NORM_IMG_WIDTH,NORM_IMG_HEIGHT));
#ifdef DO_EQUALIZE
		equalizeHist(to_rec,to_rec);
#endif

#ifdef FACE_DEBUG
		Mat rgb_to_rec;
		cvtColor( to_rec, rgb_to_rec, CV_GRAY2BGR );
		drawAonB(rgb_to_rec,frame,bottom_right/*+Point(0,NORM_IMG_HEIGHT)*/);
#endif

		int predictLabel=-1;
		double confidence=0.;
		model->predict(to_rec,predictLabel,confidence); // also fills in the distance-based confidence
		//printf("confidence: %lf\n",confidence);

		if(predictLabel==-1) continue;
		
 		string class_name=g_trainer.label(predictLabel);

		Mat avatar=imread(string("data/")+class_name+"/"+class_name+".avatar");
#ifndef FACE_DEBUG
		drawAonB(avatar,frame,bottom_right);
#endif
// 		if(!avatar.empty())
// 		{
// 			int w=min(max(frame.cols-1-bottom_right.x,0),avatar.cols),
// 				h=min(max(frame.rows-1-bottom_right.y,0),avatar.rows);
//  			cv::Rect avatar_roi( bottom_right, cv::Size(w,h));
//  			avatar(Rect(Point(0,0),Size(w,h))).copyTo( frame(avatar_roi) );
// 		}		
		
		putText(frame,class_name,bottom_right,FONT_HERSHEY_SIMPLEX, 1.5, Scalar(250,20,10),3);
	}

	//-- Show what you got
	imshow( CAPTURE_WND_NAME, frame );
}
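The Ptr<FaceRecognizer> argument has to be trained before this runs. A minimal sketch of preparing it, assuming the OpenCV 2.4 contrib API (the training images and labels come from elsewhere, and LBPH is just one possible algorithm):

Ptr<FaceRecognizer> makeModel(const vector<Mat>& images, const vector<int>& labels)
{
	Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
	// With a threshold set, predict() reports label -1 for matches whose
	// distance exceeds it -- exactly what the continue branch above relies on.
	model->set("threshold", 70.0);
	model->train(images, labels);
	return model;
}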
Example #4
void FaceRecognition::run()
{

    doStopMutex.lock();
    if(doStop)
        doStop = false;
    doStopMutex.unlock();

    processingMutex.lock();

    model->set("threshold", 70.0);

    cv::Mat frame_gray;

    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);
    cv::equalizeHist(frame_gray, frame_gray);

    height = images[0].rows;

    int predictionLabel;
    double confidence;

    QStringList predictedNames;

    //Detect faces
    faceCascade.detectMultiScale(frame_gray, faces, 1.1, 5, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    for (size_t i = 0; i < faces.size(); ++i)
    {
        cv::Mat faceROI = frame_gray(faces[i]);
        model->predict(faceROI, predictionLabel, confidence);

        if(predictionLabel != -1)
        {
            predictedNames.append(names.at(predictionLabel - 1));
        }
        else
        {
            predictedNames.append("Unknown");
        }
    }

    processingMutex.unlock();

    //Inform GUI of detected faces
    emit modifiedFrame(faces, predictedNames);
}
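The set("threshold", 70.0) call above is what makes predict() report label -1 for poor matches. A sketch of the same decision done by hand, assuming the 2.4 contrib predict(img, label, confidence) overload (the function name here is illustrative):

QString classify(cv::Ptr<cv::FaceRecognizer> model, const cv::Mat& faceROI,
                 const QStringList& names)
{
    int label = -1;
    double distance = 0.0;              // lower distance = better match
    model->predict(faceROI, label, distance);
    if (label == -1 || distance > 70.0) // mirrors the threshold property
        return "Unknown";
    return names.at(label - 1);         // labels are 1-based, as above
}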
Example #5
    void Test::detectAndDisplay(Mat frame)
    {
        std::vector<Rect> faces;
        //Mat frame_gray;
        
		//Use the red channel as the working image: cv::split separates the
		//B, G and R planes; index 2 is the red plane, which keeps good contrast
		//on skin, so it stands in here for an explicit grayscale conversion.
		std::vector<cv::Mat> rgbChannels(3);
		cv::split(frame, rgbChannels);
		cv::Mat frame_gray = rgbChannels[2];
        
        // cvtColor( frame, frame_gray, CV_BGR2GRAY );
        //equalizeHist( frame_gray, frame_gray );
        
        //-- Detect faces
		//-- Calculate the face orientation and give it as a parameter to the new object person
		//-- Create a new person for each face detected
		//-- For each person detected, store information about that person in a database or something similar
        //face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
		//detectMultiScale arguments: scale step 1.1, min neighbors 2,
		//flags asking for image-pyramid scaling and only the biggest object,
		//and a 150x150 minimum face size
        face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );
        
        for( int i = 0; i < faces.size(); i++ )
        {
            Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
            ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
            rectangle( frame, center, center, CV_RGB(0, 255,0), 1);
            Mat faceROI = frame_gray( faces[i] );
            std::vector<Rect> eyes;
            std::vector<Rect> mouth;
            
            //-- In each face, detect eyes
            eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
            
            for( int j = 0; j < eyes.size(); j++ )
            {
                Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
				rectangle(frame,    cvPoint(faces[i].x + eyes[j].x, faces[i].y + eyes[j].y),
                          cvPoint(faces[i].x + eyes[j].x + eyes[j].width ,faces[i].y + eyes[j].y + eyes[j].height),
                          CV_RGB(0, 0, 255),
                          1, 8, 0
                          );
            }
            
			//-- In each face, detect mouth
			performFeatureDetection( mouth, faces, faceROI, mouth_cascade, frame, i);
        }
        //-- Show what you got
        imshow( window_name, frame );
    }
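performFeatureDetection is called but not included in the snippet. A sketch consistent with the call site -- the signature is inferred from the call, and the body is an assumption modeled on the eye loop above:

    void performFeatureDetection(std::vector<Rect>& found, const std::vector<Rect>& faces,
                                 Mat& faceROI, CascadeClassifier& cascade, Mat& frame, int i)
    {
        // Detect the feature inside the face ROI, then draw it in full-frame coordinates
        cascade.detectMultiScale(faceROI, found, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t j = 0; j < found.size(); j++)
        {
            rectangle(frame,
                      cvPoint(faces[i].x + found[j].x, faces[i].y + found[j].y),
                      cvPoint(faces[i].x + found[j].x + found[j].width,
                              faces[i].y + found[j].y + found[j].height),
                      CV_RGB(255, 0, 0), 1, 8, 0);
        }
    }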
Example #6
  vector<PlateRegion> DetectorCUDA::detect(Mat frame, std::vector<cv::Rect> regionsOfInterest)
  {

    Mat frame_gray;
    cvtColor( frame, frame_gray, CV_BGR2GRAY );

    vector<PlateRegion> detectedRegions;
    for (int i = 0; i < regionsOfInterest.size(); i++)
    {
      Mat cropped = frame_gray(regionsOfInterest[i]);
      vector<PlateRegion> subRegions = doCascade(cropped, regionsOfInterest[i].x, regionsOfInterest[i].y);

      for (int j = 0; j < subRegions.size(); j++)
        detectedRegions.push_back(subRegions[j]);
    }
    return detectedRegions;
  }
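Callers that want to scan the whole frame can pass a single full-size region. A short usage sketch, assuming frame is a loaded BGR image and detector is an already-constructed DetectorCUDA:

    std::vector<cv::Rect> rois;
    rois.push_back(cv::Rect(0, 0, frame.cols, frame.rows)); // whole frame
    vector<PlateRegion> plates = detector.detect(frame, rois);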
Example #7
void VehicleDetector::detectROI(Mat frame, vector<Rect> &objs) {
	//resize
	double scaleDown = 2;
	Size size = frame.size();
	int frameHeight = size.height;
	int frameWidth = size.width;
	resize(frame, frame, Size(frameWidth / scaleDown, frameHeight / scaleDown),
			0, 0, INTER_LINEAR);
	size = frame.size();
	frameHeight = size.height;
	frameWidth = size.width;
	//detect roi
	vector<Rect> cars;
	Mat frame_gray;
	cvtColor(frame, frame_gray, CV_BGR2GRAY);
	equalizeHist(frame_gray, frame_gray);
	car_cascade.detectMultiScale(frame_gray, cars, 1.1, 0, 0, Size(30, 30), Size(150, 150));
	int minY = (int) (frameHeight * 0.1);
	//int maxY = (int) (frameHeight * 0.66);
	//for each roi
	for (size_t i = 0; i < cars.size(); i++) {
		Mat roiImage = frame_gray(Range(cars[i].y, cars[i].y + cars[i].height),
				Range(cars[i].x, cars[i].x + cars[i].width));
		if (cars[i].y > minY) {
			//double diffX = diffLeftRight(roiImage);
			//double diffY = diffUpDown(roiImage);
			//cout << diffX << " : " << diffY << "\n";
			//if (diffX > 1500 && diffX < 4000 && diffY > 3500) {
			if (/*diffX > 1500 && diffX < 4000 && diffY > 3000 &&*/ isCarLight(roiImage)) {
				objs.push_back(
						Rect(cars[i].x * scaleDown, cars[i].y * scaleDown,
								cars[i].width * scaleDown,
								cars[i].height * scaleDown));
			}
		}
	}
}
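isCarLight is referenced but not shown. One plausible sketch, clearly an assumption rather than the original helper: treat a candidate as a vehicle when its ROI contains a modest fraction of saturated-bright pixels, as head- or taillights would produce.

bool isCarLight(const Mat& roiGray) {
	Mat bright;
	threshold(roiGray, bright, 220, 255, THRESH_BINARY);
	double fraction = countNonZero(bright) / (double) roiGray.total();
	// Some bright pixels, but not a washed-out patch such as the sky
	return fraction > 0.01 && fraction < 0.5;
}
Example #8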
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#if OPENCV3
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t i = 0; i < faces.size(); i++ )
      {
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#if OPENCV3
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
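This nodelet's do_work is normally invoked from an image_transport callback. A minimal sketch of that hookup, assuming an opencv_apps-style nodelet class around it (anything beyond what the snippet shows is an assumption):

  void imageCallback(const sensor_msgs::ImageConstPtr& msg)
  {
    // Forward the frame; the second argument names the tf frame to work in.
    do_work(msg, msg->header.frame_id);
  }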
Example #9
void scanWebcam()
{
	//Check that the cascade file was loaded
	if(!face_cascade1.load( face_cascade_name1 ))
	{
		cout << "Error while loading cascade files" << endl;
		return; //without the cascade there is nothing to detect
	}

	CvCapture* capture;
	cv::Mat frame;

	//Connect to the video stream
	capture = cvCaptureFromFile(PATH_TO_CAM);

	//If the connection was successful 
	if(capture)
	{
		//Create a FaceRecognizer object that uses the Fisherfaces algorithm (also works with the eigenfaces and LBPH algorithms)
		cv::Ptr<cv::FaceRecognizer> fisherfaces = cv::createFisherFaceRecognizer();

		//Load the database that was previously created during the training phase
		cv::FileStorage fs_fisher(PATH_TO_XML_FISHERFACES, cv::FileStorage::READ);
		fisherfaces->load(fs_fisher);

		//Infinite loop to detect the faces continuously
		while(true)
		{
			//Get one picture from the videostream (The facial recognition is done on images from the video and not directly from the videostream)
			frame = cvQueryFrame( capture );
			cv::namedWindow("test");

			//Check that one image was successfully extracted from the video
			if(!frame.empty())
			{
				//Variables used for the id process
				int predictedLabel = -1;
				double predictedConfidence = 0.0;

				std::vector<cv::Rect> faces; //Contains the rectangle coordinates in which the face will be included
				cv::Mat frame_gray; //Grey image
				cvtColor( frame, frame_gray, CV_BGR2GRAY ); //Converts the image from BGR (OpenCV's channel order) to shades of grey
				equalizeHist( frame_gray, frame_gray ); //Histogram equalization
				
				//We perform a face detection
				face_cascade1.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
				

				//If at least one face was detected then we can perform an identification
				for(int i=0; i<faces.size();i++)
				{
					//Get only (crop) the face (shades of grey)
					cv::Mat croppedFace = frame_gray(cv::Rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height));
					//Resize the image
					cv::resize(croppedFace, croppedFace, sizeOfImage);
					
					//Start the identification
					fisherfaces->predict(croppedFace, predictedLabel, predictedConfidence);
					
					//Print the result in the console
					cout << "##### ID " << predictedLabel << "    confidence : " << predictedConfidence;

					//For Fisherfaces the reported "confidence" is a distance (lower = better match),
					//so accept an identification only when the distance stays below the threshold.
					const double THRESHOLD = 1000.0;

					cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );

					//Look the label up in a small table instead of one if/else branch per person
					string printedName = "UNKNOWN";
					cv::Scalar color(0, 0, 255); //red for unknown faces
					if(predictedConfidence < THRESHOLD)
					{
						switch(predictedLabel)
						{
							case 1: printedName = "Adrien";  break;
							case 2: printedName = "Ofir";    break;
							case 3: printedName = "Jeremie"; break;
						}
					}
					if(printedName != "UNKNOWN")
						color = cv::Scalar(0, 255, 0); //green for recognized faces

					//Print the circle around the face and the person's name
					ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, color, 4, 8, 0);
					cv::putText(frame, printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, color, 2, 8, false );
					
				}
				cout << endl;

				//Print each images to recreate a video
				cv::imshow("test", frame);
			}	
			else
			{
				cout << " --(!) No captured frame -- Break!" << endl;
				break;
			}

			int c = cv::waitKey(10);
		}

	}
}
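The XML database loaded above is produced by a separate training phase. A minimal sketch of that step, assuming the same contrib API (the image/label vectors come from your own dataset, and every image must already have sizeOfImage dimensions):

void trainFisherfaces(const std::vector<cv::Mat>& faces, const std::vector<int>& labels)
{
	cv::Ptr<cv::FaceRecognizer> fisherfaces = cv::createFisherFaceRecognizer();
	fisherfaces->train(faces, labels);
	fisherfaces->save(PATH_TO_XML_FISHERFACES); //later reloaded by scanWebcam()
}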
Example #10
void Snapshot::detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;

    cvtColor( frame, frame_gray, CV_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    //-- Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3, 0 | CV_HAAR_SCALE_IMAGE, Size(120, 120) );

    for(size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5);
        
        // Draw circle around the user's head
        //ellipse(frame, center, Size( faces[i].width*0.6, faces[i].height*0.7), 0, 0, 360, Scalar(0, 200, 0), 4, 8, 0);
        
        // Frame the person's head
        //rectangle(frame, Point(center.x - faces[i].width*0.6, center.y - faces[i].height*0.7), Point(center.x + faces[i].width*0.6, center.y + faces[i].height*0.7), Scalar(0, 200, 0), 4, 8, 0);
        

        Mat faceROI = frame_gray(faces[i]);
        std::vector<Rect> eyes;

        //-- In each face, detect eyes
        /*
        eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );

        for( size_t j = 0; j < eyes.size(); j++ )
        {
            Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
            circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
        }
        */
        
        
        // Save to file
        // discard first image--it may be corrupted.
        if(counter > 0 && counter < 20)
        {
            stringstream ss;
            ss << counter++;
            string buffer = ss.str();
            
            cout << "image width, height: " << faces[i].width << ", " << faces[i].height << endl;
            cout << "x, y, width, height: " << (center.x - faces[i].width*0.6) 
                 << ", " << (center.y - faces[i].height*0.7)
                 << ", " << (center.x + faces[i].width*2.6)
                 << ", " << (center.y + faces[i].height*2.7) << "\n\n";
                 
            Rect rect(center.x - faces[i].width*0.6,
                      center.y - faces[i].height*0.7,
                      faces[i].width*1.2,
                      faces[i].height/0.7);
            Mat croppedImg = frame(rect);
            cout << "saving to file..." << endl;
            imwrite("/home/james/waiter_ws/waiter_nodes/roscpp_nodes/src/person_recognizer/james_raw/" + buffer + ".jpg", croppedImg);
        }
        else if(counter >= 20)
        {
            exit(0);
        }
        else
        {
            counter++;
        }
        
    }
    
    // Show what you got
    imshow("Result", frame);
    waitKey(3);
}
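The crops saved above are presumably consumed by a training step later. A hedged sketch of reading them back into the vectors a FaceRecognizer expects -- the directory layout and grayscale convention are assumptions:

void loadSnapshots(const std::string& dir, int label,
                   std::vector<cv::Mat>& images, std::vector<int>& labels)
{
    for (int n = 1; n < 20; n++) // matches the 1..19 counter range above
    {
        std::stringstream ss;
        ss << dir << "/" << n << ".jpg";
        cv::Mat img = cv::imread(ss.str(), CV_LOAD_IMAGE_GRAYSCALE);
        if (img.empty()) continue;
        images.push_back(img);
        labels.push_back(label);
    }
}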
Example #11
  vector<PlateRegion> Detector::detect(Mat frame, std::vector<cv::Rect> regionsOfInterest)
  {

    Mat frame_gray;
    
    if (frame.channels() > 2)
    {
      cvtColor( frame, frame_gray, CV_BGR2GRAY );
    }
    else
    {
      frame.copyTo(frame_gray);
    }

    // Apply the detection mask if it has been specified by the user
    if (detector_mask.mask_loaded)
      frame_gray = detector_mask.apply_mask(frame_gray);

    // Setup debug mask image
    Mat mask_debug_img;
    if (detector_mask.mask_loaded && config->debugDetector)
    {
      frame_gray.copyTo(mask_debug_img);
      cvtColor(frame_gray, mask_debug_img, CV_GRAY2BGR);
    }
    
    vector<PlateRegion> detectedRegions;   
    for (int i = 0; i < regionsOfInterest.size(); i++)
    {
      Rect roi = regionsOfInterest[i];
      
      // Adjust the ROI to be inside the detection mask (if it exists)
      if (detector_mask.mask_loaded)
        roi = detector_mask.getRoiInsideMask(roi);

      // Draw ROIs on debug mask image
      if (detector_mask.mask_loaded && config->debugDetector)
        rectangle(mask_debug_img, roi, Scalar(0,255,255), 3);
      
      // Sanity check.  If roi width or height is less than minimum possible plate size,
      // then skip it
      if ((roi.width < config->minPlateSizeWidthPx) || 
          (roi.height < config->minPlateSizeHeightPx))
        continue;
      
      Mat cropped = frame_gray(roi);

      int w = cropped.size().width;
      int h = cropped.size().height;
      int offset_x = roi.x;
      int offset_y = roi.y;
      float scale_factor = computeScaleFactor(w, h);

      if (scale_factor != 1.0)
        resize(cropped, cropped, Size(w * scale_factor, h * scale_factor));

    
      float maxWidth = ((float) w) * (config->maxPlateWidthPercent / 100.0f) * scale_factor;
      float maxHeight = ((float) h) * (config->maxPlateHeightPercent / 100.0f) * scale_factor;
      Size minPlateSize(config->minPlateSizeWidthPx, config->minPlateSizeHeightPx);
      Size maxPlateSize(maxWidth, maxHeight);
    
      vector<Rect> allRegions = find_plates(cropped, minPlateSize, maxPlateSize);
      
      // Aggregate the Rect regions into a hierarchical representation
      for( unsigned int i = 0; i < allRegions.size(); i++ )
      {
        allRegions[i].x = (allRegions[i].x / scale_factor);
        allRegions[i].y = (allRegions[i].y / scale_factor);
        allRegions[i].width = allRegions[i].width / scale_factor;
        allRegions[i].height = allRegions[i].height / scale_factor;

        // Ensure that the rectangle isn't < 0 or > maxWidth/Height
        allRegions[i] = expandRect(allRegions[i], 0, 0, w, h);

        allRegions[i].x = allRegions[i].x + offset_x;
        allRegions[i].y = allRegions[i].y + offset_y;
      }
      
      // Check the rectangles and make sure that they're definitely not masked
      vector<Rect> regions_not_masked;
      for (unsigned int i = 0; i < allRegions.size(); i++)
      {
        if (detector_mask.mask_loaded)
        {
          if (!detector_mask.region_is_masked(allRegions[i]))
            regions_not_masked.push_back(allRegions[i]);
        }
        else
          regions_not_masked.push_back(allRegions[i]);
      }
      
      vector<PlateRegion> orderedRegions = aggregateRegions(regions_not_masked);

      

      for (unsigned int j = 0; j < orderedRegions.size(); j++)
        detectedRegions.push_back(orderedRegions[j]);
    }

    // Show debug mask image
    if (detector_mask.mask_loaded && config->debugDetector && config->debugShowImages)
    {
      imshow("Detection Mask", mask_debug_img);
    }
    
    return detectedRegions;
  }
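computeScaleFactor is used above but not part of the snippet. One plausible sketch, labeled as an assumption: shrink oversized crops to a bounded working width so cascade time stays predictable (the 800 px cap is invented for illustration).

  float computeScaleFactor(int width, int height)
  {
    const float maxWorkingWidth = 800.0f; // assumed cap, not from the source
    if (width > maxWorkingWidth)
      return maxWorkingWidth / ((float) width);
    return 1.0f;
  }
Example #12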
std::vector<cv::Vec3f> CircularSampleAreaDetector::detect(cv::Mat frame) {
  // Convert the image to grayscale
  cv::Mat frame_gray;
  cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);

  // cv::cvtColor(frame, frame_gray, CV_BGR2HSV);
  // std::vector<cv::Mat> channels;
  // cv::split(frame_gray, channels);
  // frame_gray = channels[2];

  // Blur to remove extraneous detail before edge detection
  // cv::medianBlur(frame_gray, frame_gray, 9);
  // cv::blur(frame_gray, frame_gray, cv::Size(3, 3));
  cv::GaussianBlur(frame_gray, frame_gray, cv::Size(9, 9), 2, 2);

  // cv::imshow("blur_win", frame_gray);

  // Edge detection
  // cv::adaptiveThreshold(frame_gray, frame_gray, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 11, 1);
  cv::Mat frame_canny;

  // int erosion_size = 2;
  // cv::Mat element = getStructuringElement(cv::MORPH_ELLIPSE,
  //     cv::Size( 2*erosion_size + 1, 2*erosion_size+1),
  //     cv::Point( erosion_size, erosion_size ));
  // cv::dilate(frame_gray, frame_gray, element );
  // cv::erode(frame_gray, frame_gray, element );

  // cv::Canny(frame_gray, frame_canny, 5, 50);
  // cv::imshow("canny_win", frame_canny);

  // Extract circle features
  std::vector<cv::Vec3f> circles;
  // HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT, 1, 50, 50, 40, 0, 0);
  HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
      2,   // inverse resolution ratio
      50,  // min dist between circle centers
      50,  // canny upper threshold
      150,  // center detection threshold
      0,   // min radius
      0    // max radius
    );
  // HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
  //     1,   // inverse resolution ratio
  //     50,  // min dist between circle centers
  //     50,  // canny upper threshold
  //     50,  // center detection threshold
  //     0,   // min radius
  //     0    // max radius
  //   );

  // Of the circles found, pick the one closest to the center of the frame
  // TODO: This is not the best way to do this. Research probabilistic methods?
  cv::Point frame_center(frame_gray.cols / 2, frame_gray.rows / 2);

  std::vector<cv::Vec3f> good_circles;
  for(size_t i = 0; i < circles.size(); i++) {
    cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);

    // Ensure circle is entirely in screen
    if(center.x - radius < 0 || center.x + radius > frame_gray.cols
        || center.y - radius < 0 || center.y + radius > frame_gray.rows) {
      continue;
    }

    good_circles.push_back(cv::Vec3f(circles[i][0], circles[i][1], circles[i][2] * CIRCLE_SHRINK_FACTOR));
  }

  return good_circles;
}
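A short usage sketch that overlays the circles this returns (the window name and instance are illustrative):

  CircularSampleAreaDetector detector;
  std::vector<cv::Vec3f> circles = detector.detect(frame);
  for (size_t i = 0; i < circles.size(); i++) {
    cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);
    cv::circle(frame, center, radius, cv::Scalar(0, 255, 0), 2);
  }
  cv::imshow("sample_area", frame);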
Example #13
void detectAndDisplay(cv::Mat frame) {
  std::vector<cv::Rect> faces;
  cv::Mat frame_gray;

  cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
  cv::equalizeHist(frame_gray, frame_gray);
  
  // Detect Faces
  face_cascade.detectMultiScale(frame_gray, // image
				faces, // objects
				1.1, // scale factor
				2, // min neighbors
				0|cv::CASCADE_SCALE_IMAGE, // flags
				cv::Size(30, 30)); // min size

  for (std::size_t i = 0; i < faces.size(); i++) {
    cv::Point center(faces[i].x + faces[i].width/2,
		     faces[i].y + faces[i].height/2);

    cv::ellipse(frame,
		center,
		cv::Size(faces[i].width/2, faces[i].height/2),
		0,
		0,
		360,
		cv::Scalar(255, 0, 255),
		4,
		8,
		0);

    cv::Mat faceROI = frame_gray(faces[i]);
    std::vector<cv::Rect> eyes;

    // in each face, detect eyes
    eyes_cascade.detectMultiScale(faceROI,
				  eyes,
				  1.1,
				  2,
				  0 | cv::CASCADE_SCALE_IMAGE,
				  cv::Size(30, 30));

    for (std::size_t j = 0; j < eyes.size(); j++) {
      cv::Point eye_center(faces[i].x + eyes[j].x + eyes[j].width/2,
			   faces[i].y + eyes[j].y + eyes[j].height/2);

      int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
      cv::circle(frame, 
		 eye_center,
		 radius, 
		 cv::Scalar(255, 0, 0),
		 4,
		 8,
		 0);
    }

  }

  // Show what you got
  cv::imshow(window_name, frame);

}
Example #14
void EyeTracker::eyeTracking(std::function<void(std::string)> emitter) {
    cv::VideoCapture capture;
    cv::Mat frame;
    cv::Mat frame_gray;
    capture.open(-1);
    if (!capture.isOpened()) {
        DEBUG("Error opening video capture!");
        stopEyeTracking.test_and_set();
        return;
    }
    auto lastTimestamp = util::timestamp();
    std::vector<cv::Rect> eyes;
    std::pair<cv::Rect, cv::Rect> eyeRects;
    std::pair<StabilizedMedian<int>, StabilizedMedian<int>> left_eye_median =
                std::make_pair(StabilizedMedian<int>(4),
                               StabilizedMedian<int>(4));
    std::pair<StabilizedMedian<int>, StabilizedMedian<int>> right_eye_median =
                std::make_pair(StabilizedMedian<int>(4),
                               StabilizedMedian<int>(4));
                               
    while (stopEyeTracking.test_and_set() && capture.read(frame)) {
        if (frame.empty()) {
            DEBUG("No captured frame!");
            break;
        }
        
        cv::flip(frame, frame, 1);
        
        cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(frame_gray, frame_gray);
        
        eye_cascade.detectMultiScale(frame_gray, eyes,
                                     1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE,
                                     cv::Size(30, 30));
                                     
        auto crop_eye = [](cv::Rect eye) {
            return cv::Rect(eye.x + eye.width / 6,
                            eye.y + eye.height / 4,
                            (2 * eye.width) / 3,
                            (1 * eye.height) / 2);
        };
        for (cv::Rect eye : eyes) {
            cv::Point eyeCenter(eye.x + eye.width / 2,
                                eye.y + eye.height / 2);
            cv::circle(frame, eyeCenter,
                       cvRound((eye.width + eye.height) * 0.25),
                       cv::Scalar(255, 0, 255), 3, 8, 0);
        }
        
        if (eyes.size() == 2 && (eyes[0] & eyes[1]).area() == 0) {
            if (eyes[0].x < eyes[1].x)
                eyeRects = std::make_pair(crop_eye(eyes[0]),
                                          crop_eye(eyes[1]));
            else
                eyeRects = std::make_pair(crop_eye(eyes[1]),
                                          crop_eye(eyes[0]));
        }
        
        cv::imshow("eye_view", frame);
        if (eyeRects.first.area() != 0 && eyeRects.second.area() != 0) {
            cv::Mat left_eye = resizeIdeal(frame_gray(eyeRects.first));
            cv::Mat right_eye = resizeIdeal(frame_gray(eyeRects.second));
            cv::Point right_eye_loc =
                computePupilLocationHough(right_eye);
            cv::Point left_eye_loc =
                computePupilLocationHough(left_eye);
            if (right_eye_loc != cv::Point(-1, -1)) {
                right_eye_median.first.put(right_eye_loc.x);
                right_eye_median.second.put(right_eye_loc.y);
            }
            if (left_eye_loc != cv::Point(-1, -1)) {
                left_eye_median.first.put(left_eye_loc.x);
                left_eye_median.second.put(left_eye_loc.y);
            }
            
            if (left_eye_median.first.getLength() > 0 ||
                    left_eye_median.second.getLength() > 0) {
                cv::Point left_eye_loc_stabilized =
                    cv::Point(left_eye_median.first.getMedian(),
                              left_eye_median.second.getMedian());
                cv::circle(left_eye, left_eye_loc_stabilized,
                           2,
                           cv::Scalar(255, 0, 255), 1, 8, 0);
            }
            if (right_eye_median.first.getLength() > 0 ||
                    right_eye_median.second.getLength() > 0) {
                cv::Point right_eye_loc_stabilized =
                    cv::Point(right_eye_median.first.getMedian(),
                              right_eye_median.second.getMedian());
                cv::circle(right_eye, right_eye_loc_stabilized,
                           2,
                           cv::Scalar(255, 0, 255), 1, 8, 0);
            }
            
            if (left_eye_loc != cv::Point(-1, -1)) {
                cv::circle(left_eye, left_eye_loc,
                           10,
                           cv::Scalar(255, 0, 255), 1, 8, 0);
            }
            if (right_eye_loc != cv::Point(-1, -1)) {
                cv::circle(right_eye, right_eye_loc,
                           10,
                           cv::Scalar(255, 0, 255), 1, 8, 0);
            }
            
            DEBUG("Left eye at " << left_eye_loc);
            DEBUG("Right eye at " << right_eye_loc);
            resizeAndRender(left_eye, "left_eye");
            resizeAndRender(right_eye, "right_eye");
        }
        DEBUG("Elapsed milliseconds: " << util::timestamp() - lastTimestamp);
        lastTimestamp = util::timestamp();
    }
    capture.release();
    DEBUG("NOT EYE TRACKING");
}
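computePupilLocationHough is not included in the snippet. A minimal sketch consistent with its call sites, which expect (-1, -1) when nothing is found; the blur settings and radius bounds are assumptions:

cv::Point computePupilLocationHough(const cv::Mat& eye_gray) {
    cv::Mat blurred;
    cv::GaussianBlur(eye_gray, blurred, cv::Size(5, 5), 1.5);
    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(blurred, circles, CV_HOUGH_GRADIENT,
                     1,                  // accumulator resolution ratio
                     blurred.rows,       // at most one circle per eye crop
                     60, 20,             // Canny upper / accumulator thresholds
                     blurred.rows / 8,   // min radius
                     blurred.rows / 3);  // max radius
    if (circles.empty())
        return cv::Point(-1, -1);        // sentinel checked by the caller
    return cv::Point(cvRound(circles[0][0]), cvRound(circles[0][1]));
}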