Example #1
/**
 *  buffer header callback function for video
 *
 * @param port Pointer to port from which callback originated
 * @param buffer mmal buffer header pointer
 */
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
   MMAL_BUFFER_HEADER_T *new_buffer;
   PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;

   if (pData)
   {

	   if (buffer->length)
	   {

		   mmal_buffer_header_mem_lock(buffer);

		   //
		   // *** PR : OpenCV stuff here!
		   //
		   int w=pData->pstate->width;	// get image size
		   int h=pData->pstate->height;
		   int h4=h/4;

		   memcpy(py->imageData,buffer->data,w*h);	// read Y

		   if (pData->pstate->graymode==0)
		   {
			   memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
			   memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read V

			   cvResize(pu, pu_big, CV_INTER_NN);
			   cvResize(pv, pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			   cvMerge(py, pu_big, pv_big, NULL, image);

			   cvCvtColor(image,dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			   gray=cvarrToMat(dstImage);   
			   //cvShowImage("camcvWin", dstImage );

		   }
		   else
		   {	
			   // for face recognition, we just keep the gray channel, py
			   gray=cvarrToMat(py);  
			   //cvShowImage("camcvWin", py); // display only gray channel
		   }

		   ////////////////////////////////
		   // FACE RECOGNITION STARTS HERE
		   ////////////////////////////////

		   // detect faces
		   face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(80,80));
		   // for each face found
		   for (size_t i = 0; i < faces.size(); i++)
		   {
			   // crop face (pretty easy with OpenCV, don't you think?)
			   Rect face_i = faces[i];

			   //face = gray(face_i);  
			   //  resized face and display it
			   //cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, CV_INTER_NN); //INTER_CUBIC);		

			   // create a rectangle around the face      
			   rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);

		   } // end for


		   /////////////////////////
		   // END OF FACE RECO
		   /////////////////////////

		   // Show the result:
		   imshow("camcvWin", gray);
		   key = (char) waitKey(1);
		   nCount++;		// count frames displayed

		   mmal_buffer_header_mem_unlock(buffer);
	   }
	   else vcos_log_error("buffer null");

   }
   else
   {
	   vcos_log_error("Received a encoder buffer callback with no state");
   }

   // release buffer back to the pool
   mmal_buffer_header_release(buffer);

   // and send one back to the port (if still open)
   if (pData && port->is_enabled)
   {
	   MMAL_STATUS_T status;

	   new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

	   if (new_buffer)
		   status = mmal_port_send_buffer(port, new_buffer);

	   if (!new_buffer || status != MMAL_SUCCESS)
		   vcos_log_error("Unable to return a buffer to the encoder port");
   }

}
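The memcpy offsets in the callback above assume an I420 frame layout: a w*h Y plane followed by two quarter-size chroma planes. A standalone sketch of that plane arithmetic (sizes are illustrative, not from the original program):

#include <cstdint>
#include <cstdio>

int main()
{
    const int w = 640, h = 480;                  // hypothetical frame size
    static uint8_t frame[640 * 480 * 3 / 2];     // I420 stores 1.5 bytes per pixel

    // Plane offsets used by the memcpy calls in the callback above.
    const uint8_t *y_plane = frame;                          // w*h luma bytes
    const uint8_t *u_plane = frame + w * h;                  // w*h/4 chroma bytes
    const uint8_t *v_plane = frame + w * h + (w * h) / 4;    // w*h/4 chroma bytes

    printf("U offset = %d, V offset = %d, total = %d bytes\n",
           w * h, w * h + (w * h) / 4, (w * h * 3) / 2);
    (void)y_plane; (void)u_plane; (void)v_plane;
    return 0;
}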
Mat faceDetect(Mat img) {

	std::vector<Rect> faces;
	std::vector<Rect> eyes;
	bool two_eyes = false;
	bool any_eye_detected = false;

	//detecting faces
	face_cascade.detectMultiScale(img, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
			Size(30, 30));

	if (faces.size() == 0) {
		cout << "Try again... I did not detect any faces..." << endl;
		exit(1);
	}

	Point p1 = Point(0, 0);
	for (size_t i = 0; i < faces.size(); i++) {
		// we cannot draw on the image! Otherwise we would mess up the prediction
		// rectangle( img, faces[i], Scalar( 255, 100, 0 ), 4, 8, 0 );

		Mat frame_gray;
		cvtColor(img, frame_gray, CV_BGR2GRAY);

		//imwrite("frame_gray.jpg", frame_gray);

		// cropping only the face in the region defined by faces[i]
		// (use the outer `eyes` vector so the debug output below sees the detections)
		Mat faceROI;
		faceROI = frame_gray(faces[i]);

		//imwrite("faceROI.jpg", faceROI);

		//In each face, detect eyes
		eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 3,
				0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

		for (size_t j = 0; j < eyes.size(); j++) {
			Point center(faces[i].x + eyes[j].x + eyes[j].width * 0.5,
					faces[i].y + eyes[j].y + eyes[j].height * 0.5);
			// we cannot draw on the image! Otherwise we would mess up the prediction
			//int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
			//circle( img, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );

			if (j == 1) {
				p1 = center;
				two_eyes = true;
			} else {
				any_eye_detected = true;
			}
		}
	}

	cout << "SOME DEBUG" << endl;
	cout << "-------------------------" << endl;
	cout << "faces detected:" << faces.size() << endl;
	for (size_t j = 0; j < eyes.size(); j++) {
		cout << j << endl;
		cout << "ex: " << eyes[j].x << endl;
		cout << "ey: " << eyes[j].y << endl;
		cout << "ew: " << eyes[j].width << endl;
		cout << "eh: " << eyes[j].height << endl << endl;
	}
	cout << "x: " << faces[0].x << endl;
	cout << "y: " << faces[0].y << endl;
	cout << "w: " << faces[0].width << endl;
	cout << "h: " << faces[0].height << endl << endl;

	Mat imageInRectangle;
	imageInRectangle = img(faces[0]);
	Size recFaceSize = imageInRectangle.size();
	cout << recFaceSize << endl;

	// for debug
	//imwrite("imageInRectangle2.jpg", imageInRectangle);
	int rec_w = 0;
	int rec_h = faces[0].height * 0.64;

	// checking the (x,y) for cropped rectangle
	// based in human anatomy
	int px = 0;
	int py = 2 * 0.125 * faces[0].height;

	Mat cropImage;

	cout << "faces[0].x:" << faces[0].x << endl;
	p1.x = p1.x - faces[0].x;
	cout << "p1.x:" << p1.x << endl;

	if (any_eye_detected) {
		if (two_eyes) {
			cout << "two eyes detected" << endl;
			// we have detected two eyes
			// we have p1 and p2
			// left eye
			px = p1.x / 1.35;
		} else {
			// only one eye was found.. need to check if the
			// left or right eye
			// we have only p1
			if (p1.x > recFaceSize.width / 2) {
				// right eye
				cout << "only right eye detected" << endl;
				px = p1.x / 1.75;
			} else {
				// left eye
				cout << "only left eye detected" << endl;
				px = p1.x / 1.35;
			}
		}
	} else {
		// no eyes detected but we have a face
		px = 25;
		py = 25;
		rec_w = recFaceSize.width - 50;
		rec_h = recFaceSize.height - 30;
	}

	// only recompute the width when an eye anchor was found
	if (any_eye_detected)
		rec_w = (faces[0].width - px) * 0.75;
	cout << "px   :" << px << endl;
	cout << "py   :" << py << endl;
	cout << "rec_w:" << rec_w << endl;
	cout << "rec_h:" << rec_h << endl;

	cropImage = imageInRectangle(Rect(px, py, rec_w, rec_h));
	Size dstImgSize(70, 70);
	// same image size of db
	Mat finalSizeImg;
	resize(cropImage, finalSizeImg, dstImgSize);

	// for debug
	imwrite("onlyface.jpg", finalSizeImg);

	cvtColor(finalSizeImg, finalSizeImg, CV_BGR2GRAY);

	return finalSizeImg;
}
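A possible caller for faceDetect is sketched below; the input filename and the saving step are assumptions for illustration, not part of the original example:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

Mat faceDetect(Mat img);   // defined in the example above

int main()
{
    Mat img = imread("person.jpg");       // hypothetical input file
    if (img.empty()) {
        cerr << "could not read person.jpg" << endl;
        return 1;
    }
    Mat face = faceDetect(img);           // 70x70 grayscale crop
    imwrite("face_70x70.jpg", face);      // same size as the training db
    return 0;
}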
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame, int argc, char** argv  )
{	
	
	
	std::vector<Rect> faces;
	Mat frame_gray;
	//ROS initialization (note: ros::init is meant to run once per process;
	//see the sketch after this function)
	ros::init(argc, argv, "detect_faces");
	ros::NodeHandle n;
	ros::Publisher face_pub = n.advertise<std_msgs::Int32>("num_faces",1000);
	std_msgs::Int32 msg;

	cvtColor( frame, frame_gray, CV_BGR2GRAY );
	equalizeHist( frame_gray, frame_gray );

	//-- Detect faces
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
	if(counter%3 == 0)
	{
		//store data to msg and publish it
		msg.data = faces.size();
		face_pub.publish(msg);
	}
	if(counter == 0)
	{
		system("espeak \"Hi Matteo! This is our project.\"");
	}
	if(counter%20 == 0 && counter != 0)
	{
		if(faces.size()==0)
		{
			system("espeak \"No one is here. I'm lonely\"");
		}
		else if(faces.size()==1)
		{
			system("espeak \"1 person is here. How can I help?\"");
		}
		else if(faces.size()==2)
		{
			system("espeak \"2 people are here. Does anyone need anything?\"");
		}
		else if (faces.size()==3)
		{
			system("espeak \"3 people are here. Who needs help first?\"");
		}
		else
		{
			system("espeak \"So many people! You must really like me\"");
		}
	}
	counter++;


	for( size_t i = 0; i < faces.size(); i++ )
	{

		Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
		ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

		Mat faceROI = frame_gray( faces[i] );
		std::vector<Rect> eyes;

		//-- In each face, detect eyes
		eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );

		for( size_t j = 0; j < eyes.size(); j++ )
		 {
			 Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
			 int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
			 circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
		 }
	}
	//-- Show what you got
	imshow( window_name, frame );
 }
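Calling ros::init on every frame, as the function above does, is at best redundant: a ROS node is meant to be initialized once per process. A minimal sketch of the intended layout, assuming the camera loop lives in main (names are illustrative):

#include <ros/ros.h>
#include <std_msgs/Int32.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "detect_faces");   // once per process
    ros::NodeHandle n;
    ros::Publisher face_pub =
        n.advertise<std_msgs::Int32>("num_faces", 1000);

    // ... the camera/detection loop would go here; per frame:
    std_msgs::Int32 msg;
    msg.data = 0;                            // faces.size() in the real loop
    face_pub.publish(msg);
    ros::spinOnce();
    return 0;
}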
Example #4
/**
 *  buffer header callback function for video
 *
 * @param port Pointer to port from which callback originated
 * @param buffer mmal buffer header pointer
 */
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
   MMAL_BUFFER_HEADER_T *new_buffer;
   PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;


   if (pData)
   {
     
      if (buffer->length)
      {

	      mmal_buffer_header_mem_lock(buffer);
 
 		//
		// *** PR : OpenCV stuff here!
		//
		int w=pData->pstate->width;	// get image size
		int h=pData->pstate->height;
		int h4=h/4;
		
		memcpy(py->imageData,buffer->data,w*h);	// read Y
		
		if (pData->pstate->graymode==0)
		{
			memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
			memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read V
	
			cvResize(pu, pu_big, CV_INTER_NN);
			cvResize(pv, pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			cvMerge(py, pu_big, pv_big, NULL, image);
	
			cvCvtColor(image,dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			gray=cvarrToMat(dstImage);   
			//cvShowImage("camcvWin", dstImage );
			
		}
		else
		{	
			// for face recognition, we just keep the gray channel, py
			gray=cvarrToMat(py);  
			//cvShowImage("camcvWin", py); // display only gray channel
		}
		
////////////////////////////////
// FACE RECOGNITION STARTS HERE
////////////////////////////////

	// dynamixel ids
	int id_x = 9;
	int id_y = 11;

	// center coordinates of whole picture
	int center_x = ( w / 2 );
	int center_y = ( h / 2 );

	// dead zone - region in which no servo movement is needed
	int x_dead_min = ( center_x - ( w / 10 ) );
	int x_dead_max = ( center_x + ( w / 10 ) );
	int y_dead_min = ( center_y - ( h / 5 ) );
	int y_dead_max = ( center_y + ( h / 20 ) );

	/*cv::line(gray, Point(x_dead_min, 0), Point(x_dead_min, h), CV_RGB(255, 255 ,255) );
	cv::line(gray, Point(x_dead_max, 0), Point(x_dead_max, h), CV_RGB(255, 255 ,255) );
	cv::line(gray, Point(0, y_dead_min), Point(w, y_dead_min), CV_RGB(255, 255 ,255) );
	cv::line(gray, Point(0, y_dead_max), Point(w, y_dead_max), CV_RGB(255, 255 ,255) );*/

	// detect faces
	face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(35,35));
	// for each face found
	for (size_t i = 0; i < faces.size(); i++)
	{
		// crop face (pretty easy with OpenCV, don't you think?)
		Rect face_i = faces[i];
		face = gray(face_i);

		//  resized face and display it
		cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, CV_INTER_NN); //INTER_CUBIC);		
	
		// now, try to predict who it is
		char sTmp[256];		
		double predicted_confidence	= 0.0;
		int prediction				= -1;
		model.predict(face_resized,prediction,predicted_confidence);
		
		// create a rectangle around the face      
		// rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);
			
		// if good prediction : > threshold 
		if (predicted_confidence>PREDICTION_TRESHOLD)
		{
		// trace
		// sprintf(sTmp,"+ prediction ok = %s (%d) confiance = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
		//trace((string)(sTmp));
	
	 	// display the name of the person on the picture
			string box_text;
			if (prediction<MAX_PEOPLE)
			{
				box_text = "Id="+people[prediction];

				if ( Aim_At == people[prediction] )
				{
					
					// get x,y of upper left corner of face
					int pos_x = std::max(face_i.tl().x, 0);
					int pos_y = std::max(face_i.tl().y, 0);                       

					// get height and width of face
					int face_height = std::max(face_i.size().height, 0);	
					int face_width = std::max(face_i.size().width, 0);

					// calculate center coordinates of the face	
					int face_center_x = pos_x + ( face_width / 2 );
					int face_center_y = pos_y + ( face_height / 2 );
					Point face_center(face_center_x, face_center_y);

					// paint a rectangle and a circle around the face
					// rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);
					//circle(gray, face_center, 30, CV_RGB(255, 255 ,255));

					// get current servo position
					int dyn_pos = dyn_get_position(9);

					// calculate  distance and speed for the servo to move
					int distance_x = ((abs( center_x - face_center_x )) * 0.2);
					int distance_y = ((abs( center_y - face_center_y )) * 0.2);
					int speed_x = pow((distance_x / 3), 2);
					int speed_y = pow((distance_y / 3), 2);

					if(face_center_x > x_dead_min && face_center_x < x_dead_max && face_center_y > y_dead_min && face_center_y < y_dead_max)
					{
						string fire_text;
						fire_text = "FIRE_THE_LASER !!!";
						//putText(gray, fire_text, Point(100, 20), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,100,100), 1.0);	
					}
					else
					{
						if( face_center_x < x_dead_min ) // when face left, go left
						{
							// printf("moving %d left with speed %d\n", distance_x, speed_x);
							dyn_set_speed(9, speed_x);
							dyn_rel_move(9, 1, distance_x, 0, 1024 );
						} 

						if ( face_center_x > x_dead_max ) //when face right, go right
						{
							// printf("moving %d right with speed %d\n", distance_x, speed_x);
							dyn_set_speed(9, speed_x);
							dyn_rel_move(9, 0, distance_x, 0, 1024 );
						}

						if( face_center_y < y_dead_min ) // when face above, go up
						{ 
							// printf("moving %d down with speed %d\n", distance_y, speed_y);
							dyn_set_speed(11, speed_y);
							dyn_rel_move(11, 0, distance_y, 0, 1024 );
						}

						if( face_center_y > y_dead_max ) // when face below, go down
						{
							// printf("moving %d up with speed %d\n", distance_y, speed_y);
							dyn_set_speed(11, speed_y);
							dyn_rel_move(11, 1, distance_y, 0, 1024 );
						}
					}
				}
				else
				{
			//		printf("face not tracked");
				}
			}
			else
			{
				trace("(E) prediction not coherent");
			}
			int pos_x = std::max(face_i.tl().x - 10, 0);
			int pos_y = std::max(face_i.tl().y - 10, 0);			   
			// putText(gray, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,255,255), 1.0);	
					
		}
		else
		{		
				// trace is commented to speed up
				//sprintf(sTmp,"- prediction too low = %s (%d) confiance = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
				//trace((string)(sTmp));
		} 
	} // end for
	
			
/////////////////////////
// END OF FACE RECO
/////////////////////////
		
	// Show the result:
	imshow("camcvWin", gray);
	key = (char) waitKey(1);
	nCount++;		// count frames displayed
		
         mmal_buffer_header_mem_unlock(buffer);
      }
      else vcos_log_error("buffer null");
      
   }
   else
   {
      vcos_log_error("Received a encoder buffer callback with no state");
   }

   // release buffer back to the pool
   mmal_buffer_header_release(buffer);

   // and send one back to the port (if still open)
   if (pData && port->is_enabled)
   {
      MMAL_STATUS_T status;

      new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

      if (new_buffer)
         status = mmal_port_send_buffer(port, new_buffer);

      if (!new_buffer || status != MMAL_SUCCESS)
         vcos_log_error("Unable to return a buffer to the encoder port");
   }
}
Example #5
/**
 * main
 */
int main(int argc, const char **argv)
{
	
	
/////////////////////////////////
// BEGIN OF FACE RECO INIT
/////////////////////////////////

	//
	// see thinkrpi.wordpress.com, articles on Magic Mirror to understand this command line and parameters
	//
	cout<<"start\n";
	if ((argc != 4)&&(argc != 3)) {
		cout << "usage: " << argv[0] << " files.ext histo(0/1) threshold(opt, e.g. 5000)" << endl;
		exit(1);
	}
	
	// set default value for the prediction threshold = minimum value to recognize
	if (argc==3) { trace("(init) prediction threshold = 4500.0 by default");PREDICTION_SEUIL = 4500.0;}
	if (argc==4) PREDICTION_SEUIL = atoi(argv[3]);
	
	// do we do a color histogram equalization ? 
    bHisto=atoi(argv[2]);
	

	// init people; this should be done in a config file,
	// but I don't have time, I need to go to the swimming pool
	// with my daughters,
	// and they prefer to swim rather than watch their father write a config file.
	// Life is hard.
    people[P_ANON]   	= "Unknown";
    people[P_PHIL]   	= "Phil";
	
	// init...
	// reset counter
	for (int i=0;i<MAX_PEOPLE;i++) 
	{
		nPictureById[i]=0;
	}
	int bFirstDisplay	=1;
	trace("(init) People initialized");
	
	// Get the path to your CSV
	fn_csv = string(argv[1]);
	
	// Note: /!\ change this to your OpenCV path
	//fn_haar = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml";
	// fn_haar changed to the quicker LBP cascade (see article)
	fn_haar = "/usr/share/opencv/lbpcascades/lbpcascade_frontalface.xml";
	DEBUG cout<<"(OK) csv="<<fn_csv<<"\n";
	
    // Read in the data (fails if no valid input filename is given, but you'll get an error message):
    try {
        read_csv(fn_csv, images, labels);
		DEBUG cout<<"(OK) read CSV ok\n";
    	} 
    catch (cv::Exception& e) 
    {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        exit(1);
    }

	// get height, width of the 1st image --> all images must be the same size
    im_width = images[0].cols;
    im_height = images[0].rows;
	trace("(init) taille images ok");
 
 	//
    // Create a FaceRecognizer and train it on the given images:
	//
	
	// this is an Eigenfaces model, but you could replace it with a Fisherfaces model
	// (in that case the threshold value should be lower) (try)
	
    //	Fisherfaces model; 
    
    // train the model with your nice collection of pictures	
    trace("(init) start train images");
    model.train(images, labels);
 	trace("(init) train images : ok");
 
	// load face model
    if (!face_cascade.load(fn_haar))
   	{
    			cout <<"(E) face cascade model not loaded :"+fn_haar+"\n"; 
    			return -1;
    }
    trace("(init) Load modele : ok");
    
/////////////////////////////////
// END OF FACE RECO INIT
/////////////////////////////////
	
	
	// Our main data storage vessel..
	RASPIVID_STATE state;
	
	MMAL_STATUS_T status = MMAL_SUCCESS;
	MMAL_PORT_T *camera_video_port = NULL;
	MMAL_PORT_T *camera_still_port = NULL;
	MMAL_PORT_T *preview_input_port = NULL;
	MMAL_PORT_T *encoder_input_port = NULL;
	MMAL_PORT_T *encoder_output_port = NULL;
	
	time_t timer_begin,timer_end;
	double secondsElapsed;
	
	bcm_host_init();
	signal(SIGINT, signal_handler);

	// read default status
	default_status(&state);



    ////////////////////////////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////////////////////////////
    // Save output!
    outputFileName = "output.mp4";
    cout << "Saving output to: " << outputFileName << endl;

    Size size = Size(state.width, state.height);

    //output.open(outputFileName, 0, 15, size, false);

    //if(output.isOpened() == false)
    //{
    //    cout << "Failed to open the output file! \n" << endl;
    //    exit(1);
    //}
    ////////////////////////////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////////////////////////////



	// init windows and OpenCV Stuff
	cvNamedWindow("camcvWin", CV_WINDOW_AUTOSIZE); 
	int w=state.width;
	int h=state.height;
	dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
	py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);		// Y component of YUV I420 frame
	pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// U component of YUV I420 frame
	pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// V component of YUV I420 frame
	pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
	pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
	image = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);	// final picture to display

   
	// create camera
	if (!create_camera_component(&state))
	{
	   vcos_log_error("%s: Failed to create camera component", __func__);
	}
	else if ((status = raspipreview_create(&state.preview_parameters)) != MMAL_SUCCESS)
	{
	   vcos_log_error("%s: Failed to create preview component", __func__);
	   destroy_camera_component(&state);
	}
	else
	{
		PORT_USERDATA callback_data;
		
		camera_video_port   = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
		camera_still_port   = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
	   
		VCOS_STATUS_T vcos_status;
		
		callback_data.pstate = &state;
		
		vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);
		vcos_assert(vcos_status == VCOS_SUCCESS);
		
		// assign data to use for callback
		camera_video_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
        
        // init timer
  		time(&timer_begin); 

       
       // start capture
		if (mmal_port_parameter_set_boolean(camera_video_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
		{
		   	return 0;
		}
		
		// Send all the buffers to the video port
		
		int num = mmal_queue_length(state.video_pool->queue);
		int q;
		for (q=0;q<num;q++)
		{
		   MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state.video_pool->queue);
		
		   if (!buffer)
		   		vcos_log_error("Unable to get a required buffer %d from pool queue", q);
		
			if (mmal_port_send_buffer(camera_video_port, buffer)!= MMAL_SUCCESS)
		    	vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
		}
		
		
		// Now wait until we need to stop
		vcos_sleep(state.timeout);
  
		//mmal_status_to_int(status);
		// Disable all our ports that are not handled by connections
		check_disable_port(camera_still_port);
		
		if (state.camera_component)
		   mmal_component_disable(state.camera_component);
		
		//destroy_encoder_component(&state);
		raspipreview_destroy(&state.preview_parameters);
		destroy_camera_component(&state);
		
		}
	if (status != 0)
		raspicamcontrol_check_configuration(128);
	
	time(&timer_end);  /* get current time; same as: timer = time(NULL)  */
	cvReleaseImage(&dstImage);
	cvReleaseImage(&pu);
	cvReleaseImage(&pv);
	cvReleaseImage(&py);
	cvReleaseImage(&pu_big);
    cvReleaseImage(&pv_big);
	
	secondsElapsed = difftime(timer_end,timer_begin);
	
	printf ("%.f seconds for %d frames : FPS = %f\n", secondsElapsed,nCount,(float)((float)(nCount)/secondsElapsed));
		
   return 0;
}
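read_csv is called above but not shown. In the OpenCV FaceRecognizer tutorials this helper reads one "path;label" pair per line; a sketch in that style (the separator and error text are assumptions):

#include <opencv2/opencv.hpp>
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

// Each line of the CSV is "path/to/image.pgm;label".
static void read_csv(const std::string& filename,
                     std::vector<cv::Mat>& images,
                     std::vector<int>& labels,
                     char separator = ';')
{
    std::ifstream file(filename.c_str(), std::ifstream::in);
    if (!file)
        CV_Error(CV_StsBadArg, "No valid input file was given.");
    std::string line, path, classlabel;
    while (getline(file, line)) {
        std::stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if (!path.empty() && !classlabel.empty()) {
            images.push_back(cv::imread(path, 0));     // load as grayscale
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}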
Example #6
	void detectAndDisplay(Mat frame)
	{
		vector<Rect> faces;
		vector<Rect> eyes;
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		Mat frame_gray;
		int thresh = 100;

		cvtColor(frame, frame_gray, CV_BGR2GRAY);
		equalizeHist(frame_gray, frame_gray);
		
	
		face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(150,150));
		
		for (size_t i = 0; i < faces.size(); i++)
		{
			
			Point center(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
			circle(frame, center, 1, Scalar(0, 255, 0), 1, 8, 0);
			Point x1(faces[i].x , faces[i].y);
			circle(frame, x1, 1, Scalar(0, 0, 255), 6, 8, 0);
			Point x2(faces[i].x + faces[i].width  , faces[i].y + faces[i].height);
		//	faces[i].x = x1.x;
		//	faces[i].y = x1.y;
		//	faces[i].width *= 0.75;
		//	faces[i].height = faces[i].width*0.5;
			circle(frame, x2, 1, Scalar(0, 0, 255), 6, 8, 0);
			rectangle(frame, x1, x2, Scalar(255, 0, 255), 2, 8, 0);

			Mat faceROI = (frame_gray)(faces[i]);
			

			eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(10, 10));
			
			for (size_t j = 0; j < eyes.size(); j++)
			{
				Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
				int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
				circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);
				circle(frame, center, 1, Scalar(0, 255, 0), 1, 8, 0);
				Mat eyesROI = faceROI(eyes[j]);
				Mat canny_output;
				Canny(eyesROI, canny_output, thresh, thresh * 2, 3);
				findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
				// draw every contour onto the Canny output
				for (size_t k = 0; k < contours.size(); k++)
				{
					drawContours(canny_output, contours, (int)k, Scalar(255, 0, 0), 1, 8, hierarchy, 0, Point());
				}
				imshow("contours", canny_output);
			}

		}
		
		string window_name = "Face and Eyes detection";
		flip(frame, frame, 1);
		imshow(window_name, frame);

	}
Example #7
static void
kms_nose_detect_process_frame(KmsNoseDetect *nose_detect,int width,int height,double scale_f2n,
			      double scale_n2o, double scale_o2f,GstClockTime pts)
{
  Mat img (nose_detect->priv->img_orig);
  vector<Rect> *faces=nose_detect->priv->faces;
  vector<Rect> *noses=nose_detect->priv->noses;
  vector<Rect> nose;
  Scalar color;
  Mat gray, nose_frame (cvRound(img.rows/scale_n2o), cvRound(img.cols/scale_n2o), CV_8UC1);
  Mat  smallImg( cvRound (img.rows/scale_o2f), cvRound(img.cols/scale_o2f), CV_8UC1 );
  Mat noseROI;
  Rect r_aux;
  int i=0,j=0;
  const static Scalar colors[] =  { CV_RGB(255,0,255),
				    CV_RGB(255,0,0),
				    CV_RGB(255,255,0),
				    CV_RGB(255,128,0),
				    CV_RGB(0,255,0),
				    CV_RGB(0,255,255),
				    CV_RGB(0,128,255),
				    CV_RGB(0,0,255)} ;

  if ( ! __process_alg(nose_detect,pts) && nose_detect->priv->num_frames_to_process <=0)
    return;

  nose_detect->priv->num_frame++;

  if ( (2 == nose_detect->priv->process_x_every_4_frames && // one every 2 images
	(1 == nose_detect->priv->num_frame % 2)) ||  
       ( (2 != nose_detect->priv->process_x_every_4_frames) &&
	 (nose_detect->priv->num_frame <= nose_detect->priv->process_x_every_4_frames)))    
    {

      nose_detect->priv->num_frames_to_process --;
      cvtColor( img, gray, CV_BGR2GRAY );

      //if detect_event != 0 we have received faces as meta-data
      if ( 0 == nose_detect->priv->detect_event)
	{
	  //setting up the image where the face detector will be executed
	  resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
	  equalizeHist( smallImg, smallImg );
	  faces->clear();
	  fcascade.detectMultiScale(smallImg,*faces,
				    MULTI_SCALE_FACTOR(nose_detect->priv->scale_factor),2,
				    0 |CV_HAAR_SCALE_IMAGE,
				    Size(3,3));
	}

      //setting up the image where the nose detector will be executed
      resize(gray,nose_frame,nose_frame.size(), 0,0,INTER_LINEAR);
      equalizeHist( nose_frame, nose_frame);

      noses->clear();

      for( vector<Rect>::iterator r = faces->begin(); r != faces->end(); r++,i++ )
	{

	  color = colors[i%8];
	  const int top_height=cvRound((float)r->height*TOP_PERCENTAGE/100);
	  const int down_height=cvRound((float)r->height*DOWN_PERCENTAGE/100);
	  const int side_width=cvRound((float)r->width*SIDE_PERCENTAGE/100);      
	  

	  //Transforming the rect detected in face coordinates into nose coordinates;
	  //we crop the face region to avoid excessive processing
	  r_aux.y=(r->y + top_height)*scale_f2n;
	  r_aux.x=(r->x+side_width)*scale_f2n;
	  r_aux.height = (r->height-down_height-top_height)*scale_f2n;
	  r_aux.width = (r->width-side_width)*scale_f2n;
	  noseROI = nose_frame(r_aux);
	  nose.clear();
	  ncascade.detectMultiScale( noseROI, nose,
				     NOSE_SCALE_FACTOR, 3,
				     0|CV_HAAR_FIND_BIGGEST_OBJECT,
				     Size(1, 1));   

	  for ( vector<Rect>::iterator m = nose.begin(); m != nose.end();m++,j++)
	    {
	      Rect m_aux;
	      m_aux.x=(r_aux.x + m->x)*scale_n2o;
	      m_aux.y=(r_aux.y + m->y)*scale_n2o;
	      m_aux.width=(m->width-1)*scale_n2o;
	      m_aux.height=(m->height-1)*scale_n2o;
	      noses->push_back(m_aux);
	    }
	}
    }

  if (GOP == nose_detect->priv->num_frame )
    nose_detect->priv->num_frame=0;

  //Printing on image
  j=0;
  if (1 == nose_detect->priv->view_noses  )
    for ( vector<Rect>::iterator m = noses->begin(); m != noses->end();m++,j++)	  
      {
	color = colors[j%8];     
	cvRectangle( nose_detect->priv->img_orig, cvPoint(m->x,m->y),
		     cvPoint(cvRound(m->x + m->width), 
			     cvRound(+m->y+ m->height-1)),
		     color, 3, 8, 0);	    

      }
  
}
Example #8
int main( int argc, const char** argv )
{
    // Check if arguments are given correctly
    if( argc == 1 ) {
        cout << "Usage of model detection software: " << endl;
        cout <<	"detect_objects.exe <object_model.xml> <test_images.txt> <detection_result.txt>" << endl;
        return 0;
    }

    // Load the cascade model into the model container
    string model_name = argv[1];
    if( !cascade.load( model_name ) ) {
        cout << "Error loading the trained model from the provided model file! " << endl;
        return -1;
    };

    // Retrieve the filenames of all the test images
    string test_images = argv[2];
    ifstream input (test_images.c_str());
    string current_line;
    vector<string> filenames;
    while ( getline(input, current_line) ) {
        vector<string> line_elements;
        stringstream temp (current_line);
        string first_element;
        getline(temp, first_element, ' ');
        filenames.push_back(first_element);
    }
    int number_input_samples = filenames.size();
    input.close();

    // Create an output file for storing detections
    string location_output = argv[3];
    ofstream output_file (location_output.c_str());

    // Loop over each image in the test image sequence and perform detection
    for(size_t i = 0; i < filenames.size(); i++) {
        // Read in the first image
        Mat current_frame = imread(filenames[i]);

        // ------------------------------------------------------------------------
        // PERFORM THE ACTUAL DETECTION
        // ------------------------------------------------------------------------

        // Specific variables for the actual detection phase
        vector<Rect> objects;
        Mat frame_gray;

        // Convert the input frame to grayscale and apply lighting normalization using histogram equalization
        cvtColor( current_frame, frame_gray, CV_BGR2GRAY );
        equalizeHist( frame_gray, frame_gray );

        // Detect object in a given image
        // Parameters should be checked at : http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html?highlight=detectmultiscale#void%20CascadeClassifier::detectMultiScale%28const%20Mat&%20image,%20vector%3CRect%3E&%20objects,%20double%20scaleFactor,%20int%20minNeighbors,%20int%20flags,%20Size%20minSize,%20Size%20maxSize%29
        // Below command would detect and visualize all single detections
        //cascade.detectMultiScale( frame_gray, objects, 1.05, 0, 0);

        // Below command would detect and visualize all detections that have 5 or more matching overlaps
        cascade.detectMultiScale( frame_gray, objects, 1.05, 5, 0);

        // Below command would detect and visualize all detections that have 5 or more matching overlaps and specify a maximum and minimal object size
        //cascade.detectMultiScale( interested_region, objects, 1.05, 5, 0, Size(10, 25), Size(100, 250));

        // ------------------------------------------------------------------------
        // VISUALIZE THE ACTUAL DETECTION
        // ------------------------------------------------------------------------

        // Visualize detections on the input frame and show in the given window
        for( size_t j = 0; j < objects.size(); j++ )
        {
            // Use a rectangle representation on the frame
            // Frame width 3 pixels in color red (BGR format)
            rectangle(current_frame, Point(objects[j].x, objects[j].y), Point(objects[j].x + objects[j].width, objects[j].y + objects[j].height), Scalar(0, 0, 255), 1);
        }

        // Show the result
        imshow( window_name, current_frame );
        waitKey(0);

        // ------------------------------------------------------------------------
        // SAVE THE DETECTION RESULTS
        // Universal format
        // filename #detections x1 y1 w1 h1 x2 y2 w2 h2 ... xN yN wN hN
        // ------------------------------------------------------------------------

        output_file << filenames[i];
        output_file << " " << objects.size();
        for(size_t k = 0; k < objects.size(); k++) {
            output_file << " " << objects[k].x << " " << objects[k].y << " " << objects[k].width << " " << objects[k].height;
        }
        output_file << endl;
    }

    output_file.close();

    return 0;
}
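For completeness, a small reader for the universal detection format written above (filename #detections x1 y1 w1 h1 ...) could look like the sketch below; it is not part of the original tool:

#include <opencv2/opencv.hpp>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main(int argc, char** argv)
{
    if (argc != 2) {
        std::cout << "usage: parse_detections <detection_result.txt>" << std::endl;
        return 0;
    }
    std::ifstream input(argv[1]);
    std::string line;
    while (std::getline(input, line)) {
        std::stringstream ss(line);
        std::string filename;
        int count = 0;
        ss >> filename >> count;                  // "filename #detections"
        std::vector<cv::Rect> objects;
        for (int i = 0; i < count; i++) {
            int x, y, w, h;
            ss >> x >> y >> w >> h;               // "x y w h" per detection
            objects.push_back(cv::Rect(x, y, w, h));
        }
        std::cout << filename << ": " << objects.size() << " detections" << std::endl;
    }
    return 0;
}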
/*
 * To work with Kinect or XtionPRO the user must install the OpenNI library and the PrimeSensor Module for OpenNI, and
 * configure OpenCV with the WITH_OPENNI flag ON (using CMake).
 */
int main( int argc, char* argv[] )
{   
    time_t start = time(0);
    bool isColorizeDisp = false, isFixedMaxDisp = false;
    int imageMode = 0;
    bool retrievedImageFlags[5] = { false, false, false, false, false };
    string filename;
    bool isVideoReading = false;
    //parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );

    if (pcl::io::loadPCDFile<pcl::PointXYZ> ("test_pcd.pcd", *cloud_golden) == -1) //* load the file
    {
    	PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
    	return (-1);
    }
    std::cout << "Loaded "
            << cloud_golden->width * cloud_golden->height
            << " data points from test_pcd.pcd with the following fields: "
            << std::endl;

    pcl::copyPointCloud (*cloud_golden, *cloud_transformed);
    cout << "Device opening ..." << endl;
    cout << CV_CAP_OPENNI <<endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
        capture.open(CV_CAP_OPENNI);

    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( !isVideoReading )
    {
        bool modeRes=false;
        switch ( imageMode )
        {
            case 0:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
                break;
            case 1:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
                break;
            case 2:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
                break;
                //The following modes are only supported by the Xtion Pro Live
            case 3:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
                break;
            case 4:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
                break;
            default:
                CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
        }
        if (!modeRes)
            cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    }
    if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0) capture.set(CV_CAP_PROP_OPENNI_REGISTRATION,1);
    // Print some available device settings.
    cout << "\nDepth generator output mode:" << endl <<
            "FRAME_WIDTH      " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT     " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FRAME_MAX_DEPTH  " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
            "FPS              " << capture.get( CV_CAP_PROP_FPS ) << endl <<
            "REGISTRATION     " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
    if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout <<
            "\nImage generator output mode:" << endl <<
            "FRAME_WIDTH   " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT  " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FPS           " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }
    if( !face_cascade.load( cascade_name[0] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    if( !eyes_cascade.load( cascade_name[1] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    //printf("Entering for\n");

    int last_printed = 0;
    int WAIT_SEC = 10;

    viewer = simpleVis(cloud_transformed);
    for(;;)
    {
        Mat depthMap;
        Point image_center;
        Mat Display_image;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;
        Mat show;
        double seconds_since_start = difftime( time(0), start);

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                const float scaleFactor = 0.05f;
		depthMap.convertTo( show, CV_8UC1, scaleFactor );
                //imshow( "depth map", show );
            }

            if( capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) {
                
            // Align nose with the circle


                int rad = 40;
               	int row_rgb = bgrImage.rows;
            	int col_rgb = bgrImage.cols;
                image_center.y = row_rgb/2 - 100;
                image_center.x = col_rgb/2;
                Display_image = bgrImage.clone();
                // Copying bgrImage so that circle is shown temporarily only
                circle( Display_image, image_center, rad, Scalar( 255, 0, 0 ), 3, 8, 0 );
                imshow( "rgb image", Display_image );

                // Wait for a key Press
                //std::cin.ignore();
                // Now it will capture Golden data 
            }

        /*    if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
                imshow( "gray image", grayImage );*/

        int seconds = int(seconds_since_start);
        if(last_printed<seconds && seconds<=WAIT_SEC){
            printf(" Capturing Golden Face template after %d Seconds ...\n\n", WAIT_SEC - seconds);
                last_printed=seconds;
        }
            
	    if(!depthMap.empty() && !bgrImage.empty() && (seconds_since_start > WAIT_SEC)) 
		    detectAndDisplay(bgrImage, depthMap, argc, argv);
	    
	    //writeMatToFile("depth.txt",depthMap);
        }

  	viewer->spinOnce (10);
  	boost::this_thread::sleep (boost::posix_time::microseconds (10));
        viewer->removePointCloud("sample cloud");
  	viewer->addPointCloud<pcl::PointXYZ> (cloud_transformed, "sample cloud");
  	viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");

        if( waitKey( 30 ) >= 0 )
            break;
    }
    Trans_dump.close();
    return 0;
}
// Function detectAndDisplay
int detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;
    Mat crop;
    Mat res;
    Mat gray;
    string text;
    stringstream sstm;

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

    // Set Region of Interest
    cv::Rect roi_b;
    cv::Rect roi_c;

    size_t ic = 0; // ic is index of current element
    int ac = 0; // ac is area of current element

    size_t ib = 0; // ib is index of biggest element
    int ab = 0; // ab is area of biggest element

    for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
    {
        roi_c.x = faces[ic].x;
        roi_c.y = faces[ic].y;
        roi_c.width = (faces[ic].width);
        roi_c.height = (faces[ic].height);

        ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)

        roi_b.x = faces[ib].x;
        roi_b.y = faces[ib].y;
        roi_b.width = (faces[ib].width);
        roi_b.height = (faces[ib].height);

        ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element

        if (ac > ab)
        {
            ib = ic;
            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = (faces[ib].width);
            roi_b.height = (faces[ib].height);
        }

        crop = frame(roi_b);
        resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
        cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale

        Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
        Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
        rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
    }
    if(!crop.empty())
	return 1;
    else
	return 0;
}
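The comment above notes that the resized crop "will be needed later while saving images"; a minimal sketch of that saving step (the filename scheme is an assumption for illustration):

#include <opencv2/opencv.hpp>
#include <sstream>
using namespace cv;
using namespace std;

// Save the 128x128 crop produced above under a running index.
void saveCrop(const Mat& res)
{
    static int filenumber = 0;          // running index across calls
    stringstream ssfn;
    ssfn << "face_" << filenumber << ".png";
    imwrite(ssfn.str(), res);
    filenumber++;
}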
Example #11
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
    int i = 0;
    double t = 0;
    vector<Rect> faces, faces2;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, COLOR_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
       // |CASCADE_FIND_BIGGEST_OBJECT
       // |CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE
        ,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                // |CASCADE_FIND_BIGGEST_OBJECT
                                 //|CASCADE_DO_ROUGH_SEARCH
                                 |CASCADE_SCALE_IMAGE
                                 ,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;

        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
             Mat newImage;
            img.copyTo(newImage);

            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            //circle( img, center, radius, color, 3, 8, 0 );
            radius=radius*0.8;
            int hf=0.2*radius;
            printf("size=%d\n",radius*2);
            Rect roi(center.x-radius,center.y-radius,2*radius,2*radius+hf);
            int se=20;
            Rect roi_out(center.x-radius-se,center.y-radius-se,2*radius+2*se,2*radius+2*se+hf);
            Rect roi_in(center.x-radius+se,center.y-radius+se,2*radius-2*se,2*radius-2*se);
            Mat mask_smooth=Mat::zeros(img.rows,img.cols,CV_8U);
            mask_smooth(roi_out)=255;
            mask_smooth(roi_in)=0;
            Mat image_roi=img(roi);
            Mat mask=Mat::zeros(img.rows,img.cols,CV_8U);
            mask(roi)=1;
            Scalar avg_pic_intensity=mean(image_roi);
            newImage(Rect(0,0,img.cols,img.rows))=avg_pic_intensity;
            img.copyTo(newImage,mask);
            Mat newImageBlurred; 
            blur(newImage,newImageBlurred,Size(20,20));
            blur(mask_smooth,mask_smooth,Size(20,20));
            newImageBlurred.copyTo(newImage,mask_smooth);
            
            //newImage(roi)=image_roi;
            imshow("blurred",newImageBlurred);
            imwrite("OutputImage.jpg",newImage);
            imshow("new",newImage);
            imshow("mask",mask_smooth);
            waitKey(0);
            std::cout<<avg_pic_intensity; 
            
            rectangle(img,cvPoint(center.x-radius,center.y-radius),cvPoint(center.x+radius,center.y+radius),avg_pic_intensity,3,8,0);
        }
        else
            rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
                       cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg(*r);
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            //|CASCADE_DO_CANNY_PRUNING
            |CASCADE_SCALE_IMAGE
            ,
            Size(30, 30) );
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    cv::imshow( "result", img );
}
Example #12
int main (int argc, char** argv) {
	Aria::init();
	ArRobot robot;
	ArSonarDevice sonar;
	ArArgumentParser parser(&argc, argv);
	parser.loadDefaultArguments();
	ArRobotConnector robotConnector(&parser, &robot);
	if (!robotConnector.connectRobot()) {
		ArLog::log(ArLog::Terse, "Could not connect to the robot.");
		if(parser.checkHelpAndWarnUnparsed())
		{
			Aria::logOptions();
			Aria::exit(1);
			return 1;
		}
	}

	ArSonarDevice sonarDev;
	ArPose* poseList = readPostitions("positions.txt");
	robot.runAsync(true);
	robot.enableMotors();
	robot.moveTo(ArPose(0,0,0));
	robot.comInt(ArCommands::ENABLE, 1);
	robot.addRangeDevice(&sonarDev);
	ArActionGoto gotoPoseAction("goto", ArPose(0, 0, 0), 200);
	ArActionAvoidFront avoidFront("avoid front");
	ArActionStallRecover stallRecover("stall recover");
	robot.addAction(&gotoPoseAction, 50);
	robot.addAction(&avoidFront, 60);
	robot.moveTo(ArPose(0,0,0));
	int length = ARRAY_SIZE(poseList); // note: ARRAY_SIZE on a pointer does not yield the element count
	cout << "length: " << length << endl;
	ArServerBase server;
	ArServerSimpleOpener simpleOpener(&parser);
	char fileDir[1024];
	  ArUtil::addDirectories(fileDir, sizeof(fileDir), Aria::getDirectory(),
				 "ArNetworking/examples");

	  // first open the server up
	  if (!simpleOpener.open(&server, fileDir, 240))
	  {
	    if (simpleOpener.wasUserFileBad())
	      printf("Bad user/password/permissions file\n");
	    else
	      printf("Could not open server port\n");
	    exit(1);
	  }
	ArServerInfoRobot serverInfo(&server, &robot);
	GotoGoal gotoGoal(&robot, &sonar, &server, &serverInfo);
	gotoGoal.init(argc, argv);
	float angle = 0;
	VideoCapture cap;
	cap.open(0);
	Rect trackWindow;
	//flag: set when the ball/object has been found
	bool checkObject = false;
	int hsize = 16;

	namedWindow( "threshold", 0 );
	namedWindow( "trackbar", 0 );
	namedWindow( "Histogram", 0 );
	namedWindow( "main", 0 );
	createTrackbar( "Vmin", "trackbar", &vmin, 256, 0 );
	createTrackbar( "Vmax", "trackbar", &vmax, 256, 0 );
	createTrackbar( "Smin", "trackbar", &smin, 256, 0 );

	CascadeClassifier c;
	c.load("cascade.xml");
	Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
	float vel = 0;
	int i = 0;
	while(1)
	{
		cap >> frame;
		if( frame.empty() ){
			cout << "camera error" << endl;
			break;
		}
		frame.copyTo(image);
		cvtColor(image, hsv, COLOR_BGR2HSV);
		int _vmin = vmin, _vmax = vmax;
		inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),	Scalar(180, 256, MAX(_vmin, _vmax)), mask);
		gotoPoseAction.setGoal(poseList[i]);
		while (!gotoPoseAction.haveAchievedGoal()) 
		{
			ArLog::log(ArLog::Normal, "goal(%.2f, %0.2f) x = %.2f, y = %.2f", poseList[i].getX(), poseList[i].getY(), robot.getX(), robot.getY());
//			if (!checkObject)
			   checkObject = detect(frame, c);
			if (checkObject)
				cout << "Object detected" << endl;
			else
				cout << "No object detected" << endl;
			if (checkObject) {
				if(trackObject(hsv, mask)) {
					float d = distance();
					if (d < 250) {
						gotoGoal.move(-200);
					} else if ( d >= 250 && d <= 300) {
						gotoGoal.stop();
					}
					else {
						vel = d * 0.7;
						vel = (int) (vel/50) * 50;
						if(vel > 200)
							vel = 200;
						gotoGoal.setVel(vel);
						angle =  determindRotate();
						cout <<"khoang cach: "<<d<<"\tGoc quay: "<<angle<<"\t van toc = "<<vel<<endl;
						if (angle != 0) {
							gotoGoal.stop();
							gotoGoal.rotate(angle);
						}
					}
				}
			}
			imshow("main", image);
			imshow( "threshold", mask );
			imshow( "Histogram", histimg );
		}
		i++;
	}

	ArUtil::sleep(2000);
	Aria::shutdown();

}
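The detect(frame, c) helper used in the loop above is not defined in this example; a plausible sketch, assuming it simply reports whether the loaded cascade finds at least one object in the frame (the detectMultiScale parameters are assumptions):

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Run the cascade on a grayscale copy of the frame and report whether
// anything was found.
bool detect(Mat& frame, CascadeClassifier& cascade)
{
    std::vector<Rect> objects;
    Mat gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    equalizeHist(gray, gray);
    cascade.detectMultiScale(gray, objects, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return !objects.empty();
}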
Mat detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
	Mat img2;
	Mat newROI;
	 // Conversion to rg-chromaticity space
	img.copyTo(img2);
	Mat M;
	img.copyTo(M);
	//for( int i = 0; i < img.rows; i++ ) {
   	//	const int* ptr = img2.ptr<int>(i);
    	//	float* dptr = M.ptr<float>(i);
    	//	for( int j = 0; j < img.cols; j++ ) {
        //	dptr[3*j] = ptr[3*j+2]*1.0 /(ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
        //		dptr[3*j+1] = ptr[3*j+1]*1.0 /(ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
        //		dptr[3*j+2] = (ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
    	//	}
  	//}

	//cvtColor(img,img2,COLOR_BGR2GRAY);
    int i = 0;
    double t = 0;
    vector<Rect> faces, faces2;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, COLOR_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        |CASCADE_FIND_BIGGEST_OBJECT
        //|CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE
        ,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                 |CASCADE_FIND_BIGGEST_OBJECT
                                 //|CASCADE_DO_ROUGH_SEARCH
                                 |CASCADE_SCALE_IMAGE
                                 ,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        
        Point faceCenter;
        Scalar color = colors[i%8];
        int radius;

        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            faceCenter = center;
            radius = cvRound((r->width + r->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
                       cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg(*r);
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            //|CASCADE_DO_CANNY_PRUNING
            |CASCADE_SCALE_IMAGE,
            Size(30, 30) );
        int nestedFlag=0;
        Scalar s1,s2;
            
        float gvalue=0;
        int count=0;
        stringstream s;
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            //printf("%d",center.x);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            	
            char c= waitKey(5);
            
            
            if(center.x < faceCenter.x )
            {
            	
            	circle( img, center, radius, color, 3, 8, 0 );
            	newROI = img2(Rect(center.x-radius,center.y-radius,radius*2,radius*2));
                imshow("CroppedImage",newROI); 
                MatchingMethod(newROI,template1);
            }
        }
      
      	printf("%f\n",gvalue);
    }
    	
    cv::imshow( "result", img);
   // cv::imshow( "newROI", newROI );
   return img;
}
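MatchingMethod(newROI, template1) is called above but not defined in this example. A sketch based on cv::matchTemplate follows; the signature, the TM_CCOEFF_NORMED choice, and the display window are assumptions:

#include <opencv2/opencv.hpp>
using namespace cv;

// Slide `templ` over `img` with normalized cross-correlation and mark the
// best match; assumes img is at least as large as templ.
void MatchingMethod(Mat img, Mat templ)
{
    Mat result(img.rows - templ.rows + 1, img.cols - templ.cols + 1, CV_32FC1);
    matchTemplate(img, templ, result, TM_CCOEFF_NORMED);
    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    rectangle(img, maxLoc,
              Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows),
              Scalar::all(255), 2, 8, 0);
    imshow("match", img);
}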
Example #14
/**
 * main
 */
int main(int argc, const char **argv)
{
	
	
/////////////////////////////////
// BEGIN OF FACE RECO INIT
/////////////////////////////////

	//
	// see thinkrpi.wordpress.com, articles on Magic Mirror to understand this command line and parameters
	//
	
	//fn_haar = "/home/pi/Desktop/haarcascade_frontalface_default.xml";
	fn_haar = "/home/pi/Desktop/lbpcascade_frontalface.xml";
	// load face model
	if (!face_cascade.load(fn_haar))
	{
		cout <<"(E) face cascade model not loaded :"+fn_haar+"\n"; 
		return -1;
	}
    
/////////////////////////////////
// END OF FACE RECO INIT
/////////////////////////////////
	
	
	// Our main data storage vessel..
	RASPIVID_STATE state;
	
	MMAL_STATUS_T status = MMAL_SUCCESS;
	MMAL_PORT_T *camera_video_port = NULL;
	MMAL_PORT_T *camera_still_port = NULL;
	MMAL_PORT_T *preview_input_port = NULL;
	MMAL_PORT_T *encoder_input_port = NULL;
	MMAL_PORT_T *encoder_output_port = NULL;
	
	time_t timer_begin,timer_end;
	double secondsElapsed;
	
	bcm_host_init();
	signal(SIGINT, signal_handler);

	// read default status
	default_status(&state);

	// init windows and OpenCV Stuff
	cvNamedWindow("camcvWin", CV_WINDOW_AUTOSIZE); 
	int w=state.width;
	int h=state.height;
	dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
	py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);		// Y component of YUV I420 frame
	pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// U component of YUV I420 frame
	pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// V component of YUV I420 frame
	pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
	pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
	image = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);	// final picture to display

   
	// create camera
	if (!create_camera_component(&state))
	{
	   vcos_log_error("%s: Failed to create camera component", __func__);
	}
	//else if (!raspipreview_create(&state.preview_parameters))
	//{
	   //vcos_log_error("%s: Failed to create preview component", __func__);
	   //destroy_camera_component(&state);
	//}
	else
	{
		PORT_USERDATA callback_data;
		
		camera_video_port   = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
		camera_still_port   = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
	   
		VCOS_STATUS_T vcos_status;
		
		callback_data.pstate = &state;
		
		vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);
		vcos_assert(vcos_status == VCOS_SUCCESS);
		
		// assign data to use for callback
		camera_video_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
        
        // init timer
  		time(&timer_begin); 

       
       // start capture
		if (mmal_port_parameter_set_boolean(camera_video_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
		{
		   	return 0;
		}
		
		// Send all the buffers to the video port
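		// MMAL note: the video port can only produce frames into buffers it has
		// been given, so every buffer in the pool is handed to the port up front;
		// video_buffer_callback releases each processed buffer and queues a
		// replacement, keeping the pipeline fed.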
		
		int num = mmal_queue_length(state.video_pool->queue);
		int q;
		for (q=0;q<num;q++)
		{
			MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state.video_pool->queue);

			if (!buffer)
			{
				vcos_log_error("Unable to get a required buffer %d from pool queue", q);
				continue;	// don't try to send a NULL buffer
			}

			if (mmal_port_send_buffer(camera_video_port, buffer)!= MMAL_SUCCESS)
				vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
		}
		
		
		// Now wait until we need to stop
		vcos_sleep(state.timeout);
  
		//mmal_status_to_int(status);
		// Disable all our ports that are not handled by connections
		check_disable_port(camera_still_port);
		
		if (state.camera_component)
		   mmal_component_disable(state.camera_component);
		
		//destroy_encoder_component(&state);
		raspipreview_destroy(&state.preview_parameters);
		destroy_camera_component(&state);
		
	}

	if (status != MMAL_SUCCESS)
		raspicamcontrol_check_configuration(128);
	
	time(&timer_end);  /* get current time; same as: timer = time(NULL)  */
	cvReleaseImage(&dstImage);
	cvReleaseImage(&pu);
	cvReleaseImage(&pv);
	cvReleaseImage(&py);
	cvReleaseImage(&pu_big);
	cvReleaseImage(&pv_big);
	
	secondsElapsed = difftime(timer_end,timer_begin);
	
	printf("%.f seconds for %d frames : FPS = %f\n", secondsElapsed, nCount, (float)nCount/secondsElapsed);
		
   return 0;
}
Example #15
0
	void findcars()                 //main detection routine
	{
    	int i = 0;

		Mat img = storage.clone();
		Mat temp;                    //region of interest: once a car is detected (and verified) by one classifier, it is masked out so it is not available to the other one

		if(img.empty() )
        {
			cout << endl << "detect not successful" << endl;
		}
		int cen_x;
		int cen_y;
    	vector<Rect> cars;
    	const static Scalar colors[] =  { CV_RGB(0,0,255),CV_RGB(0,255,0),CV_RGB(255,0,0),CV_RGB(255,255,0),CV_RGB(255,0,255),CV_RGB(0,255,255),CV_RGB(255,255,255),CV_RGB(128,0,0),CV_RGB(0,128,0),CV_RGB(0,0,128),CV_RGB(128,128,128),CV_RGB(0,0,0)};

    	Mat gray;

    	cvtColor( img, gray, CV_BGR2GRAY );

		Mat resize_image(cvRound (img.rows), cvRound(img.cols), CV_8UC1 );

    	resize( gray, resize_image, resize_image.size(), 0, 0, INTER_LINEAR );
    	equalizeHist( resize_image, resize_image );


    	cascade.detectMultiScale( resize_image, cars,1.1,2,0,Size(10,10));                 //detection using main classifier


		for( vector<Rect>::const_iterator main = cars.begin(); main != cars.end(); main++, i++ )
    	{
       		Mat resize_image_reg_of_interest;
        	vector<Rect> nestedcars;
        	Point center;
        	Scalar color = colors[i%8];


			//getting points for bounding a rectangle over the car detected by main
			int x0 = cvRound(main->x);
			int y0 = cvRound(main->y);
			int x1 = cvRound((main->x + main->width-1));
			int y1 = cvRound((main->y + main->height-1));



        	if( checkcascade.empty() )
            	continue;
        	resize_image_reg_of_interest = resize_image(*main);
        	checkcascade.detectMultiScale( resize_image_reg_of_interest, nestedcars,1.1,2,0,Size(30,30));

        	for( vector<Rect>::const_iterator sub = nestedcars.begin(); sub != nestedcars.end(); sub++ )      //testing the detected car by main using checkcascade
        	{
           		center.x = cvRound((main->x + sub->x + sub->width*0.5));        //getting center points for bounding a circle over the car detected by checkcascade
				cen_x = center.x;
			   	center.y = cvRound((main->y + sub->y + sub->height*0.5));
				cen_y = center.y;
				if(cen_x>(x0+15) && cen_x<(x1-15) && cen_y>(y0+15) && cen_y<(y1-15))         //if the centre of the bounding circle lies inside the rectangle by a 15-pixel margin, then the car is certified
				{

					rectangle( image_main_result, cvPoint(x0,y0),
                    	   		cvPoint(x1,y1),
                     	  		color, 3, 8, 0);               //drawing the boundary rectangle on the final result



					//masking the detected car to detect second car if present

					Rect region_of_interest = Rect(x0, y0, x1-x0, y1-y0);
					temp = storage(region_of_interest);
					temp = Scalar(255,255,255);
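					// note: storage(region_of_interest) is a view into the same pixel
					// data, so assigning Scalar(255,255,255) paints the detected car
					// white in 'storage' itself, masking it from further detections.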

					num = num+1;     //num is the number of cars detected

				}
			}

		}


	if(image_main_result.empty() )
    {
		cout << endl << "result storage not successful" << endl;
	}

    }
Example #16
0
void detectAndDisplay( Mat rgbframe, Mat depthframe, int argc, char *argv[] )
{   

    // Resize Image
    Size new_size;
    new_size = rgbframe.size();
    new_size.width = new_size.width/2;
    new_size.height = new_size.height/2;
    
    resize(rgbframe, rgbframe, new_size,0,0,INTER_NEAREST); 
    resize(depthframe, depthframe, new_size,0,0,INTER_NEAREST); 
    
    //cv::FileStorage file("face_template.xml", cv::FileStorage::WRITE);
    //printf("detectAndDisplay\n");	
    Mat frame_gray;
    std::vector<Rect> faces;

   cvtColor( rgbframe, frame_gray, COLOR_BGR2GRAY );
   equalizeHist( frame_gray, frame_gray );
   //-- Detect faces
   //-- Viola-Jones detection
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 4, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
   //face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

   double min_g = DBL_MAX;  
   cv::Point min_loc_g;
   int face_index = 0;
   int found = 0; 
   Rect nose;
   Point nose_center;
   //printf("detectAndDisplay faces found %d\n",faces.size());	
   int max_rect_area = 0;
   int area;

   for( size_t i = 0; i < faces.size(); i++ )
    {

      Mat faceROI = frame_gray( faces[i] );
      std::vector<Rect> eyes;
      found = 1;
      area = faces[i].width * faces[i].height;

      if(area > max_rect_area) {
          face_index = i;
          max_rect_area = area;
      }

      // Checking which face rectangle is the biggest; in case there are multiple faces, the nearest face will be taken

      //printf("min is %f\n",min);
      //-- In each face, detect eyes
      //eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
//      eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
//
//
//        // Taking the largest rectangle        
//
//      for( size_t j = 0; j < eyes.size(); j++ )
//       {
//         //Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
//         //circle( rgbframe, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
//      	 Mat d_rect = faceROI(eyes[j]);
//      	 double min,max;
//      	 cv::Point min_loc,max_loc;
//      	 cv::minMaxLoc(d_rect, &min, &max, &min_loc, &max_loc);
//	 if (min < min_g)
//         {
//	 	min_g = min;
//		face_index = i;
//		min_loc_g.x = faces[i].x + min_loc.x ;
//		min_loc_g.y = faces[i].y + min_loc.y ;
//		nose = eyes[j];
//                nose_center.x = faces[i].x + eyes[j].x + eyes[j].width/2;
//		nose_center.y = faces[i].y + eyes[j].y + eyes[j].height/2 ;
		
//         }      


//       }
    }
   //-- Show what you got
   if (found == 1)
   {
   	Point center( faces[face_index].x + faces[face_index].width/2, faces[face_index].y + faces[face_index].height/2 );
   	ellipse( rgbframe, center, Size( faces[face_index].width/2, faces[face_index].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
   	//printf("Depth is %f\n",min_g);
        //int radius = cvRound( (nose.width + nose.height)*0.25 );
        //circle( rgbframe, nose_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
   	imshow( window_name, rgbframe );
    
    // Capturing depth image based on found rectangle
    Mat d_rect = depthframe(faces[face_index]);
       	//imshow("Face Depth",depthframe);  
        //imshow("Rectangle Depth",d_rect);
	//printf("Nose depth is nose_center %d \n",depthframe.at<int>(nose_center.x,nose_center.y));
	//file<<"depthMap"<<d_rect(nose);
	//file<<"depthMap"<<d_rect;
	//int filenumber2 = 1;
	//cin >> filenumber2;
	//printf("Point is %d,%d \n",min_loc_g.x,min_loc_g.y);

	if (filenumber == 1 )
	{   
        // global rectangle is saved at this point
        golden_face.x = faces[face_index].x;
        golden_face.y = faces[face_index].y;
        golden_face.width = faces[face_index].width;
        golden_face.height = faces[face_index].height;
        golden_image = d_rect.clone();
        printf(" GOLDEN FACE RECTANGLE :  X = %d, Y = %d\n",golden_face.x, golden_face.y);
		matToCSV(d_rect, faces[face_index]);
	}
	else
	{
        // Resizing
        //imshow("Before resize",d_rect);
        resize(d_rect, d_rect, golden_image.size(), 0, 0, INTER_NEAREST );
        //imshow("After resize",d_rect);
        //std::cin.ignore();
		matToCSV(d_rect, faces[face_index]);
		transformation(argc, argv);
	}
	
   }
}
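// matToCSV() and transformation() are used above but not shown. A minimal
// sketch of matToCSV, assuming it dumps the depth ROI as comma-separated
// values along with the face rectangle; the output file name and the CV_32F
// element type are assumptions, and <fstream> is assumed to be included.
void matToCSV(const Mat& m, Rect face)
{
    std::ofstream out("depth_face.csv");
    out << "x," << face.x << ",y," << face.y
        << ",w," << face.width << ",h," << face.height << "\n";
    for (int r = 0; r < m.rows; r++)
    {
        for (int c = 0; c < m.cols; c++)
            out << m.at<float>(r, c) << (c + 1 < m.cols ? "," : "");
        out << "\n";
    }
}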
// Function to detect faces
void detectAndDisplay( Mat frame )
{
  std::vector<Rect> faces;
  Mat frame_gray;
  cv::Point center;

  cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  // Detect faces (args: scale factor, min neighbours, flags, min size, max size)
  //face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
  face_cascade.detectMultiScale( frame_gray, faces, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30), Size(120,120) );

  for ( size_t i = 0; i < faces.size(); i++ )
    {
      // Detect center of face and draw ellipse
      //Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
      center.x = faces[i].x + faces[i].width/2;
      center.y = faces[i].y + faces[i].height/2;
      ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

      // In each face, detect eyes
      // Mat faceROI = frame_gray( faces[i] );
      // std::vector<Rect> eyes;
      // eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );

      // for ( size_t j = 0; j < eyes.size(); j++ )
      //   {
      // 	  Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
      // 	  int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
      // 	  circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
      //   }
    }
  
  if(faces.size()>0)
  {   
    // Without Kalman results are better
    yaw_angle = getAngleFromCentroid("horizontal", frame.size().width, center);
    pitch_angle = getAngleFromCentroid("vertical", frame.size().height, center);
    // Draw face centroid
    drawCross( frame, center, Scalar(0,0,255), 5 );

    // // Get Kalman centroid of face
    // if (first)
    //   {
    // 	first = false;
    // 	initializeKalman(center.x, center.y);  
    //   }   
    // cv::Point kalman_centroid = getKalmanCentroid(center.x, center.y);
    // last_centroid = kalman_centroid;
    // drawCross( frame, kalman_centroid, Scalar(0,255,0), 5 ); // Kalman
    // drawCross( frame, center, Scalar(0,0,255), 5 ); // Original
    // // Get angle from kalman centroid
    // yaw_angle = getAngleFromCentroid("horizontal", frame.size().width, kalman_centroid);
    // pitch_angle = getAngleFromCentroid("vertical", frame.size().height, kalman_centroid);
    
    // // Without Kalman results are better
    // yaw_angle = getAngleFromCentroid("horizontal", frame.size().width, center);
    // pitch_angle = getAngleFromCentroid("vertical", frame.size().height, center);
    
    publishFaceTracking(center.x, center.y, yaw_angle, pitch_angle);
  }

  // Show what you got
  cv::imshow( window_name, frame );
  cv::waitKey(1);
}
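// getAngleFromCentroid() is not shown in this snippet. A minimal sketch,
// assuming a linear mapping from the centroid's offset from the image center
// to an angle spanning the camera's field of view; the FOV constants are
// assumptions for illustration.
double getAngleFromCentroid(const std::string& axis, int frame_size, cv::Point centroid)
{
  const double H_FOV_DEG = 60.0;  // assumed horizontal field of view
  const double V_FOV_DEG = 45.0;  // assumed vertical field of view
  double fov = (axis == "horizontal") ? H_FOV_DEG : V_FOV_DEG;
  int coord  = (axis == "horizontal") ? centroid.x : centroid.y;
  // normalized offset in [-0.5, 0.5], scaled to the field of view
  return ((coord - frame_size / 2.0) / frame_size) * fov;
}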
Example #18
0
void detect(Mat& img, CascadeClassifier& cascade,
    CascadeClassifier& nestedCascade, double scale) {
  int i = 0;
  double t = 0;
  vector<Rect> faces;
  Mat gray, smallImg;

  cvtColor(img, gray, CV_BGR2GRAY);
  resize(gray, smallImg, Size(), 1 / scale, 1 / scale, INTER_LINEAR);
  equalizeHist(smallImg, smallImg);

  t = (double) cvGetTickCount();
  cascade.detectMultiScale(smallImg, faces, 1.1, 2, 0
  //|CV_HAAR_FIND_BIGGEST_OBJECT
  //|CV_HAAR_DO_ROUGH_SEARCH
      | CV_HAAR_SCALE_IMAGE, Size(30, 30));
  t = (double) cvGetTickCount() - t;
//  printf("detection time = %g ms\n",
//      t / ((double) cvGetTickFrequency() * 1000.));

  // Bounding boxes of all eyes found this frame, in full-size camera space
  vector<Rect> allEyes;
  for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end();
      r++, i++) {
    Mat smallImgROI;
    vector<Rect> nestedObjects;

    if (nestedCascade.empty())
      continue;
    smallImgROI = smallImg(*r);
    nestedCascade.detectMultiScale(smallImgROI, nestedObjects, 1.1, 2, 0
    //|CV_HAAR_FIND_BIGGEST_OBJECT
    //|CV_HAAR_DO_ROUGH_SEARCH
    //|CV_HAAR_DO_CANNY_PRUNING
        | CV_HAAR_SCALE_IMAGE, Size(30, 30));

    for (vector<Rect>::const_iterator e = nestedObjects.begin();
        e != nestedObjects.end(); ++e) {
      allEyes.push_back((*e + r->tl()) * scale);
    }
  }

  // Find which tracked eyes are closest to the observed eyes this frame
  vector<pair<int, int> > matching = assignTracksToEyes(eyeTracks, allEyes);
  vector<bool> foundTracks(eyeTracks.size());
  vector<bool> foundEyes(allEyes.size());
  for (vector<pair<int, int> >::const_iterator trackToEye = matching.begin();
      trackToEye != matching.end(); ++trackToEye) {
    foundTracks[trackToEye->first] = true;
    foundEyes[trackToEye->second] = true;
    const Rect& eyeSrc = allEyes[trackToEye->second];
    EyeTrack& track = eyeTracks[trackToEye->first];

    record(track, img, eyeSrc);
  }

  // Forget any tracks that haven't been picked up for a while
  for (int i = 0; i < eyeTracks.size(); ++i) {
    if (!eyeTracks[i].lastSeen.empty() && !foundTracks[i]) {
      int framesSinceLastSeen = ++(eyeTracks[i].numFramesSinceLastSeen);
      if (framesSinceLastSeen > 5)
        eyeTracks[i] = EyeTrack();
    }
  }

  // Put any new eyes into a free slot, if possible
  int j = 0;
  for (int i = 0; i < allEyes.size(); ++i) {
    if (!foundEyes[i]) {
      while (j < eyeTracks.size() && !eyeTracks[j].lastSeen.empty())
        j++;
      if (j == eyeTracks.size())
        break;
      record(eyeTracks[j], img, allEyes[i]);
    }
  }

  for (vector<EyeTrack>::iterator t = eyeTracks.begin(); t != eyeTracks.end();
      ++t) {
    if (t->frames.size() >= 10) {
      t->frames.erase(t->frames.begin(), t->frames.end() - 10);
      t->lastSeen.erase(t->lastSeen.begin(), t->lastSeen.end() - 10);
    }
  }

  i = 0;
  for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end();
      ++r, ++i) {
    Scalar color = colors[i % 8];
    rectangle(img, *r * scale, color, 3);
  }

  i = 0;
  // for(vector<Rect>::const_iterator e = allEyes.begin(); e != allEyes.end(); ++e, ++i) {
  //   const Scalar& color = colors[i % maxEyes];
  //   rectangle(img, *e, color, 3);
  // }
}
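// assignTracksToEyes() is used above but not shown. A minimal sketch of a
// greedy nearest-neighbour assignment by rectangle-center distance; it assumes
// EyeTrack::lastSeen is a vector<Rect> of recent positions (consistent with
// the erase() calls above), and the 50-pixel gating distance is an assumption.
vector<pair<int, int> > assignTracksToEyes(const vector<EyeTrack>& tracks,
    const vector<Rect>& eyes) {
  vector<pair<int, int> > matching;
  vector<bool> eyeTaken(eyes.size(), false);
  for (int t = 0; t < (int) tracks.size(); ++t) {
    if (tracks[t].lastSeen.empty())
      continue;  // free slot: nothing to match against
    Rect last = tracks[t].lastSeen.back();
    Point tc(last.x + last.width / 2, last.y + last.height / 2);
    int best = -1;
    double bestDist = 50.0;  // assumed gating distance in pixels
    for (int e = 0; e < (int) eyes.size(); ++e) {
      if (eyeTaken[e])
        continue;
      Point ec(eyes[e].x + eyes[e].width / 2, eyes[e].y + eyes[e].height / 2);
      double dx = tc.x - ec.x, dy = tc.y - ec.y;
      double d = sqrt(dx * dx + dy * dy);
      if (d < bestDist) {
        bestDist = d;
        best = e;
      }
    }
    if (best >= 0) {
      eyeTaken[best] = true;
      matching.push_back(make_pair(t, best));
    }
  }
  return matching;
}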
            ncvAssertPrintReturn(capture.open(camid) != 0, "Can't open source", -1);
        }

        capture >> frame;
        ncvAssertPrintReturn(!frame.empty(), "Empty video source", -1);

        frameSize.width = frame.cols;
        frameSize.height = frame.rows;
    }

    NcvBool bUseGPU = true;
    NcvBool bLargestObject = false;
    NcvBool bFilterRects = true;
    NcvBool bHelpScreen = false;

    CascadeClassifier classifierOpenCV;
    ncvAssertPrintReturn(classifierOpenCV.load(cascadeName) != 0, "Error (in OpenCV) opening classifier", -1);

    int devId;
    ncvAssertCUDAReturn(cudaGetDevice(&devId), -1);
    cudaDeviceProp devProp;
    ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), -1);
    cout << "Using GPU: " << devId << "(" << devProp.name <<
            "), arch=" << devProp.major << "." << devProp.minor << endl;

    //==============================================================================
    //
    // Load the classifier from file (assuming its size is about 1 mb)
    // using a simple allocator
    //
    //==============================================================================
Example #20
0
int main(int argc, char** argv){
	//Window playing the sequence

	//Open Cascade Classifier
	if( !pedestrian_cascade.load( pedestrian_cascade_name ) ){ printf("--(!)Error loading pedestrian cascade\n"); return -1; };

	//Create instance of HOGDescriptor
	HOGDescriptor hog;
	//Set the SVM for the HOG descriptors
	hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

	vector<Rect> pedestrianVJ;	//Rectangles around each pedestrian (Viola-Jones Module)
	vector<Rect> pedestrian;	//Rectangles around each pedestrian (HOG module)

	vector<Rect> ROI_disparity;
	ROI_disparity.reserve(30);
    vector<Vec4i> buildings;	//Vector with at most two elements (buildings)
    buildings.reserve(2);
	int n_ROI;

	//Code to process sequence of images


		int i, num_pedestrian_VJ=0, num_pedestrian_VJ_HOG=0, num_pedestrian_HOG=0;
		string path="Video/";
		char image_number[12];
		string image_name_left, image_name_right, output_image_name;
		string image_dimensions;

		//Start processing frames
		for(i=0;i<=N_FRAMES;i++){

			sprintf(image_number,"%010d",i);
			cout<<path<<image_number<<".png"<<endl;
			image_name_left=path+"Peatones/left2/"+(string)image_number+".png";
			image_name_right=path+"Peatones/right2/"+(string)image_number+".png";
			left_frame = imread(image_name_left.c_str(), IMREAD_GRAYSCALE);
			left_color_frame = imread(image_name_left.c_str(), CV_LOAD_IMAGE_COLOR);
			right_frame = imread(image_name_right.c_str(), IMREAD_GRAYSCALE);
			printf("Image cols %d \t",left_frame.cols);
			printf("Image rows %d\n",left_frame.rows);


		    if( left_frame.empty() )                      // Check for invalid input
		    {
		        cout <<  "Could not open or find the left image" << endl ;
		        return -1;
		    }

		    if( right_frame.empty() )                      // Check for invalid input
		    {
		        cout <<  "Could not open or find the right image" << endl ;
		        return -1;
		    }


			//DetectPedestrianViolaJones(pedestrianVJ);
			//DetectPedestrianHOG(pedestrianVJ, pedestrian, hog);
		    //DetectPedestrianHOGnotROI(pedestrian, hog);
			//DrawPedestrians(pedestrian);
			//SaveImage(pedestrianVJ,VJ_DETECTION, num_pedestrian_VJ);
			//SaveImage(pedestrian,VJ_HOG_DETECTION, num_pedestrian_VJ_HOG);
		    //SaveImage(pedestrian,HOG_DETECTION, num_pedestrian_HOG);
			//num_pedestrian_VJ+=pedestrianVJ.size();
			//num_pedestrian_VJ_HOG+=pedestrian.size();
		    //num_pedestrian_HOG+=pedestrian.size();
		    n_ROI = Stereo_SGBM(ROI_disparity, buildings);
		    DrawPedestrians(ROI_disparity, n_ROI);


		    /******************************************************************************
		     * Save images for debugging
		     ******************************************************************************/
		    output_image_name=path+"StereoROI_16bits/"+(string)image_number+".jpg";
		    imwrite(output_image_name, left_color_frame);

		    output_image_name=path+"StereoROI_16bits/Debug/disparity/"+(string)image_number+".jpg";
		    imwrite(output_image_name, disp_print);

		    output_image_name=path+"StereoROI_16bits/Debug/u_disparity/"+(string)image_number+".jpg";
		    imwrite(output_image_name, u_disparity_print);

		    output_image_name=path+"StereoROI_16bits/Debug/u_detect_obstacles/"+(string)image_number+".jpg";
		    imwrite(output_image_name, u_detect_obstacles);

			imshow( "Debug window", left_color_frame );
			int c = waitKey(10);
			if( (char)c == 27 )
				return 0;
			else if((char)c == 's'){	//'s' pauses the video until another key is pressed
				waitKey(0);
			}
		}

}
Example #21
0
/** @function detectAndDisplay */
void detectAndDisplay( Mat obj, Mat scene, int index ,bool useSURF)
{
	std::vector<Rect> dartBoards;
	std::vector<Rect> dartBoards1;
	Mat frame_gray;
	int buffer = 30;
	cvtColor( scene, frame_gray, CV_BGR2GRAY );
	equalizeHist( frame_gray, frame_gray );
	bool surfUsed = false;
	int aBoardFound = 0;
	Rect surfRect;
	//-- Detect dartboards with both cascades
	cascade.detectMultiScale( frame_gray, dartBoards, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(50, 50), Size(500,500) );
	cascade_1.detectMultiScale( frame_gray, dartBoards1, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(50, 50), Size(500,500) );

	if(useSURF)
	{
		surfRect = findObjectSURF(obj , scene, 500 ,index);
		//dartBoards.push_back(surfRect);
		
	}
	//dartBoards = getCleanVector(dartBoards,dartBoards1);
	std::cout << dartBoards.size() << std::endl;
	
	
	for( int i = 0; i < dartBoards.size(); i++ )
	{
		if(index != 10)
		{
			if(dartBoards[i].x > surfRect.x -buffer && dartBoards[i].x < surfRect.x + buffer)
			{
					if(dartBoards[i].y > surfRect.y -buffer && dartBoards[i].y < surfRect.y + buffer)
					{
						//this has been verified by the surf detector
						if(surfRect.area() > 600 &&  verifyDartBoard(Mat(scene, surfRect)).rad > 0)
						{
							rectangle(scene,surfRect, Scalar( 0, 255, 0 ), 2);
							surfUsed = true;
							aBoardFound++;
						}
					}
					else
					{
						if( verifyDartBoard(Mat(scene, dartBoards[i])).rad > 0)
						{
							rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
							aBoardFound ++;
						}
					}

			}
			else
			{
				if( verifyDartBoard(Mat(scene, dartBoards[i])).rad > 0)
				{
					rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
					aBoardFound ++;
				}
			}
		}
		else
		{
		}
		
	}
	if(surfUsed == false && index != 10)
	{
		if(surfRect.area() > 600 &&  verifyDartBoard(Mat(scene, surfRect)).rad > 0)
		{
			rectangle(scene,surfRect, Scalar( 0, 255, 0 ), 2);
			surfUsed = true;
			aBoardFound++;
		}
	}
	if(aBoardFound == 0)
	{
		dartBoardCenter c =  verifyDartBoard(scene);
		if(c.rad > 0)
		{
			Rect foundRect  = cv::Rect(c.x- c.rad,c.y- c.rad,2*c.rad,2*c.rad);
			rectangle(scene,foundRect, Scalar( 0, 255, 0 ), 2);
		}
	}
	std::stringstream ss;
	ss << window_name << index << " surf used: " << useSURF;
	std::string s = ss.str();
	imshow(s, scene );
		/*if(useSURF)
		{
			if(surfUsed == true)
			{
				//surf has been used to verify one dartboard or surf does not detect a dartboard
				if( verifyDartBoard(Mat(scene, dartBoards[i])))
				{
					rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
					aBoardFound ++;
				}
			}
			else
			{
				if(dartBoards[i].x > surfRect.x -buffer && dartBoards[i].x < surfRect.x + buffer)
				{
					if(dartBoards[i].y > surfRect.y -buffer && dartBoards[i].y < surfRect.y + buffer)
					{
						//this has been verified by the surf detector
						rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
						surfUsed = true;
						aBoardFound++;
					}

				}
				if(surfUsed == false)
				{
					//this didnt match the surf, verify
					if( verifyDartBoard(Mat(scene, dartBoards[i])))
					{
						rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
						aBoardFound++;
					}
				}
			}
		}
		else
		{
			if( verifyDartBoard(Mat(scene, dartBoards[i])))
			{
				rectangle(scene, Point(dartBoards[i].x, dartBoards[i].y), Point(dartBoards[i].x + dartBoards[i].width, dartBoards[i].y + dartBoards[i].height), Scalar( 0, 255, 0 ), 2);
				aBoardFound ++;
			}
		}
	}
	if(surfUsed == false && useSURF == true)
	{
		//surf available and noBoardFound
		//so check if the surf is correct
		if(verifyDartBoard(Mat(scene,surfRect)))
		{
			rectangle(scene,surfRect, Scalar( 0, 255, 0 ), 2);
		}
	}
	*/
	
}
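// verifyDartBoard() and dartBoardCenter are referenced above but not shown.
// A minimal sketch, assuming verification looks for the board's circular rings
// with a Hough transform; the struct layout matches the .x/.y/.rad accesses
// above, while the Hough parameters are assumptions for illustration.
struct dartBoardCenter { int x; int y; int rad; };

dartBoardCenter verifyDartBoard(Mat candidate)
{
	dartBoardCenter result = { 0, 0, 0 };	// rad == 0 signals "no board found"
	Mat gray;
	cvtColor(candidate, gray, CV_BGR2GRAY);
	GaussianBlur(gray, gray, Size(9, 9), 2, 2);

	std::vector<Vec3f> circles;
	HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 1, gray.rows/8, 200, 100, 10, 0);

	if(!circles.empty())
	{
		result.x   = cvRound(circles[0][0]);
		result.y   = cvRound(circles[0][1]);
		result.rad = cvRound(circles[0][2]);
	}
	return result;
}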
Example #22
0
int main(int argc, const char *argv[]) {
	

	   // Check for valid command line arguments, print usage
	   // if no arguments were given.
	cout<<"start\n";
	   if ((argc != 4)&&(argc!=3)) {
	       cout << "usage: " << argv[0] << " files.ext histo(0/1) threshold(opt, e.g. 5000)" << endl;
	       exit(1);
	   }
	   int PREDICTION_SEUIL;
	   // set default value for the prediction threshold = minimum value to recognize
	if (argc==3) { trace("(init) prediction threshold = 4500 by default"); PREDICTION_SEUIL = 4500; }
	if (argc==4) PREDICTION_SEUIL = atoi(argv[3]);
	
	// do we do a color histogram equalization ? 
	bHisto=atoi(argv[2]);
	
	// load for eyes reco
	if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
	if( !glasses_cascade.load( glasses_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
	
	//
	// init people
	//
	Mat gray, frame,original,face,face_resized;


	// init people; this should be done in a config file,
	// but I don't have time, I need to go to the swimming pool
	// with my daughters,
	// and they prefer to swim rather than watch their father write a config file.
	// Life is hard.
	people[P_FEMALE_1624_HAPPY] 	= "Female 16-24 Happy";
	people[P_FEMALE_1624_ANGER] 	= "Female 16-24 Angry";
	people[P_FEMALE_1624_DISGUST]	= "Female 16-24 Disgusted";
	people[P_FEMALE_1624_FEAR]	= "Female 16-24 Fearful";
	people[P_FEMALE_1624_SAD]	= "Female 16-24 Sad";
	people[P_FEMALE_1624_SURPRISE]	= "Female 16-24 Surprised";
	people[P_FEMALE_2534_HAPPY]	= "Female 25-34 Happy";
	people[P_FEMALE_2534_ANGER] 	= "Female 25-34 Angry";
	people[P_FEMALE_2534_DISGUST]	= "Female 25-34 Disgusted";
	people[P_FEMALE_2534_FEAR]	= "Female 25-34 Fearful";
	people[P_FEMALE_2534_SAD]	= "Female 25-34 Sad";
	people[P_FEMALE_2534_SURPRISE]	= "Female 25-34 Surprised";
	people[P_FEMALE_3549_HAPPY] 	= "Female 35-49 Happy";
	people[P_FEMALE_3549_ANGER] 	= "Female 35-49 Angry";
	people[P_FEMALE_3549_DISGUST]	= "Female 35-49 Disgusted";
	people[P_FEMALE_3549_FEAR]	= "Female 35-49 Fearful";
	people[P_FEMALE_3549_SAD]	= "Female 35-49 Sad";
	people[P_FEMALE_3549_SURPRISE]	= "Female 35-49 Surprised";
	people[P_FEMALE_5000_DISGUST] 	= "Female 50+ Disgust";
	people[P_FEMALE_5000_FEAR] 	= "Female 50+ Fear";
	people[P_FEMALE_5000_SURPRISE]	= "Female 50+ Surprise";
	people[P_MALE_1624_HAPPY] 	= "Male 16-24 Happy";
	people[P_MALE_1624_ANGER] 	= "Male 16-24 Angry";
	people[P_MALE_1624_DISGUST]	= "Male 16-24 Disgusted";
	people[P_MALE_1624_FEAR]	= "Male 16-24 Fearful";
	people[P_MALE_1624_SAD]		= "Male 16-24 Sad";
	people[P_MALE_1624_SURPRISE]	= "Male 16-24 Surprised";
	people[P_MALE_2534_HAPPY]	= "Male 25-34 Happy";
	people[P_MALE_2534_ANGER] 	= "Male 25-34 Angry";
	people[P_MALE_2534_DISGUST]	= "Male 25-34 Disgusted";
	people[P_MALE_2534_FEAR]	= "Male 25-34 Fearful";
	people[P_MALE_2534_SAD]		= "Male 25-34 Sad";
	people[P_MALE_2534_SURPRISE]	= "Male 25-34 Surprised";
	people[P_MALE_3549_HAPPY] 	= "Male 35-49 Happy";
	people[P_MALE_3549_ANGER] 	= "Male 35-49 Angry";
	people[P_MALE_3549_DISGUST]	= "Male 35-49 Disgusted";
	people[P_MALE_3549_FEAR]	= "Male 35-49 Fearful";
	people[P_MALE_3549_SAD]		= "Male 35-49 Sad";
	people[P_MALE_3549_SURPRISE]	= "Male 35-49 Surprised";

	// init...
	// reset counter
	for (int i=0;i<MAX_PEOPLE;i++) 
	{
		nbSpeak[i] =0;
		nPictureById[i]=0;
	}
	int bFirstDisplay	=1;
	trace("(init) People initialized");
	
	// Get the path to your CSV
	string fn_csv = string(argv[1]);
	
	// change with opencv path	
	string fn_haar = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml";
	DEBUG cout<<"(OK) csv="<<fn_csv<<"\n";

    
    vector<Mat> images;
    vector<int> labels;
    
    // Read in the data (fails if no valid input filename is given, but you'll get an error message):
    try {
        read_csv(fn_csv, images, labels);
		DEBUG cout<<"(OK) read CSV ok\n";
    	} 
    catch (cv::Exception& e) 
    {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        exit(1);
    }

	// get height, width of 1st image --> all images must be the same size
    int im_width = images[0].cols;
    int im_height = images[0].rows;
	trace("(init) taille images ok");
 
 	//
    // Create a FaceRecognizer and train it on the given images:
	//
	
	// this is an LBPH model (active below), but you could replace it with an
	// Eigen or Fisher model (in that case the threshold value should be lower)
//	Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
//    	Ptr<FaceRecognizer> model = createFisherFaceRecognizer(); 
        Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();    

    // train the model with your nice collection of pictures	
    trace("(init) start train images");
    model->train(images, labels);
 	trace("(init) train images : ok");
 
	// load face model
    CascadeClassifier face_cascade;
    if (!face_cascade.load(fn_haar))
   	{
    			cout <<"(E) face cascade model not loaded :"+fn_haar+"\n"; 
    			return -1;
    }
    trace("(init) Load modele : ok");
    
    // capture video
    CvCapture* capture;
 	capture = cvCaptureFromCAM(-1);
 	
 	// set size of webcam 320x240, enough for RPI power
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH,320);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT,240);
	trace("(init) webcam initialized : ok");
	
	// bail out if the capture device could not be opened
	if (!capture)
	{   
		cout << "(E) Capture Device cannot be opened." << endl;
        return -1;
    }
    int nFirst=0;
    
	// Holds the current frame from the Video device
	for(;;)
	{
		// get the picture from webcam
		original= cvQueryFrame( capture);
		char key;        		
      
   		// Convert the current frame to grayscale:
        cvtColor(original, gray, CV_BGR2GRAY);
        
        // and equalize Histo (as model pictures)
        if (bHisto)equalizeHist( gray, gray);        
		
        vector< Rect_<int> > faces;

        // detect faces
		face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(10,10));	

		// for each face found
  	  	for(int i = 0; i < faces.size(); i++) 
  	  	{       
  	  		// crop face (pretty easy with OpenCV, don't you think?)
            Rect face_i = faces[i];
            face = gray(face_i);
           
            
			//  resized face and display it
			cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
			imshow("face", face_resized);
			
			if (bFirstDisplay) // first display, allow more time to display
			{
				key = (char) waitKey(100);
				bFirstDisplay = 0;
			}
			else
			{
				key = (char) waitKey(10);
			}
			
			
			nFirst++;
			
			// at this stage, a face has been detected;
			// now we try to predict who it is
			char sTmp[256];		
			double predicted_confidence	= 0.0;
			int prediction				= -1;
			model->predict(face_resized,prediction,predicted_confidence);
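			// note: FaceRecognizer::predict() returns a distance in
			// predicted_confidence (lower = closer match), so comparing with
			// '>' below is a convention of this code tied to how
			// PREDICTION_SEUIL was chosen, not a general rule.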
			
			// create a rectangle around the face      
			rectangle(original, face_i, CV_RGB(0, 255 ,0), 1);
				
			// if good prediction : > threshold 
			if (predicted_confidence>PREDICTION_SEUIL)
			{
				// trace
//				system("python analytics.py");
				sprintf(sTmp,"+ prediction ok = %s (%d) confiance = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
				trace((string)(sTmp));
			
			 	// display name of the guy on the picture
				string box_text;
				if (prediction<MAX_PEOPLE)
				{
					box_text = "Id="+people[prediction];
				}
				else
				{
					trace("(E) prediction id incohérent");
				}
				int pos_x = std::max(face_i.tl().x - 10, 0);
				int pos_y = std::max(face_i.tl().y - 10, 0);			   
				putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 1.0);
				
				/// Show the result:
        		imshow("Reco", original);
        		key = (char) waitKey(10);
        		
				// say hello to ...
				saySomething(prediction);
				}
				else
				{
					// oh my god ! prediction result is too low
					// RPI prefers not to name my wife with the name of my mother-in-law.
					// just do nothing, except trace
					sprintf(sTmp,"- prediction too low = %s (%d) confiance = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
					trace((string)(sTmp));
				} 
			}
	
		    // Show the result:
		    // note: we display the picture twice, once before .predict and once
		    // after, to double the display frequency.
		    imshow("Reco", original);
		   
		    // And display it:
		    key = (char) waitKey(100);

		}
	return 0;
}
Example #23
0
/**
 *  buffer header callback function for video
 *
 * @param port Pointer to port from which callback originated
 * @param buffer mmal buffer header pointer
 */
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
   MMAL_BUFFER_HEADER_T *new_buffer;
   PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;

   if (pData)
   {
     
      if (buffer->length)
      {

	      mmal_buffer_header_mem_lock(buffer);
 
 		//
		// *** PR : OPEN CV Stuff here !
		//
		int w=pData->pstate->width;	// get image size
		int h=pData->pstate->height;
		int h4=h/4;
		
		memcpy(py->imageData,buffer->data,w*h);	// read Y
		
		if (pData->pstate->graymode==0)
		{
			memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
			memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read v
	
			cvResize(pu, pu_big, CV_INTER_NN);
			cvResize(pv, pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			cvMerge(py, pu_big, pv_big, NULL, image);
	
			cvCvtColor(image,dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			gray=cvarrToMat(dstImage);   
			//cvShowImage("camcvWin", dstImage );
			
		}
		else
		{	
			// for face reco, we just keep gray channel, py
			gray=cvarrToMat(py);  
			//cvShowImage("camcvWin", py); // display only gray channel
		}
		
////////////////////////////////
// FACE RECOGNITION START HERE
////////////////////////////////

	// detect faces
	face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(80,80));
	// for each face found
	for(int i = 0; i < faces.size(); i++) 
	{       
		// crop face (pretty easy with OpenCV, don't you think?)
		Rect face_i = faces[i];
		
		face = gray(face_i);  
		//  resized face and display it
		cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, CV_INTER_NN); //INTER_CUBIC);		
	
		// now we try to predict who it is
		char sTmp[256];		
		double predicted_confidence	= 0.0;
		int prediction				= -1;
		model.predict(face_resized,prediction,predicted_confidence);
		
		// create a rectangle around the face      
		rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);
			
		// if good prediction : > threshold 
		if (predicted_confidence>PREDICTION_SEUIL)
		{
			// trace
			//sprintf(sTmp,"+ prediction ok = %s (%d) confidence = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
			//trace((string)(sTmp));

			// display name of the guy on the picture
			string box_text;
			if (prediction<MAX_PEOPLE)
			{
				box_text = "Id="+people[prediction];
			}
			else
			{
				trace("(E) prediction id inconsistent");
			}
			int pos_x = std::max(face_i.tl().x - 10, 0);
			int pos_y = std::max(face_i.tl().y - 10, 0);
			putText(gray, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,255,255), 1.0);
		}
		else
		{
			// trace is commented out to speed things up
			//sprintf(sTmp,"- prediction too low = %s (%d) confidence = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
			//trace((string)(sTmp));
		}
	} // end for
	
			
/////////////////////////
// END OF FACE RECO
/////////////////////////

    //output << gray;
		
	// Show the result:
	imshow("camcvWin", gray);
	key = (char) waitKey(1);
	nCount++;		// count frames displayed
		
         mmal_buffer_header_mem_unlock(buffer);
      }
      else vcos_log_error("buffer null");
      
   }
   else
   {
      vcos_log_error("Received a encoder buffer callback with no state");
   }

   // release buffer back to the pool
   mmal_buffer_header_release(buffer);

   // and send one back to the port (if still open)
   if (port->is_enabled)
   {
      MMAL_STATUS_T status;

      new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

      if (new_buffer)
         status = mmal_port_send_buffer(port, new_buffer);

      if (!new_buffer || status != MMAL_SUCCESS)
         vcos_log_error("Unable to return a buffer to the encoder port");
   }
    
}
/** @function main */
int main( int argc, const char** argv )
{
    bool useCamera = true;
    bool useFiles = false;
    String big_directory;
    if(argc > 1){
        useCamera = false;
        useFiles = true;
        big_directory = argv[1];
    }
    VideoCapture cap;
    Mat frame;
    if(useCamera){
        cap.open(0);
        if(!cap.isOpened()){
            cerr<<"Failed to open camera"<<endl;
            return -1;
        }
        while(frame.empty()){
            cap>>frame;
        }
    }
    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ 
        cerr<<"Error loading cascade"<<endl;
        return -1; 
    }    
    vector<string> dirs;
    if(useFiles){
        GetFilesInDirectory(dirs, big_directory);
        cout<<dirs.size()<<endl;
        for(int i = 0; i < dirs.size(); i++){
            cout<<dirs[i]<<endl;
        }
    }
    while(!dirs.empty() || useCamera) {
        vector<string> files;
        string subdir;
        if(useFiles){
            subdir = dirs.back();
            dirs.pop_back();
            GetFilesInDirectory(files, subdir);
        }
        while (true) {
            if (useCamera) {
                cap >> frame;
                if (!frame.empty()) {
                    detectAndDisplay(frame, "camera");
                }
                else {
                    cout << " --(!) No captured frame -- Break!" << endl;
                    break;
                }
            }
            if (useFiles) {
                if (files.empty()) {
                    cout << subdir<<" finished" << endl;
                    break;
                }
                string name = files.back();
                cout << "converting " << name << endl;
                frame = imread(name);
                transpose(frame, frame);
                flip(frame, frame, 1);
                files.pop_back();
                vector<string> splitName;
                splitName = split(subdir, '/');


                detectAndDisplay(frame, splitName.back().c_str());
            }

            int c = waitKey(10);
            if (c == 27) {
                return 0;
            }
        }
    }
    
    return 0;
}
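// split() and GetFilesInDirectory() are used above but not defined in this
// snippet. A minimal sketch of split with the delimiter semantics implied by
// split(subdir, '/'); <sstream> is assumed to be included.
vector<string> split(const string& s, char delim){
    vector<string> parts;
    stringstream ss(s);
    string item;
    while(getline(ss, item, delim)){
        parts.push_back(item);
    }
    return parts;
}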
Example #25
0
void App::run(int argc, char **argv)
{
    parseCmdArgs(argc, argv);
    if (help_showed) 
        return;

    if (getCudaEnabledDeviceCount() == 0)
        throw runtime_error("No GPU found or the library is compiled without GPU support");    

    if (cascade_name.empty())
    {
        cout << "Using default cascade file...\n";
        cascade_name = "data/face_detect/haarcascade_frontalface_alt.xml";
    }      

    if (!cascade_gpu.load(cascade_name) || !cascade_cpu.load(cascade_name))
    {
        stringstream msg;
        msg << "Could not load cascade classifier \"" << cascade_name << "\"";
        throw runtime_error(msg.str());
    }

    if (sources.size() != 1)
    {
        cout << "Loading default frames source...\n";
        sources.resize(1);
        sources[0] = new VideoSource("data/face_detect/browser.flv");
    }

    Mat frame, frame_cpu, gray_cpu, resized_cpu, faces_downloaded, frameDisp;
    vector<Rect> facesBuf_cpu;

    GpuMat frame_gpu, gray_gpu, resized_gpu, facesBuf_gpu;

    int detections_num;
    while (!exited)
    {
        sources[0]->next(frame_cpu);
        frame_gpu.upload(frame_cpu);

        convertAndResize(frame_gpu, gray_gpu, resized_gpu, scaleFactor);
        convertAndResize(frame_cpu, gray_cpu, resized_cpu, scaleFactor);

        TickMeter tm;
        tm.start();

        if (useGPU)
        {
            cascade_gpu.visualizeInPlace = true;
            cascade_gpu.findLargestObject = findLargestObject;

            detections_num = cascade_gpu.detectMultiScale(resized_gpu, facesBuf_gpu, 1.2,
                                                          (filterRects || findLargestObject) ? 4 : 0);
            facesBuf_gpu.colRange(0, detections_num).download(faces_downloaded);
        }
        else
        {
            Size minSize = cascade_gpu.getClassifierSize();
            cascade_cpu.detectMultiScale(resized_cpu, facesBuf_cpu, 1.2,
                                         (filterRects || findLargestObject) ? 4 : 0,
                                         (findLargestObject ? CV_HAAR_FIND_BIGGEST_OBJECT : 0)
                                            | CV_HAAR_SCALE_IMAGE,
                                         minSize);
            detections_num = (int)facesBuf_cpu.size();
        }

        if (!useGPU && detections_num)
        {
            for (int i = 0; i < detections_num; ++i)
            {
                rectangle(resized_cpu, facesBuf_cpu[i], Scalar(255));
            }
        }

        if (useGPU)
        {
            resized_gpu.download(resized_cpu);
        }

        tm.stop();
        double detectionTime = tm.getTimeMilli();
        double fps = 1000 / detectionTime;

        /*//print detections to console
        cout << setfill(' ') << setprecision(2);
        cout << setw(6) << fixed << fps << " FPS, " << detections_num << " det";
        if ((filterRects || findLargestObject) && detections_num > 0)
        {
            Rect *faceRects = useGPU ? faces_downloaded.ptr<Rect>() : &facesBuf_cpu[0];
            for (int i = 0; i < min(detections_num, 2); ++i)
            {
                cout << ", [" << setw(4) << faceRects[i].x
                     << ", " << setw(4) << faceRects[i].y
                     << ", " << setw(4) << faceRects[i].width
                     << ", " << setw(4) << faceRects[i].height << "]";
            }
        }
        cout << endl;*/

        cvtColor(resized_cpu, frameDisp, CV_GRAY2BGR);
        displayState(frameDisp, helpScreen, useGPU, findLargestObject, filterRects, fps);
        imshow("face_detect_demo", frameDisp);

        processKey(waitKey(3));
    }   
}
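// convertAndResize() is called above for both Mat and GpuMat but is not shown.
// A minimal sketch consistent with the (src, gray, resized, scale) calls,
// using the pre-3.0 cv::gpu module that the rest of this example relies on;
// treat it as an illustration rather than the exact original helper.
void convertAndResize(const Mat& src, Mat& gray, Mat& resized, double scale)
{
    if (src.channels() == 3)
        cvtColor(src, gray, CV_BGR2GRAY);
    else
        gray = src;

    Size sz(cvRound(gray.cols * scale), cvRound(gray.rows * scale));
    if (scale != 1)
        resize(gray, resized, sz);
    else
        resized = gray;
}

void convertAndResize(const GpuMat& src, GpuMat& gray, GpuMat& resized, double scale)
{
    if (src.channels() == 3)
        gpu::cvtColor(src, gray, CV_BGR2GRAY);
    else
        gray = src;

    Size sz(cvRound(gray.cols * scale), cvRound(gray.rows * scale));
    if (scale != 1)
        gpu::resize(gray, resized, sz);
    else
        resized = gray;
}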
Example #26
0
/*
 * detectObj
 * Object Detection using openCV
 */
int detectObj(unsigned int increment)
{
  cout << "Processing\n";

  int num_found = 0;
  Mat rotated, temp, rotated_gray;
  
  double centerx = image.cols/2.0;
  double centery = image.rows/2.0;
  Point2f center(centerx, centery);
  
  for (int theta=0; theta<360; theta+=increment) {
    showProgress(theta, 360);
    // Rotate image
    temp = getRotationMatrix2D(center, -double(theta), 1.0);
    warpAffine(image, rotated, temp, image.size());
  
    // Convert to greyscale
    cvtColor(rotated, rotated_gray, CV_BGR2GRAY);
    equalizeHist(rotated_gray, rotated_gray);
    
    // Run detection
    vector<Rect> flowers;
    flower_cascade.detectMultiScale(rotated_gray, flowers);
    num_found += flowers.size();

    //  Draws Circles
    for(size_t i = 0; i < flowers.size(); i++) {
      double rotated_locationx = flowers[i].x + flowers[i].width*0.5;
      double rotated_locationy = flowers[i].y + flowers[i].height*0.5;
      Point rotated_location(rotated_locationx, rotated_locationy);
      double dx = rotated_locationx - centerx;
      double dy = rotated_locationy - centery;
      double alpha = theta * M_PI/180.0;
      double beta = atan2(dy, dx);
      double distance = sqrt(dx*dx + dy*dy);
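      // Map the detection back into the unrotated image: the point lies at
      // angle beta and distance 'distance' from the rotation center in the
      // rotated frame, so undoing the rotation by alpha places it at angle
      // (beta - alpha) in the original frame.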
      
      //  Checks if flower was already found
      int radius = max(flowers[i].width*0.5, flowers[i].height*0.5);
      Point location(centerx + distance * cos(beta - alpha),
                     centery + distance * sin(beta - alpha));
      int inlist = 0;
      if (!id.empty()){
        for(size_t j = 0; j < id.size(); j++){
          if( radius/2 >= abs( location.x-id[j].get_cvx() ) && 
              radius/2 >= abs( location.y-id[j].get_cvy() ) ){
            num_found--;
            inlist = 1;
            break;
          }
        }
      }
      if (!inlist)
      {
        identified tempid(location.x, location.y, radius, true, image.cols, image.rows, asp_src);
        id.push_back(tempid);
        //circle(image, location, radius, Scalar(20, 0, 255), 4);
      }
    }
  }
  showProgress(100,100);
  return num_found;
}
int main(int argc, const char *argv[]) {

	// check that we are running on Galileo or Edison
	mraa_platform_t platform = mraa_get_platform_type();
	if ((platform != MRAA_INTEL_GALILEO_GEN1)
			&& (platform != MRAA_INTEL_GALILEO_GEN2)
			&& (platform != MRAA_INTEL_EDISON_FAB_C)) {
		std::cerr << "Unsupported platform, exiting" << std::endl;
		return MRAA_ERROR_INVALID_PLATFORM;
	}

	mraa::Pwm* pwm_pin_3 = new mraa::Pwm(3);
	if (pwm_pin_3 == NULL) {
		std::cerr << "Can't create mraa::Pwm object, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	mraa::Pwm* pwm_pin_5 = new mraa::Pwm(5);
	if (pwm_pin_5 == NULL) {
		std::cerr << "Can't create mraa::Pwm object, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	mraa::Pwm* pwm_pin_6 = new mraa::Pwm(6);
	if (pwm_pin_6 == NULL) {
		std::cerr << "Can't create mraa::Pwm object, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// create a gpio object from MRAA using pin 8
	mraa::Gpio* led_yellow_pin = new mraa::Gpio(8);
	if (led_yellow_pin == NULL) {
		std::cerr << "Can't create mraa::Gpio object, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// create a gpio object from MRAA using pin 9
	mraa::Gpio* led_blue_pin = new mraa::Gpio(9);
	if (led_blue_pin == NULL) {
		std::cerr << "Can't create mraa::Gpio object 9, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// create a gpio object from MRAA using pin 10
	mraa::Gpio* led_green_pin = new mraa::Gpio(10);
	if (led_green_pin == NULL) {
		std::cerr << "Can't create mraa::Gpio object 10, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// create a GPIO object from MRAA using pin 11
	mraa::Pwm* tone_pin = new mraa::Pwm(11);
	if (tone_pin == NULL) {
		std::cerr << "Can't create mraa::Pwm object 11, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// create a GPIO object from MRAA using pin 12
	mraa::Gpio* button_pin = new mraa::Gpio(12);
	if (button_pin == NULL) {
		std::cerr << "Can't create mraa::Gpio object 12, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// enable PWM on the selected pin
	if (pwm_pin_3->enable(true) != MRAA_SUCCESS) {
		std::cerr << "Cannot enable PWM on mraa::PWM object, exiting"
				<< std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	if (pwm_pin_5->enable(true) != MRAA_SUCCESS) {
		std::cerr << "Cannot enable PWM on mraa::PWM object, exiting"
				<< std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	if (pwm_pin_6->enable(true) != MRAA_SUCCESS) {
		std::cerr << "Cannot enable PWM on mraa::PWM object, exiting"
				<< std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	if (tone_pin->enable(true) != MRAA_SUCCESS) {
		std::cerr << "Cannot enable PWM on mraa::PWM object, exiting"
				<< std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// set the button as input
	if (button_pin->dir(mraa::DIR_IN) != MRAA_SUCCESS) {
		std::cerr << "Can't set digital pin as input, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	// set the pin as output
	if (led_yellow_pin->dir(mraa::DIR_OUT) != MRAA_SUCCESS) {
		std::cerr << "Can't set digital pin as output, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	if (led_blue_pin->dir(mraa::DIR_OUT) != MRAA_SUCCESS) {
		std::cerr << "Can't set digital pin as output, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	if (led_green_pin->dir(mraa::DIR_OUT) != MRAA_SUCCESS) {
		std::cerr << "Can't set digital pin as output, exiting" << std::endl;
		return MRAA_ERROR_UNSPECIFIED;
	}

	//EmotionState_t emotion;
	// Check for valid command line arguments, print usage
	// if no arguments were given.
	// note: the command-line arguments are overridden with hard-coded paths here,
	// so the usage check below never fires; writing argv[2] assumes the program
	// was launched with enough argument slots
	argc = 2;
	argv[1] = "/media/card/emotions/my_csv.csv";
	argv[2] = "/home/root/emotions";
	if (argc < 2) {
		cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
		exit(1);
	}

	if (!face_cascade.load(face_cascade_name)) {
		printf("--(!)Error loading\n");
		return -1;
	};
	if (!eyes_cascade.load(eye_cascade_name)) {
		printf("--(!)Error loading\n");
		return -1;
	};

	///turn on the camera
	VideoCapture cap(-1);

	//check if the camera was opened successfully
	if (!cap.isOpened()) {
		cout << "Capture could not be opened successfully" << endl;
		return -1;
	} else {
		cout << "Camera is ok.. Stay 50 cm away from your camera\n" << endl;
	}

	int w = 432;
	int h = 240;
	cap.set(CV_CAP_PROP_FRAME_WIDTH, w);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, h);

	cout << endl << "Press the button to take a picture!" << endl;

	//poll the button until it's pressed;
	//when it's pressed the program starts to run

	int button_value = 0;
	while (!button_value) {
		button_value = button_pin->read();
		//std::cout << "value " << button_value << std::endl;
		//sleep(1);
	}

	// select a pulse width period of 1ms
	int period = 1;

	//ring 3 times
	for (int i = 0; i < 3; ++i) {
		cout << 3 - i << endl;
		tone_pin->config_percent(period, 0.5);
		usleep(50000);
		tone_pin->config_percent(period, 0);
		sleep(1);
	}

	//the frame for picture
	Mat frame;

	//ring one more time, but longer;
	//the tone stops after the photo is taken
	tone_pin->config_percent(period, 0.9);

	//take the picture
	cap >> frame;

	//stop the tone
	tone_pin->config_percent(period, 0);

	cout << "processing the image...." << endl;

	//imwrite("image.jpg", frame);

	Mat testSample;
	testSample = faceDetect(frame);

	//imwrite("testSampe.jpg", testSample);

	// Get the path to your CSV.
	string fn_csv = string(argv[1]);

	// These vectors hold the images and corresponding labels
	vector<Mat> images;
	vector<int> labels;

	// Read in the data. This can fail if no valid
	// input filename is given.

	try {
		read_csv(fn_csv, images, labels);
	} catch (cv::Exception& e) {
		cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg
				<< endl;
		// nothing more we can do
		exit(1);
	}

	// Quit if there are not enough images for this demo.
	if (images.size() <= 1) {
		string error_message =
				"This demo needs at least 2 images to  work. Please add more images to your database!";
		CV_Error(CV_StsError, error_message);
	}

	// Alternative constructions:
	//      cv::createFisherFaceRecognizer(10);        // keep only the 10 best Fisherfaces
	//      cv::createFisherFaceRecognizer(0, 123.0);  // full rank, with a prediction threshold of 123.0

	Ptr<FaceRecognizer> model = createFisherFaceRecognizer(10);
	model->train(images, labels);

	int predictedLabel = model->predict(testSample);

	//      int predictedLabel = -1;
	//      double confidence = 0.0;
	//      model->predict(testSample, predictedLabel, confidence);

	string result_message = format("Predicted class = %d", predictedLabel);
	cout << result_message << endl;

	// giving the result
	switch (predictedLabel) {
	case HAPPY:
		cout << "You are happy!" << endl;
		led_blue_pin->write(1);
		led_green_pin->write(0);
		led_yellow_pin->write(0);
		//pwm_pin_5->config_percent(1, 0.4);
		//sleep(4);
		//pwm_pin_5->config_percent(1, 0);
		system("echo 'Happy,' $(date) $(ls -1 | wc -l) >> emotions_2.txt");
		break;
	case ANGRY:
		cout << "You are angry!" << endl;
		led_blue_pin->write(0);
		led_green_pin->write(1);
		led_yellow_pin->write(0);
		//pwm_pin_3->config_percent(1, 0.4);
		//sleep(4);
		//pwm_pin_3->config_percent(1, 0);
		system("echo 'Angry,' $(date) $(ls -1 | wc -l) >> emotions_2.txt");
		break;
	case AMAZED:
		cout << "You are amazed!" << endl;
		led_blue_pin->write(0);
		led_green_pin->write(0);
		led_yellow_pin->write(1);
		//pwm_pin_6->config_percent(1, 0.4);
		//sleep(4);
		//pwm_pin_6->config_percent(1, 0);
		system("echo 'Amazed,' $(date) $(ls -1 | wc -l) >> emotions_2.txt");
		break;
	}

	cap.release();

	return 0;
}
/*----------------------------------------------------------------------------*/
void FindFace2Frame(Mat frame) {
    
    std::vector<Rect> faces;
    
    Mat frame_gray;
    Mat crop;
    Mat res;
    Mat gray;
    string text;
    stringstream sstm;

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    // Detect faces
    //face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    face_cascade_classifier.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(50, 50));

    // Set Region of Interest
    cv::Rect roi_b;
    cv::Rect roi_c;

    size_t ic = 0; // ic is index of current element
    int ac = 0;    // ac is area of current element

    size_t ib = 0; // ib is index of biggest element
    int ab = 0;    // ab is area of biggest element

    
    // Iterate through all current elements (detected faces)
    for (ic = 0; ic < faces.size(); ic++) 
    {
        roi_c.x = faces[ic].x;
        roi_c.y = faces[ic].y;
        roi_c.width = (faces[ic].width);
        roi_c.height = (faces[ic].height);

        // Get the area of current element (detected face)
        ac = roi_c.width * roi_c.height; 

        roi_b.x = faces[ib].x;
        roi_b.y = faces[ib].y;
        roi_b.width = (faces[ib].width);
        roi_b.height = (faces[ib].height);

        //--- Area of biggest detected element
        ab = roi_b.width * roi_b.height; 

        if (ac > ab) {
            ib = ic;
            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = (faces[ib].width);
            roi_b.height = (faces[ib].height);
        }

        crop = frame(roi_b);
        
        //--- Resize image to save it into images subdirectory
        resize(crop, res, Size(256, 256), 0, 0, INTER_LINEAR); 
        
        //--- Convert cropped image into Gray-scale for further processing
        cvtColor(crop, gray, CV_BGR2GRAY); 

        //--- create a filename
        filename = "";
        stringstream ssfn;
        //ssfn << "images/" << filenumber << ".png";
        ssfn << "../src/main/java/faces/" << filenumber << ".png";
        filename = ssfn.str();
        filenumber++;

        imwrite(filename, gray);

        //--- Draw a green square around the detected face
        Point pt1(faces[ic].x, faces[ic].y);
        Point pt2((faces[ic].x + faces[ic].width)
                , (faces[ic].y + faces[ic].height));
        
        rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
    }

    //--- Show Image within separate frame
    sstm << "Crop area size: " << roi_b.width << "x" 
         << roi_b.height << " Filename: " << filename;
    text = sstm.str();

    putText(frame, text, cvPoint(30, 30)
                 , FONT_HERSHEY_COMPLEX_SMALL
                 , 0.8, cvScalar(0, 0, 255), 1, CV_AA);
    
    imshow("original", frame);

    if (!crop.empty()) {
        imshow("detected", crop);
    } else {
        destroyWindow("detected");
    }
}
Example #29
0
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip, vector<Rect>& faces)
{
    int i = 0;
    double t = 0;
    vector<Rect> faces2;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CV_HAAR_FIND_BIGGEST_OBJECT
        //|CV_HAAR_DO_ROUGH_SEARCH
        |CV_HAAR_SCALE_IMAGE
        ,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                 //|CV_HAAR_FIND_BIGGEST_OBJECT
                                 //|CV_HAAR_DO_ROUGH_SEARCH
                                 |CV_HAAR_SCALE_IMAGE
                                 ,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;

        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
                       cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg(*r);
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            //|CV_HAAR_DO_CANNY_PRUNING
            |CV_HAAR_SCALE_IMAGE
            ,
            Size(30, 30) );
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    cv::imshow( "result", img );
}
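
detectAndDraw() handles a single frame; the capture loop, cascade loading, and GUI event pump belong to the caller. A minimal driver sketch, assuming OpenCV 2.x as in the example, that detectAndDraw() is declared above, and placeholder cascade file paths (adjust to your installation):

#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::CascadeClassifier cascade, nestedCascade;

    // Placeholder paths: point these at the Haar cascade XML files shipped with OpenCV.
    if (!cascade.load("haarcascade_frontalface_alt.xml"))
        return 1;
    nestedCascade.load("haarcascade_eye_tree_eyeglasses.xml"); // optional; may stay empty

    cv::VideoCapture cap(0);                 // default camera
    if (!cap.isOpened())
        return 1;

    cv::Mat frame;
    std::vector<cv::Rect> faces;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;

        faces.clear();
        detectAndDraw(frame, cascade, nestedCascade,
                      1.3 /*scale*/, false /*tryflip*/, faces);

        if (cv::waitKey(10) == 27)           // ESC quits
            break;
    }
    return 0;
}

Note that detectAndDraw() already calls imshow("result", ...); the waitKey() here both throttles the loop and lets HighGUI repaint the window.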
Example #30
static void
kms_mouth_detect_process_frame(KmsMouthDetect *mouth_detect,int width,int height,
			       double scale_f2m,double scale_m2o, double scale_o2f,GstClockTime pts)
{
  int i = 0,j=0;
  Scalar color;
  Mat img (mouth_detect->priv->img_orig);
  Mat gray, mouth_frame (cvRound(img.rows/scale_m2o), cvRound(img.cols/scale_m2o), CV_8UC1);
  Mat  smallImg( cvRound (img.rows/scale_o2f), cvRound(img.cols/scale_o2f), CV_8UC1 );
  Mat mouthROI;
  vector<Rect> *faces=mouth_detect->priv->faces;
  vector<Rect> *mouths =mouth_detect->priv->mouths;
  vector<Rect> mouth;
  Rect r_aux;
  const static Scalar colors[] =  { CV_RGB(255,255,0),
				    CV_RGB(255,128,0),
				    CV_RGB(255,0,0),
				    CV_RGB(255,0,255),
				    CV_RGB(0,128,255),
				    CV_RGB(0,0,255),
				    CV_RGB(0,255,255),
				    CV_RGB(0,255,0)};	


  if ( ! __process_alg(mouth_detect,pts) && mouth_detect->priv->num_frames_to_process <=0)
    return;

  mouth_detect->priv->num_frame++;

  if ( (2 == mouth_detect->priv->process_x_every_4_frames && // process one frame out of every two
	(1 == mouth_detect->priv->num_frame % 2)) ||  
       ( (2 != mouth_detect->priv->process_x_every_4_frames) &&
	 (mouth_detect->priv->num_frame <= mouth_detect->priv->process_x_every_4_frames)))    
    {

      mouth_detect->priv->num_frames_to_process --;
      cvtColor( img, gray, CV_BGR2GRAY );

      //if detect_event != 0, faces were already received as metadata, so detection only runs when it is 0
      if (0 == mouth_detect->priv->detect_event )
	{ //detecting faces
	  //setting up the image where the face detector will be executed
	  resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
	  equalizeHist( smallImg, smallImg );
	  faces->clear();
	  fcascade.detectMultiScale( smallImg, *faces,
				     MULTI_SCALE_FACTOR(mouth_detect->priv->scale_factor), 2, 
				     0 | CV_HAAR_SCALE_IMAGE,
				     Size(3, 3) );
	}
      
    
      //setting up the image where the mouth detector will be executed	
      resize(gray,mouth_frame,mouth_frame.size(), 0,0,INTER_LINEAR);
      equalizeHist( mouth_frame, mouth_frame);
    
      mouths->clear();

      for( vector<Rect>::iterator r = faces->begin(); r != faces->end(); r++, i++ )
	{	

	  const int half_height=cvRound((float)r->height/1.8);
	  //Transform the detected face rectangle into mouth-frame coordinates;
	  //we only take the lower half of the face to avoid unnecessary processing
	  
	  r_aux.y=(r->y + half_height)*scale_f2m;
	  r_aux.x=r->x*scale_f2m;
	  r_aux.height = half_height*scale_f2m;
	  r_aux.width = r->width*scale_f2m;
	  
	  mouthROI = mouth_frame(r_aux);
	  /*The scale factor is fixed here; values higher than 1.1 perform much worse*/
	  mouth.clear();
	  mcascade.detectMultiScale( mouthROI, mouth,
				     MOUTH_SCALE_FACTOR, 3, 0
				     |CV_HAAR_FIND_BIGGEST_OBJECT,
				     Size(1, 1));
	  
	  for ( vector<Rect>::iterator m = mouth.begin(); m != mouth.end();m++,j++)	  
	    {

	      Rect m_aux;
	      //Transform the detected mouth rectangle back to original-image coordinates
	      m_aux.x = cvRound((r_aux.x + m->x)*scale_m2o);
	      m_aux.y= cvRound((r_aux.y+m->y)*scale_m2o);
	      m_aux.width=(m->width-1)*scale_m2o;
	      m_aux.height=(m->height-1)*scale_m2o;
	      mouths->push_back(m_aux);	      
	    }
	}
	  
    }

  if (GOP == mouth_detect->priv->num_frame )
    mouth_detect->priv->num_frame=0;
  
  //Draw the detected mouths on the original image
  j=0;
  if (1 == mouth_detect->priv->view_mouths)
    for ( vector<Rect>::iterator m = mouths->begin(); m != mouths->end();m++,j++)	  
      {
	color = colors[j%8];     
	cvRectangle( mouth_detect->priv->img_orig, cvPoint(m->x,m->y),
		     cvPoint(cvRound(m->x + m->width), 
			     cvRound(m->y + m->height-1)),
		     color, 3, 8, 0);	    
      }
}
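
The trickiest part of this example is the bookkeeping across three resolutions: faces are found on an image downscaled by scale_o2f, mouths on one downscaled by scale_m2o, and the rectangles are finally drawn on the original. The mapping back to original coordinates can be read in isolation; a sketch of the same arithmetic (the helper is mine, not part of the Kurento source; it uses cvRound for width/height where the original relies on implicit truncation):

#include <opencv2/core/core.hpp>

// Map a mouth hit (found relative to the face ROI inside the
// mouth-scaled frame) back to original-image coordinates,
// mirroring the inner loop above.
static cv::Rect mouthToOriginal(const cv::Rect &faceRoiInMouthFrame, // r_aux
                                const cv::Rect &mouthHit,            // *m
                                double scale_m2o)
{
    cv::Rect out;
    out.x      = cvRound((faceRoiInMouthFrame.x + mouthHit.x) * scale_m2o);
    out.y      = cvRound((faceRoiInMouthFrame.y + mouthHit.y) * scale_m2o);
    out.width  = cvRound((mouthHit.width  - 1) * scale_m2o);
    out.height = cvRound((mouthHit.height - 1) * scale_m2o);
    return out;
}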