Example #1
void FaceDetector::detect( cv::Mat & frame )
{
    using namespace cv;

    std::vector<Rect> faces;
    Mat frame_gray;

    cvtColor( frame, frame_gray, CV_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    //-- Detect faces
    face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

    for( auto face : faces )
    {
        Point center( face.x + face.width*0.5, face.y + face.height*0.5 );
        ellipse( frame, center, Size( face.width*0.5, face.height*0.5), 0, 0, 360, Scalar( 0, 0, 255 ), 4, 8, 0 );

        Mat faceROI = frame_gray( face );
        std::vector<Rect> eyes;

        //-- In each face, detect eyes
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );

        for( size_t j = 0; j < eyes.size(); j++ )
        {
            Point center( face.x + eyes[j].x + eyes[j].width*0.5, face.y + eyes[j].y + eyes[j].height*0.5 );
            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
            circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
        }
    }
}
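The member cascades used above have to be loaded before detect() runs. A minimal sketch of what that initialization might look like; the constructor shape and the XML file names are assumptions, not part of the original example.

// Hypothetical initialization; cascade file names are placeholders.
FaceDetector::FaceDetector()
{
    if( !face_cascade_.load( "haarcascade_frontalface_alt.xml" ) )
        CV_Error( CV_StsError, "could not load face cascade" );
    if( !eyes_cascade_.load( "haarcascade_eye_tree_eyeglasses.xml" ) )
        CV_Error( CV_StsError, "could not load eye cascade" );
}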
Example #2
bool detect(workshop_examples::DetectFaces::Request  &req,
            workshop_examples::DetectFaces::Response &res
            )
{
    ROS_INFO_STREAM("Request: detect_all = " << (int)req.detect_all);

    if( !image_received )
    {
        return false;
    }

    cv::Mat frame_gray;
    cv::cvtColor( frame_rgb, frame_gray, CV_BGR2GRAY );
    cv::equalizeHist( frame_gray, frame_gray );

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale( frame_gray, faces, 1.5, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );

    // Fill in service response
    res.detections.type = workshop_msgs::Detections::FACE;

    for( int i = 0; i < (int)faces.size(); i++ )
    {
        sensor_msgs::RegionOfInterest det;
        det.x_offset = faces[i].x;
        det.y_offset = faces[i].y;
        det.width = faces[i].width;
        det.height = faces[i].height;
        res.detections.rects.push_back(det);
    }

    ROS_INFO_STREAM("...sending back response.");

    return true;
}
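A hedged sketch of a client for this service; the service name "detect_faces" and the surrounding node setup are assumptions.

// Hypothetical caller; assumes the handler above was advertised as "detect_faces".
ros::NodeHandle nh;
ros::ServiceClient client = nh.serviceClient<workshop_examples::DetectFaces>("detect_faces");

workshop_examples::DetectFaces srv;
srv.request.detect_all = true;
if( client.call(srv) )
    ROS_INFO_STREAM("Received " << srv.response.detections.rects.size() << " face(s)");
else
    ROS_WARN("Service call failed (the handler returns false until an image arrives)");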
Example #3
bool AnchorPointSelector::detectLargestObject(cv::CascadeClassifier cascade, cv::Mat image, cv::Rect &largestObject, double scaleFactor, int minNeighbors, int flags, cv::Size minSize) {
	std::vector<cv::Rect> results;

	largestObject.x = 0;
	largestObject.y = 0;
	largestObject.width = 0;
	largestObject.height = 0;

	cascade.detectMultiScale(image, results, scaleFactor, minNeighbors, flags, minSize);

	// Save the largest object
	if (results.size() > 0) {
		for (size_t i = 0; i < results.size(); i++) {
			cv::Rect temp = results[i];

			if ((temp.width * temp.height) > (largestObject.width * largestObject.height)) {
				largestObject = temp;
			}
		}

		return true;
	}

	return false;
}
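The selection loop above can also be written with std::max_element; a sketch, assuming <algorithm> is included and C++11 is available.

// Equivalent largest-rectangle selection using the standard library.
auto largest = std::max_element( results.begin(), results.end(),
    []( const cv::Rect &a, const cv::Rect &b ) { return a.area() < b.area(); } );
if( largest != results.end() )
    largestObject = *largest;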
Example #4
/*
	Detects faces on an image and displays it to the screen.
*/
void detect( const cv::Mat frame, cv::CascadeClassifier face_cascade ){

	std::vector< cv::Rect > faces;
	cv::Mat frame_gray;
	
	// Create gray scale duplicate image
	cv::cvtColor( frame, frame_gray, CV_BGR2GRAY );
	
	// Run detection
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size( 24, 24 ) );
	
	// Iterate through all detected faces
	for( int i = 0; i < faces.size(); i++ ){
		
		// Draw rectangle over each face
		cv::rectangle( 
					frame, 
					cv::Point( faces[i].x, faces[i].y ), 
					cv::Point( faces[i].x + faces[i].width, 
					faces[i].y + faces[i].height ), 
					cv::Scalar( 0, 0, 255 ), // red
					1, 
					4 
				);
	}
	
	cv::imwrite( "output.png", frame ); // Save annotated image (PNG keeps the color drawing; PGM is grayscale-only)
}
Example #5
void TellThatToMyCamera_v1_0App::updateExpressions (Surface cameraImage){
	cv::Mat grayCameraImage( toOcv( cameraImage, CV_8UC1 ) );   // create a grayscale copy of the input image
   	cv::equalizeHist( grayCameraImage, grayCameraImage );       // equalize the histogram for (just a little) more accuracy
    mExpressions.clear();                                       // clear out the previously detected expressions
    mPredictions.clear();
    vector<cv::Rect> expressions;
    
    // Next is to detect the faces and iterate them, appending them to mExpressions
    mExpressionsCascade.detectMultiScale(grayCameraImage, expressions);
    
    // At this point the position of the faces has been calculated!
    // Now it's time to get the faces, make a prediction and save it for the video.
    
    cv::Mat graySq(100,100,CV_8UC1);    // gray square for assigning the proper size of the resized detected faces
    
    for(vector<cv::Rect>::const_iterator expressionIter = expressions.begin(); expressionIter != expressions.end(); ++expressionIter){
        // Process the detections face by face (in case there's more than one face in the video frame)
        Rectf expressionRect(fromOcv(*expressionIter));
        mExpressions.push_back(expressionRect);
        
        cv::Rect face_i (*expressionIter);                      // Rect with data (size and position) of the detected face
        cv::Mat face = grayCameraImage(face_i);                 // Image containing the detected face
        cv::Mat face_resized;                                   // Image for the resized version of the detected face
        cv::resize(face, face_resized, graySq.size(), 1, 1, cv::INTER_CUBIC); // resizes the image
        // cv::resize(face, face_resized, graySq.size(), 0, 0, cv::INTER_LINEAR);
        
        // Now, perform the EXPRESSION PREDICTION!!!
        int predicted = mFisherFaceRec->predict(face_resized);
        mPredictions.push_back(predicted);                      // put the corresponding label to the corresponding face
    }
}
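For context, a minimal sketch of how mFisherFaceRec might be created and trained with the OpenCV 2.x contrib API; the training data and the label scheme are assumptions.

// Hypothetical setup (OpenCV 2.4, opencv2/contrib/contrib.hpp).
// Fisherfaces requires all training images to be CV_8UC1 and one size;
// 100x100 matches the graySq target used in updateExpressions() above.
std::vector<cv::Mat> trainImages;   // filled elsewhere with 100x100 grayscale samples
std::vector<int>     trainLabels;   // one expression label per image
cv::Ptr<cv::FaceRecognizer> mFisherFaceRec = cv::createFisherFaceRecognizer();
mFisherFaceRec->train( trainImages, trainLabels );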
Example #6
void faceDetect(){
  //Detect faces
   cv::cvtColor( frame, frame_gray, CV_BGR2GRAY );
   cv::equalizeHist( frame_gray, frame_gray );
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
  
   //for each face, draw an ellipse around it and check for the red color near the face position
   for( size_t i = 0; i < faces.size(); i++ )
    {
      cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
      cv::ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar( 0, 255, 0 ), 2, 8, 0 );
      faceX = (float) faces[i].x;
      faceY = (float) faces[i].y;
     
      // true when posX lies within faceColorThresh of the face position
      if( ((faceX + faceColorThresh) > posX) && ((faceX - faceColorThresh) < posX) ) {
	face = true; 
	//publishing camera image
	out_msg.image    = frame; //frame
	out_msg.encoding = sensor_msgs::image_encodings::BGR8;
	msg = out_msg.toImageMsg();
	pub.publish(msg);
	ROS_FATAL("PERSON DETECTED");
	break;
      }
    }
}
Example #7
void fd_timeslice( cv::Mat& frame_gray, cv::Mat& original, bool mFindEyes )
{
	frame_gray.copyTo(debugImage);

	//equalizeHist( frame_gray, frame_gray  );
	//cv::pow(frame_gray, CV_64F, frame_gray);
	uint64_t  start = GetTimeStamp2();
	face_cascade.detectMultiScale( frame_gray, faces, 1.015, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );
	//  findSkin(debugImage);
	uint64_t end = GetTimeStamp2();
	num_faces_present = faces.size();
		
	// extract Face : 
	if (num_faces_present) 
	{
		printf("FaceDetect() Duration = %6.2f;  faces_present=%d\n", (float)(end-start)/1000000., num_faces_present );
		//cv::flip(frame_gray, frame_gray, 1);
		start = GetTimeStamp2();
		printf("FaceRect: <%6.2f, %d>\n", faces[0].height, faces[0].width );
		
		faceROIc = original(faces[0]);
		if (mFindEyes)
			findEyes( frame_gray, faces[0] );
		end = GetTimeStamp2();
		printf("FindEyes() Duration = %6.2f\n", (float)(end-start)/1000000. );
		imshow(face_window_name, faceROIc);
	}

	annotate_faces( original );
}
Example #8
/**
 * @function detectAndDisplay
 */
void detectAndDisplay( cv::Mat frame ) {
	clock_t start,end;
	double time_taken;
	start = clock();

	std::vector<cv::Rect> faces;
	//cv::Mat frame_gray;

	std::vector<cv::Mat> rgbChannels(3);
	cv::split(frame, rgbChannels);
	cv::Mat frame_gray = rgbChannels[2];

	//cvtColor( frame, frame_gray, CV_BGR2GRAY );
	//equalizeHist( frame_gray, frame_gray );
	//cv::pow(frame_gray, CV_64F, frame_gray);

	//-- Detect faces
	//Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(80, 80) );

	//  findSkin(debugImage);

	for( int i = 0; i < faces.size(); i++ )
	{
		rectangle(debugImage, faces[i], 1234);
	}
	//-- Show what you got
	if (faces.size() > 0) {
		findEyes(frame_gray, faces[0]);
	}

	end = clock();
	time_taken = ((double) (end - start)) / CLOCKS_PER_SEC;
	//std::cout<<"Time taken by the detect and display function : "<<time_taken<<std::endl;
}
Example #9
void MainWindow::detectAndDisplay(const QImage &image){
#if 0
    std::vector<Rect> faces;
    Mat face_gray;
     cv::Mat face = QImage2cvMat(image);

    cvtColor( face, face_gray, CV_BGR2GRAY );  // convert BGR to grayscale
    equalizeHist( face_gray, face_gray );   // histogram equalization

    face_cascade.detectMultiScale( face_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(1, 1) );

    for( int i = 0; i < faces.size(); i++ ){
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        ellipse( face, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 0), 2,7, 0 );
    }

    QImage image1 = cvMat2QImage(face);
    QPixmap pix = QPixmap::fromImage(image1.scaled(image1.width(),image1.height()));
    ui->Lab_VideoOut->setPixmap(pix);
#else
    QPixmap pix = QPixmap::fromImage(image.scaled(image.width(),image.height()));
    ui->Lab_VideoOut->setPixmap(pix);
    m_timerPlay->start();
#endif
}
Example #10
/**
* Function to detect human face and the eyes from an image.
*
* @param im The source image
* @param tpl Will be filled with the eye template, if detection success.
* @param rect Will be filled with the bounding box of the eye
* @return zero=failed, nonzero=success
*/
auto detectEye(cv::Mat const& im, cv::Mat & tpl, cv::Rect & rect)
{
    std::vector<cv::Rect> faces, eyes;
    face_cascade.detectMultiScale(im, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30,30));
    for (int i = 0; i < faces.size(); i++)
    {
        cv::Mat face = im(faces[i]);
        eye_cascade.detectMultiScale(face, eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(20,20));
        if (eyes.size())
        {
            rect = eyes[0] + cv::Point(faces[i].x, faces[i].y);
            tpl = im(rect);
            break;  // keep this result; a later eye-less face would otherwise clear eyes and return 0
        }
    }
    return eyes.size();
}
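The tpl/rect pair that detectEye() fills is typically consumed by a cheaper frame-to-frame tracker. A sketch of that follow-up step using template matching; the doubled search window is an arbitrary choice.

// Hypothetical tracker: re-locate the eye template near its previous position.
void trackEye( cv::Mat const& im, cv::Mat& tpl, cv::Rect& rect )
{
    // Search window: the previous rect grown to twice its size, clipped to the image.
    cv::Rect window( rect.x - rect.width / 2, rect.y - rect.height / 2,
                     rect.width * 2, rect.height * 2 );
    window &= cv::Rect( 0, 0, im.cols, im.rows );

    cv::Mat dst;
    cv::matchTemplate( im( window ), tpl, dst, CV_TM_SQDIFF_NORMED );

    double minval;
    cv::Point minloc;
    cv::minMaxLoc( dst, &minval, NULL, &minloc, NULL );

    // For SQDIFF the minimum is the best match.
    rect.x = window.x + minloc.x;
    rect.y = window.y + minloc.y;
}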
Example #11
// Analyzes an image, trying to identify a hand.
bool detect_hand(cv::Mat& frame){
	// Required variables.
	float puntos_extraidos[2*stasm_NLANDMARKS];
	std::vector<cv::Rect> detecciones;
	cv::Mat gray_frame;
	bool salida = false;
	
	// Prepare the image.
	cv::cvtColor(frame, gray_frame, cv::COLOR_BGR2GRAY);

	
	// Run the detector on the image, making sure to only detect rectangles larger than half the
	// image.
	hand_detector.detectMultiScale(
		gray_frame,
		detecciones,
		1.5,
		3,
		0|cv::CASCADE_SCALE_IMAGE,
		frame.size()/2,
		frame.size());
		
	// If any objects were detected.
	if(detecciones.size() > 0){
		int mano_encontrada;
		std::cout << "Mano encontrada en "<< detecciones[0] << std::endl;
		cv::rectangle(frame, detecciones[0], cv::Scalar(0,0,255), 4);			
		
		if(!stasm_search_single(
			&mano_encontrada,
			puntos_extraidos,
			(char*)gray_frame.data,
			gray_frame.size().width,
			gray_frame.size().height,
			"path_prueba",
			"../data")){
				std::cout << "Puntos no encontrados" << std::endl;
		}
		else{
			salida = true;
			
			cv::Point2f p1, p2;
			p1 = cv::Point2f(puntos_extraidos[0],puntos_extraidos[1]);
			cv::circle(frame, p1, 1, cv::Scalar(0,255,0), 3);
			
			
			for(int i=2; i<stasm_NLANDMARKS*2; i+=2){
				p2 = cv::Point2f(puntos_extraidos[i],puntos_extraidos[i+1]);
				cv::circle(frame, p2, 1, cv::Scalar(0,255,0), 3);
				cv::line(frame, p1, p2, cv::Scalar(0,255,0));
				p1 = p2;
			}// End for.
		}// End else.
	}// End if.
	
	return salida;
}// End 'detect_hand'.
Example #12
double MainWindow::calculateReferenceMaxDistanceAndPosition(cv::VideoCapture cap,cv::CascadeClassifier frontal_face_cascade, cv::Point2d &referencePosition)
{
    // find a real face (consistent detections)
    std::vector<double> faceDistancesBuffer;
    std::vector<cv::Point2d> facePositionsBuffer;
    cv::Point2d previousPosition(0,0);
    double sumDistances = 0;
    int facesFound = 0;
    while (facesFound < 10)
    {
        cv::Mat frame;
        cap >> frame;
        cv::Mat frame_gray;
        cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
        cv::equalizeHist( frame_gray, frame_gray );

        std::vector<cv::Rect> frontal_faces;
        frontal_face_cascade.detectMultiScale(frame_gray, frontal_faces);
        if (!frontal_faces.empty())
        {
            facesFound++;
            cv::Point2d currentPosition(frontal_faces[0].x,frontal_faces[0].y);
            facePositionsBuffer.push_back(currentPosition);
            double distance = cv::norm(cv::Mat(previousPosition),cv::Mat(currentPosition));
            sumDistances += distance;
            faceDistancesBuffer.push_back(distance);
            previousPosition = currentPosition;

            rectangle(frame,frontal_faces[0],cv::Scalar(0,255,0));
        }
        QImage image = QImage((const uchar *)frame.data, frame.cols, frame.rows, QImage::Format_RGB888).rgbSwapped();
        ui->imageDisplay->setPixmap(QPixmap::fromImage(image));
        qApp->processEvents();
    }

    double averageDistance = sumDistances/faceDistancesBuffer.size();

    int countDistancesNoOutliers = 0;
    double sumDistancesNoOutliers = 0;
    for (unsigned int i = 0; i < faceDistancesBuffer.size(); i++)
    {
        if (faceDistancesBuffer[i] < 2*averageDistance)
        {
            countDistancesNoOutliers++;
            sumDistancesNoOutliers += faceDistancesBuffer[i];
            if (i<facePositionsBuffer.size())
            {
                referencePosition = facePositionsBuffer[i];
            }
        }
    }

    double groundTruthMaxDistance = 4*(sumDistancesNoOutliers/countDistancesNoOutliers);
    return groundTruthMaxDistance;
}
Example #13
/**
 * @function detectAndDisplay
 */
int detectAndDisplay( cv::Mat frame ) {
  std::vector<cv::Rect> faces;
  //cv::Mat frame_gray;

  std::vector<cv::Mat> rgbChannels(3);
  cv::split(frame, rgbChannels);
  cv::Mat frame_gray = rgbChannels[2];
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );
//  findSkin(debugImage);

  for( int i = 0; i < faces.size(); i++ )
  {
    rectangle(debugImage, faces[i], 1234);
  }
  //-- Show what you got
  if (faces.size() > 0) {
    findEyes(frame_gray, faces[0]);
      
      if (frame_gray.data) {
          frame_gray.convertTo(frame_gray, CV_32FC1);
          cv::Mat target = CropFace(frame_gray,leftPupil, rightPupil, offset_pct, dest_sz);
          
          if (errorflag) {
              return -1;
          }
          if (target.data) {
              
              target.convertTo(target, CV_8UC1);

              equalizeHist(target,target);
              
              target.convertTo(target, CV_32FC1);
              
              int index = query(target,ef); 
              index+= startnum;
              
              char temp[16];   // large enough for any int
              snprintf(temp, sizeof(temp), "%d", index);
              
              std::string s(temp);
              std::cout << "face recognized, index : " << index << std::endl;
              
              //    imshow("target", ef.norm_0_255(target).reshape(1, dest_sz.x));
              imshow("result"+s,  ef.getdb()[index-startnum]);
              waitKey(0);
              destroyWindow("result"+s);
              return index;
              
          }
      }
  }
    return -1;
    
}
Example #14
void ocvFaceDetectApp::updateFaces( Surface cameraImage )
{
	const int calcScale = 2; // calculate the image at half scale

	// create a grayscale copy of the input image
	cv::Mat grayCameraImage( toOcv( cameraImage, CV_8UC1 ) );

	// scale it to half size, as dictated by the calcScale constant
	int scaledWidth = cameraImage.getWidth() / calcScale;
	int scaledHeight = cameraImage.getHeight() / calcScale; 
	cv::Mat smallImg( scaledHeight, scaledWidth, CV_8UC1 );
	cv::resize( grayCameraImage, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR );
	
	// equalize the histogram
	cv::equalizeHist( smallImg, smallImg );

	// clear out the previously detected faces & eyes
	mFaces.clear();
	mEyes.clear();

	// detect the faces and iterate them, appending them to mFaces
	vector<cv::Rect> faces;
	mFaceCascade.detectMultiScale( smallImg, faces );
	for( vector<cv::Rect>::const_iterator faceIter = faces.begin(); faceIter != faces.end(); ++faceIter ) {
		Rectf faceRect( fromOcv( *faceIter ) );
		faceRect *= calcScale;
		mFaces.push_back( faceRect );
		
		// detect eyes within this face and iterate them, appending them to mEyes
		vector<cv::Rect> eyes;
		mEyeCascade.detectMultiScale( smallImg( *faceIter ), eyes );
		for( vector<cv::Rect>::const_iterator eyeIter = eyes.begin(); eyeIter != eyes.end(); ++eyeIter ) {
			Rectf eyeRect( fromOcv( *eyeIter ) );
			eyeRect = eyeRect * calcScale + faceRect.getUpperLeft();
			mEyes.push_back( eyeRect );
		}
	}
}
Example #15
void detectObjects(const cv::Mat &img, cv::CascadeClassifier &cascade, std::vector<cv::Rect> &objects, int scaledWidth, int flags, Size minFeatureSize, float searchScaleFactor, int minNeighbors)
{
    cv::Mat gray;
    if (img.channels() == 3) {
        cvtColor(img, gray, CV_BGR2GRAY);
    }
    else if (img.channels() == 4) {
        cvtColor(img, gray, CV_BGRA2GRAY);
    }
    else {
        gray = img;
    }

    cv::Mat inputImg;
    float scale = img.cols / (float)scaledWidth;
    if (img.cols > scaledWidth) {
        int scaledHeight = cvRound(img.rows / scale);
        cv::resize(gray, inputImg, cv::Size(scaledWidth, scaledHeight));
    }
    else {
        inputImg = gray;
    }

    cv::Mat equalizedImg;
    cv::equalizeHist(inputImg, equalizedImg);

    cascade.detectMultiScale(equalizedImg, objects, searchScaleFactor, minNeighbors, flags, minFeatureSize);

    if (img.cols > scaledWidth) {
        for (int i = 0; i < (int)objects.size(); i++ ) {
            objects[i].x = cvRound(objects[i].x * scale);
            objects[i].y = cvRound(objects[i].y * scale);
            objects[i].width = cvRound(objects[i].width * scale);
            objects[i].height = cvRound(objects[i].height * scale);
        }
    }

    for (int i = 0; i < (int)objects.size(); i++ ) {
        if (objects[i].x < 0)
            objects[i].x = 0;
        if (objects[i].y < 0)
            objects[i].y = 0;
        if (objects[i].x + objects[i].width > img.cols)
            objects[i].x = img.cols - objects[i].width;
        if (objects[i].y + objects[i].height > img.rows)
            objects[i].y = img.rows - objects[i].height;
    }
}
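A hedged usage sketch for the wrapper above; the cascade file, the existing BGR frame, and the parameter values are assumptions chosen to mirror the other examples in this section.

// Hypothetical call site: shrink frames to 320 px wide for speed; the wrapper
// scales the resulting rectangles back to the original resolution.
cv::CascadeClassifier faceCascade;
if( faceCascade.load( "haarcascade_frontalface_alt.xml" ) )   // placeholder path
{
    std::vector<cv::Rect> faces;
    detectObjects( frame, faceCascade, faces, 320,
                   CV_HAAR_SCALE_IMAGE, cv::Size( 20, 20 ), 1.1f, 4 );
}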
Example #16
void ASMModel::fit(const Mat & img, vector< FitResult > & fitResult, 
             cv::CascadeClassifier &faceCascade, bool biggestOnly, int verbose)
{
	int cd=0;
	if (biggestOnly)
		cd = CV_HAAR_FIND_BIGGEST_OBJECT;
	
	// [zl] trying to make this faster
    // Step 1: Search for faces
    vector<cv::Rect> faces;
    try{
        faceCascade.detectMultiScale(img, faces,
            1.2, 2, cd
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            |CV_HAAR_DO_ROUGH_SEARCH
            |CV_HAAR_SCALE_IMAGE
            , cv::Size(60, 60) );
    }
    catch (const cv::Exception &e){
        printf("Face detect error...(%s)\n", e.err.data());
        faces.clear();
    }
    fitResult.resize(faces.size());
    for (uint i=0; i<faces.size(); i++){
        cv::Rect r = faces[i];
        r.y -= r.height*searchYOffset;
        r.x -= r.width*searchXOffset;
        if (r.x<0) r.x = 0;
        if (r.y<0) r.y = 0;
        r.width *= searchWScale;
        r.height *= searchHScale;
        if (r.x+r.width>img.cols) r.width = img.cols-r.x;
        if (r.y+r.height>img.rows) r.height = img.rows-r.y;
        // Do the search!
        doSearch(img(r), fitResult[i], verbose);
        SimilarityTrans s2;
        s2.Xt = r.x;
        s2.Yt = r.y;
        s2.a = 1;
        fitResult[i].transformation = s2 * fitResult[i].transformation;
    }
    if (faces.size()==0){
        printf("No face found!\n");
    }
}
Example #17
void display(IplImage& img) {
    cv::Mat frame = cv::cvarrToMat(&img);  // wrap the IplImage header; no pixel copy
    do {
      std::vector<Rect> faces;
      Mat grey;

      cvtColor(frame, grey, CV_BGR2GRAY);
      equalizeHist(grey, grey);

      faceCascade.detectMultiScale(grey, faces, 1.1, 5, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30));

      for (size_t i = 0; i < faces.size(); i++) {
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
      }

      imshow("Display", frame);
      pressedKey = waitKey(1);
    } while (pressedKey != 27);  // 27 == ESC
}
Example #18
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    ROS_INFO_STREAM_ONCE("Image received.");

    cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    cv::Mat frame_rgb = cv_ptr->image;

    cv::Mat frame_gray;
    cv::cvtColor( frame_rgb, frame_gray, CV_BGR2GRAY );
    cv::equalizeHist( frame_gray, frame_gray );

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale( frame_gray, faces, 1.5, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );

    // Prepare and publish all the detections
    workshop_msgs::DetectionsStamped msg_out;
    msg_out.header.stamp = msg->header.stamp;
    msg_out.header.frame_id = msg->header.frame_id;
    msg_out.detections.type = workshop_msgs::Detections::FACE;

    for( int i = 0; i < (int)faces.size(); i++ )
    {
        sensor_msgs::RegionOfInterest det;
        det.x_offset = faces[i].x;
        det.y_offset = faces[i].y;
        det.width = faces[i].width;
        det.height = faces[i].height;
        msg_out.detections.rects.push_back(det);
    }

    pub.publish(msg_out);

    ROS_INFO_ONCE( "Message sent" );

    // Show the detections using OpenCV window
//    for( int i = 0; i < (int)faces.size(); i++ )
//    {
//        cv::rectangle( frame_rgb, faces[i], cv::Scalar( 0, 255, 0 ), 4, 8, 0 );
//    }
//
//    cv::imshow( "Image from Topic", frame_rgb );
//    cv::waitKey(10);
}
Example #19
	void Test::performFeatureDetection( std::vector<Rect> feature,
                                       std::vector<Rect> faces,
                                       cv::Mat faceROI,
                                       cv::CascadeClassifier feature_cascade,
                                       cv::Mat frame, int i){
        feature_cascade.detectMultiScale( faceROI, feature, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        if( feature.empty() )
            return;  // nothing found; feature[0] below would be out of bounds
        
        for( int j = 0; j < 1 /*mouth.size()*/; j++ )
        {
            Point center( faces[i].x + feature[j].x + feature[j].width*0.5, faces[i].y + feature[j].y + feature[j].height*0.5 );
            Point corner = cvPoint(faces[i].x + feature[j].x + feature[j].width ,faces[i].y + feature[j].y + feature[j].height);
            rectangle(frame,    cvPoint(faces[i].x + feature[j].x, faces[i].y + feature[j].y),
                      cvPoint(faces[i].x + feature[j].x + feature[j].width ,faces[i].y + feature[j].y + feature[j].height),
                      CV_RGB(225, 228, 0), //225, 228, 0
                      1, 8, 0
                      );
            putSomeText("Mouth detected",frame, corner);
        }
    }
Example #20
cv::Mat detectFace(cv::Mat frame) {
	cv::Mat gray(frame.size(), CV_8UC1);
	cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY); //Not strictly needed: detectMultiScale converts to grayscale internally.

	std::vector<cv::Rect> faceDetected;
	faceDetector.detectMultiScale(gray, faceDetected, scaleFactor, 0, flags, minFaceSize);
	
	std::vector<int> numDetections;
	cv::groupRectangles(faceDetected, numDetections, minNeighbors, GROUP_EPS);

	for (unsigned int i = 0; i < faceDetected.size(); i++) {
		cv::Rect rect = faceDetected.at(i);
		cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 1);
		cv::putText(frame, std::to_string(numDetections[i]),
			cv::Point(rect.x, rect.y), 1, 1, cv::Scalar(0, 0, 255));
	}

	return frame;
}
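detectFace() leans on several globals that the example does not show, and on one design point: minNeighbors is passed to detectMultiScale as 0 so every raw candidate is returned, and cv::groupRectangles then both merges the candidates and reports how many raw hits support each face, which the loop prints beside the rectangle. A sketch of plausible definitions; every value here is an assumption.

// Hypothetical globals for detectFace(); values are illustrative only.
cv::CascadeClassifier faceDetector;      // loaded elsewhere via faceDetector.load(...)
const double   scaleFactor  = 1.1;       // pyramid step between detection scales
const int      flags        = 0;         // legacy parameter; 0 in the C++ API
const cv::Size minFaceSize( 30, 30 );    // smallest face searched for
const int      minNeighbors = 3;         // grouping threshold used by groupRectangles
const double   GROUP_EPS    = 0.2;       // relative tolerance when merging rectangles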
Example #21
void FaceFrameCutter::cut(int video)
{
	try
	{
		cv::VideoCapture cap(video);
		while(true)
		{
			cv::Mat frame,grayFrame;
			cap >> frame;
			DUMP(frame.channels());
			cv::cvtColor(frame, grayFrame, cv::COLOR_BGR2GRAY);
			// Detect faces
			vector<cv::Rect> faces;
		    faceCascade_.detectMultiScale(grayFrame, faces,scaleFactorFace,minNeighborsFace,flagsFace,minSizeFace,maxSizeFace);
			auto faceNum = faces.size();
			if(1 == faceNum)
			{
				auto face = faces.front();
				cv::Point pt1(face.x, face.y);
		        cv::Point pt2((face.x + face.width), (face.y + face.height));
				cv::Mat faceFrame(frame,face);
				cv::Mat dstFrame(256,256,faceFrame.type());
				cv::resize(faceFrame,dstFrame,cv::Size(256,256));
				string image_path(outputPitures_);
				DUMP(outputPitures_);
				cv::imwrite(outputPitures_, dstFrame);
				break;
			}
		}
	}
	catch(const cv::Exception &e)
	{
		DUMP(e.what());
	}
}
Example #22
/**
 * @function detectAndDisplay
 */
void detectAndDisplay( cv::Mat frame ) {
  std::vector<cv::Rect> faces;
  //cv::Mat frame_gray;

  std::vector<cv::Mat> rgbChannels(3);
  cv::split(frame, rgbChannels);
  cv::Mat frame_gray = rgbChannels[2];

  //cvtColor( frame, frame_gray, CV_BGR2GRAY );
  //equalizeHist( frame_gray, frame_gray );
  //cv::pow(frame_gray, CV_64F, frame_gray);
  //-- Detect faces
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );
//  findSkin(debugImage);

  for( int i = 0; i < faces.size(); i++ )
  {
    rectangle(debugImage, faces[i], 1234);
  }
  //-- Show what you got
  if (faces.size() > 0) {
    findEyes(frame_gray, faces[0]);
  }
}
Example #23
void findUpperBody(cv::CascadeClassifier classifier, cv::Mat sequence, std::vector<cv::Rect> roi, std::vector<std::vector<cv::Rect>> &upper_body)
{
    cv::Mat sequenceGray;

    upper_body.clear();
    upper_body.resize(roi.size());

    cv::cvtColor(sequence, sequenceGray, CV_BGR2GRAY);
    cv::equalizeHist(sequenceGray, sequenceGray);

    for(size_t i=0;i<roi.size();i++)
    {
//        cv::imshow("temp", sequenceGray(roi[i]));
//        cv::waitKey(0);

        classifier.detectMultiScale(sequenceGray(roi[i]), upper_body[i], 1.1, 0, 0|CV_HAAR_SCALE_IMAGE, cv::Size(20, 20));

        for(size_t j=0;j<upper_body[i].size();j++)
        {
            upper_body[i][j].x += roi[i].x;
            upper_body[i][j].y += roi[i].y;
        }
    }
}
Example #24
int main()
{
    double timeUse;
    struct timeval startTime, stopTime;

    cv::Mat rawImage, grayImage;
    std::vector<cv::Rect> faces;

    init();
    spider_head(45);

    std::stringstream logStream, outStream;
    float faceX, faceY;
    int8_t rotateDegree = 0;   // initialized so the lost-face branch never reads garbage
    uint8_t rotatePwm = 0;
    uint8_t lostCounter = 0;
    while(running)
    {
        logStream.str("");
        logStream << std::fixed << std::setprecision(3);
        outStream.str("");
        outStream << std::fixed << std::setprecision(3);

        gettimeofday(&startTime, NULL);

        camera.retrieve(rawImage);

        cv::cvtColor(rawImage, grayImage, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(grayImage, grayImage);

        faces.clear();
        face_cascade.detectMultiScale(grayImage, faces, 1.1,
            2, 0|cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
        if(faces.empty())
        {
            if(lostCounter != 0)
            {
                lostCounter --;
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);

                logStream << "Face lost, lost counter: " << static_cast<int>(lostCounter) << ", ";
            }
            else
            {
                spider_move_stop();
                
                logStream << "No face!";
            }
        }
        else
        {
            lostCounter = 5;
            faceX = faces[0].x+faces[0].width*0.5;
            faceY = faces[0].y+faces[0].height*0.5;

            logStream << "Get face, size: " << faces.size() << ", ";
            logStream << "coordinate: x " << faceX << " y " << faceY;

            if(faceX < 80)
            {
                rotateDegree = -5;
                rotatePwm = 80;
            }
            else if(faceX > 80)
            {
                rotateDegree = 5;
                rotatePwm = 80;
            }

            if(faceX < 70 || faceX > 90)
            {
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);
            }

            //spider_move(1, 55);
        }

        gettimeofday(&stopTime, NULL);
        timeUse = stopTime.tv_sec - startTime.tv_sec + (stopTime.tv_usec - startTime.tv_usec)/1000000.0;
        if(timeUse < 0.25)
            usleep((0.25 - timeUse) * 1000000);

        outStream << "Time use: " << timeUse << "s, " << logStream.str();
        std::cout << outStream.str() << std::endl;
    }

    void* result;
    pthread_join(grabThread, &result);

    spider_head(35);
    spider_move_stop();
    spider_rotate_stop();
    spider_close();

    camera.release();

    std::cout << "Program exit!" << std::endl;

    return 0;
}
Example #25
void FaceRecognition::detectAndDraw(cv::Mat image,
                                    cv::CascadeClassifier &cascade,
                                    cv::CascadeClassifier &nested_cascade,
                                    double scale,
                                    bool try_flip)
{
    int    i    = 0;
    double tick = 0.0;
    std::vector<cv::Rect> faces_a, faces_b;
    cv::Scalar colors[] = {
        CV_RGB(0, 0, 255),
        CV_RGB(0, 128, 255),
        CV_RGB(0, 255, 255),
        CV_RGB(0, 255, 0),
        CV_RGB(255, 128, 0),
        CV_RGB(255, 255, 0),
        CV_RGB(255, 0, 0),
        CV_RGB(255, 0, 255)
    };
    cv::Mat image_gray;
    cv::Mat image_small(cvRound(image.rows / scale),
                        cvRound(image.cols / scale),
                        CV_8UC1);
    // Convert to gray image.
    cv::cvtColor(image, image_gray, CV_BGR2GRAY);
    // Convert gray image to small size.
    cv::resize(image_gray, image_small, image_small.size(), 0, 0,
               cv::INTER_LINEAR);
    cv::equalizeHist(image_small, image_small);

    tick = (double)cvGetTickCount();
    cascade.detectMultiScale(image_small, faces_a, 1.1, 2, 0 |
                             CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    if (try_flip) {
        cv::flip(image_small, image_small, 1);
        cascade.detectMultiScale(image_small, faces_b, 1.1, 2, 0 |
                                 CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
        std::vector<cv::Rect>::const_iterator it = faces_b.begin();
        for (; it != faces_b.end(); it++) {
            faces_a.push_back(cv::Rect(image_small.cols - it->x - it->width,
                                       it->y, it->width, it->height));
        }
    }

    // Calculate detection's time.
    tick = (double)cvGetTickCount() - tick;
    std::cout << "Detection time: "
              << tick / ((double)cvGetTickCount() * 1000.0)
              << " ms"
              << std::endl;

    std::vector<cv::Rect>::const_iterator it = faces_a.begin();
    for (; it != faces_a.end(); it++, i++) {
        int radius;
        double aspect_ratio = (double)it->width / it->height;
        std::vector<cv::Rect> nested_objects;
        cv::Mat small_image_roi;
        cv::Point center;
        cv::Scalar color = colors[i % 8];

        // Capture detected face and predict it.
        cv::Mat image_gray;
        cv::Mat image_result(cvRound(IMG_HEIGH), cvRound(IMG_WIDTH), CV_8UC1);
        cv::Mat image_temp;
        cv::Rect rect;
        rect.x = cvRound(it->x * scale);
        rect.y = cvRound(it->y * scale);
        rect.height = cvRound(it->height * scale);
        rect.width  = cvRound(it->width  * scale);
        image_temp  = image(rect);
        cv::cvtColor(image_temp, image_gray, CV_BGR2GRAY);
        cv::resize(image_gray, image_result, image_result.size(), 0, 0,
                   cv::INTER_LINEAR);
        int predicted_label = g_model->predict(image_result);

        std::cout << "*************************" << std::endl
                  << "The predicted label: "     << predicted_label
                  << std::endl
                  << "*************************"
                  << std::endl;

        // Recognize specific face for sending character to serial device.
        if (predicted_label == 1) {
            g_face_recognition.writeCharToSerial('Y');
        }
        else {
            g_face_recognition.writeCharToSerial('N');
        }

        // Draw the circle for faces.
        if (0.75 < aspect_ratio && aspect_ratio > 1.3) {
            center.x = cvRound((it->x + it->width * 0.5) * scale);
            center.y = cvRound((it->y + it->height * 0.5) * scale);
            radius = cvRound((it->width + it->height) * 0.25 * scale);
            cv::circle(image, center, radius, color, 3, 8, 0);
        }
        else {
            // Draw the rectangle for faces.
            cv::rectangle(image,
                          cvPoint(cvRound(it->x * scale),
                                  cvRound(it->y * scale)),
                          cvPoint(cvRound((it->x + it->width  - 1) * scale),
                                  cvRound((it->y + it->height - 1) * scale)),
                          color,
                          3,
                          8,
                          0);
            if (nested_cascade.empty()) {
                continue ;
            }
            small_image_roi = image_small(*it);
            nested_cascade.detectMultiScale(small_image_roi, nested_objects,
                                            1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
                                            cv::Size(30, 30));
            std::vector<cv::Rect>::const_iterator it_temp =
                nested_objects.begin();
            // Draw the circle for eyes.
            for (; it_temp != nested_objects.end(); it_temp++) {
                center.x = cvRound((it->x + it_temp->x + it_temp->width * 0.5)
                    * scale);
                center.y = cvRound((it->y + it_temp->y + it_temp->height * 0.5)
                    * scale);
                radius = cvRound((it_temp->width + it_temp->height) * 0.25
                    * scale);
                cv::circle(image, center, radius, color, 3, 8, 0);
            }
        }
    }
    // Open camera window.
    cv::imshow("Face Recognition", image);
}
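For completeness, a sketch of how g_model might be created and restored before detectAndDraw() runs; the recognizer type and the file name are assumptions.

// Hypothetical initialization of g_model (OpenCV 2.4 contrib API).
cv::Ptr<cv::FaceRecognizer> g_model = cv::createEigenFaceRecognizer();
g_model->load( "face_model.yml" );   // placeholder path to a previously trained model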
Example #26
/**
 * @function detectEyes
 * - Uses OpenCV to detect face
 * - Interpolate eyes position on image
 * - Computes eyes position in space
 * - Add some display for the detection
 */
cv::Mat detectEyes(cv::Mat image)
{
    // INIT
    std::vector<cv::Rect> faces;
    cv::Mat image_gray;
    cv::cvtColor( image, image_gray, CV_BGR2GRAY );
    cv::equalizeHist( image_gray, image_gray );

    // DETECT FACE
    //-- Find the biggest face (CV_HAAR_FIND_BIGGEST_OBJECT; see the OpenCV documentation)
    face_cascade.detectMultiScale( image_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(minFaceSize, minFaceSize) );

    for( size_t i = 0; i < faces.size(); i++ )
    {
        // DETECT EYES
        //-- points in pixel
        cv::Point leftEyePt( faces[i].x + faces[i].width*0.30, faces[i].y + faces[i].height*0.37 );
        cv::Point rightEyePt( faces[i].x + faces[i].width*0.70, faces[i].y + faces[i].height*0.37 );
        cv::Point eyeCenterPt( faces[i].x + faces[i].width*0.5, leftEyePt.y );

        //-- normalize with webcam internal parameters
        GLdouble normRightEye = (rightEyePt.x - camWidth/2)/f;
        GLdouble normLeftEye = (leftEyePt.x - camWidth/2)/f;
        GLdouble normCenterX = (eyeCenterPt.x - camWidth/2)/f;
        GLdouble normCenterY = (eyeCenterPt.y - camHeight/2)/f;

        //-- get space coordinates
        float tempZ = eyesGap/(normRightEye-normLeftEye);
        float tempX = normCenterX*glCamZ;
        float tempY = -normCenterY*glCamZ;

        //-- update cam coordinates (smoothing)
        glCamZ = (glCamZ*0.5) + (tempZ*0.5);
        glCamX = (glCamX*0.5) + (tempX*0.5);
        glCamY = (glCamY*0.5) + (tempY*0.5);

        // DISPLAY
        if(bDisplayCam && bDisplayDetection)
        {
            //-- face rectangle
            cv::rectangle(image, faces[i], 1234);

            //-- face lines
            cv::Point leftPt( faces[i].x, faces[i].y + faces[i].height*0.37 );
            cv::Point rightPt( faces[i].x + faces[i].width, faces[i].y + faces[i].height*0.37 );
            cv::Point topPt( faces[i].x + faces[i].width*0.5, faces[i].y);
            cv::Point bottomPt( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height);
            cv::line(image, leftPt, rightPt, cv::Scalar( 0, 0, 0 ), 1, 1, 0);
            cv::line(image, topPt, bottomPt, cv::Scalar( 0, 0, 0 ), 1, 1, 0);

            //-- eyes circles
            cv::circle(image, rightEyePt, 0.06*faces[i].width, cv::Scalar( 255, 255, 255 ), 1, 8, 0);
            cv::circle(image, leftEyePt, 0.06*faces[i].width, cv::Scalar( 255, 255, 255 ), 1, 8, 0);

            //-- eyes line & center
            cv::line(image, leftEyePt, rightEyePt, cv::Scalar( 0, 0, 255 ), 1, 1, 0);
            cv::circle(image, eyeCenterPt, 2, cv::Scalar( 0, 0, 255 ), 3, 1, 0);
        }
    }
    return image;
}
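The space-coordinate step above is plain pinhole geometry: both eye x-coordinates are normalized by the focal length f, so the depth falls out of similar triangles as Z = eyesGap / (normRightEye - normLeftEye) = f * eyesGap / (xRight - xLeft). A small sketch with made-up numbers.

// Depth from interocular distance, as used in detectEyes() above.
// Example (values invented): f = 700 px, eyesGap = 6.5 cm, eyes 100 px apart
// => Z = 700 * 6.5 / 100 = 45.5 cm from the camera.
float depthFromEyes( float f, float eyesGap, float xLeft, float xRight )
{
    return f * eyesGap / ( xRight - xLeft );   // result is in the units of eyesGap
}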
Example #27
// Function to detect and draw any faces present in an image
void FaceDetectModuleExt::detectAndDraw(cv::Mat& img, cv::CascadeClassifier& cascade, cv::CascadeClassifier& nestedCascade, double scale) {
	if (cascade.empty()) {
		return;
	}
	int i = 0;
	double t = 0;
	std::vector<cv::Rect> faces;
	const static cv::Scalar colors[] =  { CV_RGB(0,0,255),
			CV_RGB(0,128,255),
			CV_RGB(0,255,255),
			CV_RGB(0,255,0),
			CV_RGB(255,128,0),
			CV_RGB(255,255,0),
			CV_RGB(255,0,0),
			CV_RGB(255,0,255)} ;
	cv::Mat gray, smallImg(cvRound(img.rows/scale), cvRound(img.cols/scale), CV_8UC1);

	cv::cvtColor( img, gray, CV_BGR2GRAY );
	cv::resize( gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR );
	cv::equalizeHist( smallImg, smallImg );

	t = (double)cvGetTickCount();
	cascade.detectMultiScale( smallImg, faces,
			1.1, 2, 0
			//|CV_HAAR_FIND_BIGGEST_OBJECT
			//|CV_HAAR_DO_ROUGH_SEARCH
			|CV_HAAR_SCALE_IMAGE
			,
			cv::Size(30, 30) );
	t = (double)cvGetTickCount() - t;
	//printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
	std::cout << "[FaceDetect] detection time = " << t/((double)cvGetTickFrequency()*1000.) << " ms" << std::endl;
	for(std::vector<cv::Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
	{
		cv::Mat smallImgROI;
		std::vector<cv::Rect> nestedObjects;
		cv::Point center;
		cv::Scalar color = colors[i%8];
		int radius;
//		center.x = cvRound((r->x + r->width*0.5)*scale);
//		center.y = cvRound((r->y + r->height*0.5)*scale);
//		radius = cvRound((r->width + r->height)*0.25*scale);
//		cv::circle( img, center, radius, color, 3, 8, 0 );

		// faces were detected on smallImg, so scale the rectangle back up before drawing
		cv::Rect scaledRect(cvRound(r->x*scale), cvRound(r->y*scale),
				cvRound(r->width*scale), cvRound(r->height*scale));
		cv::rectangle(img, scaledRect, color, 3, 8, 0);
		if( nestedCascade.empty() )
			continue;
		smallImgROI = smallImg(*r);
		nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
				1.1, 2, 0
				//|CV_HAAR_FIND_BIGGEST_OBJECT
				//|CV_HAAR_DO_ROUGH_SEARCH
				//|CV_HAAR_DO_CANNY_PRUNING
				|CV_HAAR_SCALE_IMAGE
				,
				cv::Size(30, 30) );
		for(std::vector<cv::Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
		{
			center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
			center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
			radius = cvRound((nr->width + nr->height)*0.25*scale);
			cv::circle( img, center, radius, color, 3, 8, 0 );
		}
	}
	cv::imshow( "result", img );
	cv::waitKey(10);
}
Example #28
int main(int argc, char **argv)
{
    ros::init(argc, argv, "people_counting");
    ros::NodeHandle node;

    ros::param::param<std::string>(ros::this_node::getName() + "dataset", fileDataset_param, "haarcascade_upperbody.xml");
    ros::param::param<double>(ros::this_node::getName() + "scaleFactor", scaleFactor_param, 0.1);
    ros::param::param<int>(ros::this_node::getName() + "minNeighbors", minNeighbors_param, 0);
    // ros::param::param<double>("minSize", minSize_param, 0);
    ros::param::param<double>(ros::this_node::getName() + "maxSize", maxSize_param, 1.0);

    // argc--; argv++;
    // while( argc && *argv[0] == '-' )
    // {
    //     if( !strcmp(*argv, "-fps") && ( argc > 1 ) )
    //     {
    //         fps = atof(*(argv+1));
    //         printf("With %ffps\n",fps);
    //         argc--; argv++;
    //     }
    //     if( !strcmp(*argv, "-crowd_dataset") && ( argc > 1 ) )
    //     {
    //         file_dataset = *(argv+1);
    //         printf("Using dataset form %s\n",file_dataset.c_str());
    //         argc--; argv++;
    //     }
    //     argc--; argv++;
    // }

    std::vector<cv::Rect> objects;
    std::vector<cv::Scalar> colors;
    cv::RNG rng(12345);

    img_msg.encoding = "bgr8";

    if ( ! classifier.load(fileDataset_param))
        ROS_ERROR("Can't open %s", fileDataset_param.c_str());

    ros::Publisher img_pub = node.advertise<sensor_msgs::Image>("crowd/count_img", 100);
    ros::Publisher count_pub = node.advertise<std_msgs::UInt16>("crowd/people_count", 100);

    image_transport::ImageTransport it(node);
    image_transport::Subscriber img_sub = it.subscribe("image", 1, imageCallback);

    ros::Rate loop_rate(fps);

    while (ros::ok())
    {
        if (! img_msg.image.empty()) {
            classifier.detectMultiScale(img_msg.image, objects, 1.0+scaleFactor_param, minNeighbors_param+1, 0, cv::Size(), maxSize);
            for (int j = 0; j < objects.size(); ++j) {
                if ( j > max_detected) {
                    colors.push_back(cv::Scalar(rng.uniform(0,255),rng.uniform(0, 255),rng.uniform(0, 255)));
                    max_detected = j;
                }
                cv::rectangle(img_msg.image, objects[j], colors[j], 2);
            }
            people_count.data = (u_int16_t)objects.size();
        }
        img_pub.publish(img_msg.toImageMsg());
        count_pub.publish(people_count);

        ros::spinOnce();
        loop_rate.sleep();
    }

    return 0;
}
Example #29
/*
 * Class:     io_github_melvincabatuan_fullbodydetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_fullbodydetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access source array data... OK
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat for YUV420sp source and output BGRA 
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "haarcascade_fullbody.xml");       
    
      /* Load the full body cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){ 
           LOGE("Error loading full body cascade"); 
           abort(); 
       };

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Detection took cat_face_cascade.detectMultiScale() time = 655.334471 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2 , 0 , Size(30, 30) ); // Scaling factor = 1.1;  minNeighbors = 2 ; flags = 0; minimumSize = 30,30

      // cat_face_cascade.detectMultiScale() time = 120.117185 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3 , 0 , Size(64, 64));

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // Size(double width, double height) 

      // The scaleFactor parameter determines how much the classifier will be scaled up after each run.
      // The minNeighbors parameter specifies how many positive neighbors a positive face rectangle should have to be considered a possible match;
      // when a potential face rectangle is moved a pixel and does not trigger the classifier any more, it is most likely a false positive.
      // Face rectangles with fewer positive neighbors than minNeighbors are rejected.
      // If minNeighbors is set to zero, all potential face rectangles are returned.
      // The flags parameter is from the OpenCV 1.x API and should always be 0.
      // minSize specifies the smallest face rectangle we're looking for.

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through all detected bodies and draw an ellipse around each
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Iterate through all faces and detecting eyes took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
   /// Release Java byte buffer and unlock backing bitmap
   pEnv-> ReleasePrimitiveArrayCritical(pSource,source,0);
   if (AndroidBitmap_unlockPixels(pEnv, pTarget) < 0) abort();
}
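The parameter notes in the previous example apply to any cascade. A self-contained sketch mapping each note onto its argument; the cascade path and the image name are placeholders.

#include <vector>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::CascadeClassifier cascade;
    if( !cascade.load( "haarcascade_fullbody.xml" ) )   // placeholder path
        return 1;

    cv::Mat img = cv::imread( "input.jpg", CV_LOAD_IMAGE_GRAYSCALE );  // placeholder image
    if( img.empty() )
        return 1;

    std::vector<cv::Rect> hits;
    cascade.detectMultiScale( img, hits,
                              1.2,                  // scaleFactor: pyramid growth per pass
                              2,                    // minNeighbors: supporting raw hits required
                              0,                    // flags: legacy, keep 0 in the C++ API
                              cv::Size( 14, 28 ) ); // minSize: smallest object searched for
    for( size_t i = 0; i < hits.size(); i++ )
        cv::rectangle( img, hits[i], cv::Scalar( 255 ) );
    cv::imwrite( "output.png", img );
    return 0;
}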
Example #30
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#if OPENCV3
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t i = 0; i < faces.size(); i++ )
      {
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#if OPENCV3
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }