Example 1
void 
RoboyVision::recognizeFaces()
{

	// Resizing the face is necessary for Eigenfaces and Fisherfaces. You can easily
	// verify this by reading through the face recognition tutorial that comes with
	// OpenCV. Resizing IS NOT needed for Local Binary Patterns Histograms, so how you
	// prepare the input data really depends on the algorithm used.
	//
	// I strongly encourage you to play around with the algorithms and see which works
	// best in your scenario; LBPH should always be a contender for robust face recognition.
	//
	// Since I am showing the Fisherfaces algorithm here, I also show how to resize the
	// face you have just found:

	faceRecognitionPredictions.clear();
	faceRecognitionConfidences.clear();

	for(size_t i = 0; i < faces.size(); i++) {

		cv::Rect face_i = faces[i];

		// Crop the face from the image. So simple with OpenCV C++:
		cv::Mat face = grayFrame(face_i);

		cv::Mat face_resized;

		cv::resize(face, face_resized, cv::Size(RECOGNITION_FACE_WIDTH, RECOGNITION_FACE_HEIGHT), 0, 0, cv::INTER_CUBIC);

		int prediction = -1;
		double confidence = 0.0;

		recognitionModel->predict(face_resized, prediction, confidence);
		faceRecognitionPredictions.push_back(prediction);
		faceRecognitionConfidences.push_back(confidence);


	}

}
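
Since the snippet assumes an already trained model, here is a minimal sketch of how such a recognitionModel could be built for Fisherfaces. The factory call below is the OpenCV 3.x/4.x contrib API (cv::face::FisherFaceRecognizer::create(); 2.4-era builds use cv::createFisherFaceRecognizer() instead), and the file names and labels are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

// Hypothetical training routine; every image must be grayscale and share the
// fixed size used again at prediction time, and Fisherfaces needs at least
// two distinct labels to train at all.
cv::Ptr<cv::face::FaceRecognizer> trainRecognitionModel(int width, int height)
{
	std::vector<cv::Mat> images;
	std::vector<int> labels;

	const char *files[] = { "alice_0.png", "alice_1.png", "bob_0.png", "bob_1.png" };
	const int ids[]     = { 0, 0, 1, 1 };

	for (int i = 0; i < 4; i++) {
		cv::Mat img = cv::imread(files[i], cv::IMREAD_GRAYSCALE);
		cv::resize(img, img, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
		images.push_back(img);
		labels.push_back(ids[i]);
	}

	cv::Ptr<cv::face::FaceRecognizer> model = cv::face::FisherFaceRecognizer::create();
	model->train(images, labels);
	return model;
}

The returned model can then be stored in recognitionModel and queried with predict() exactly as recognizeFaces() does above.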
Example 2
 void GTVideo::snakeTracking2()
 {    
     if(foregroundMask.isEmpty() || abnormallist.isEmpty())
     {
         QMessageBox msgBox;
         msgBox.setText("Video source and initial abnormal range must be set before tracking");
         msgBox.exec();
         return;
     }

     int totalAbnormalCount = getAbnormalFrameCount();
     int currCount = 0;
     QProgressDialog progress("Generating Groundtruth Images...", "Abort", 0, totalAbnormalCount-1);
     progress.setWindowModality(Qt::WindowModal);
     progress.setValue(currCount);

     // Initialize the MATLAB Compiler Runtime global state
     if (!mclInitializeApplication(NULL,0))
     {
         std::cerr << "Could not initialize the application properly."
                   << std::endl;
         return;
     }
     // Initialize the segmentation library
     if (!libsegInitialize())
     {
         std::cerr << "Could not initialize the library properly."
                   << std::endl;
         mclTerminateApplication();
         return;
     }

     // initialize the groundtruth; QVector::fill() copies only the cv::Mat
     // header, so all entries share this one zeroed buffer until they are
     // replaced by setGroundtruth() below
     cv::Mat eye = foregroundMask.at(0).clone();
     eye.setTo(cv::Scalar(0));
     grdtruth.fill(eye);

     for (int iAb=0; iAb<abnormallist.size(); iAb++)
     {
         uint start = abnormallist[iAb].getStart();
         uint end = abnormallist[iAb].getEnd();

         // initialize the segmentation mask
         cv::Mat initmask = abnormallist[iAb].getROI();

         // set tracked object as abnormal ROI
         for (uint iFrame=start; iFrame<=end; iFrame++)
         {
             currCount++;
             progress.setValue(currCount);
             if(progress.wasCanceled())
             {
                 // shut down the library and the runtime before bailing out
                 libsegTerminate();
                 mclTerminateApplication();
                 return;
             }
             else
             {
                 cv::Mat grayFrame(foregroundMask.at(iFrame));
                 // generate groundtruth and update mask using that in previous frame
                 cv::Mat resultImage = segmentByActiveContour(grayFrame, initmask, 200, false);
                 if (resultImage.empty())
                 {
                     qDebug() << QString("No groundtruth generated for frame %1").arg(iFrame);
                     continue;
                 }
                 initmask = resultImage.clone();

                 // set groundtruth result
                 setGroundtruth(resultImage, iFrame);
             }
         }
     }
     progress.setValue(totalAbnormalCount-1);

     libsegTerminate();
     mclTerminateApplication();
 }
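
Every early exit from snakeTracking2() has to repeat the teardown calls, which is easy to forget. A small RAII guard makes the shutdown automatic; this is only a sketch built on the same mclInitializeApplication()/libsegInitialize() pairs the function already uses (the libseg.h and mclmcrrt.h header names are assumed to follow the MATLAB Compiler's usual conventions):

 #include <iostream>
 #include "mclmcrrt.h"   // MATLAB Compiler Runtime entry points
 #include "libseg.h"     // header generated by the MATLAB Compiler for libseg

 // Teardown runs in reverse order of setup on every exit path, so early
 // returns can no longer leak the runtime.
 struct LibsegSession
 {
     bool ok;

     LibsegSession() : ok(false)
     {
         if (!mclInitializeApplication(NULL, 0))
         {
             std::cerr << "Could not initialize the application properly." << std::endl;
             return;
         }
         if (!libsegInitialize())
         {
             std::cerr << "Could not initialize the library properly." << std::endl;
             mclTerminateApplication();
             return;
         }
         ok = true;
     }

     ~LibsegSession()
     {
         if (ok)
         {
             libsegTerminate();           // library first...
             mclTerminateApplication();   // ...then the runtime
         }
     }
 };

With such a guard declared at the top of snakeTracking2(), the cancel branch could simply return and the destructor would handle the shutdown in the right order.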
Example 3
 void GTVideo::snakeTracking()
 {
     if(foregroundMask.isEmpty() || abnormallist.isEmpty())
     {
         qDebug() << "Video source and initial abnormal range must be set before tracking";
         return;
     }

     // initialize the groundtruth with a zeroed frame; clone() so that setTo()
     // does not wipe the original foreground mask frame
     cv::Mat eye = foregroundMask.at(0).clone();
     eye.setTo(cv::Scalar(0));
     grdtruth.fill(eye);

     for (int iAb=0; iAb<abnormallist.size(); iAb++)
     {
         uint start = abnormallist[iAb].getStart();
         uint end = abnormallist[iAb].getEnd();
         const QVector<cv::Point>& boundaryPoints = abnormallist[iAb].getBoundaryPoints();

         // construct a mutable array of CvPoint because cvSnakeImage() updates
         // the points in place for each frame
         const int npts = boundaryPoints.size();
         std::vector<CvPoint> pts_snake(npts);
         for (int i=0; i<npts; i++)
         {
             pts_snake[i] = cvPoint(boundaryPoints[i].x, boundaryPoints[i].y);
         }

         // set parameters for cvSnakeImage()
         float alpha = 0.5f;
         float beta = 0.5f;
         float gamma = 0.5f;
         int coeff_usage = CV_VALUE;
         CvSize win = cvSize(21,21);
         CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.5);

         // set tracked object as abnormal ROI
         for (uint iFrame=start; iFrame<=end; iFrame++)
         {
             // update the boundary using that from the previous frame
             cv::Mat grayFrame(foregroundMask[iFrame]);
             IplImage ipFrame = grayFrame;  // header conversion only, no pixel copy
             cvSnakeImage(&ipFrame, pts_snake.data(), npts, &alpha, &beta, &gamma, coeff_usage, win, criteria, 1);

             cvSaveImage("frame.tif", &ipFrame);  // debug dump of the tracked frame


             // convert boundary points from CvPoint[] to vector<Point>
             std::vector<cv::Point> stdBoundPoints;
             for (int i=0; i<npts; i++)
             {
                 cv::Point p(pts_snake[i].x, pts_snake[i].y);
                 stdBoundPoints.push_back(p);
             }

             // rasterize the tracked polygon into a zeroed frame to get the ROI
             // mask; use a fresh Mat so the foreground mask itself is not overwritten
             cv::Mat roi = cv::Mat::zeros(foregroundMask[iFrame].size(), foregroundMask[iFrame].type());
             const cv::Point *pAddBoundary = stdBoundPoints.data();
             const cv::Point **pBoundaryPoints = &pAddBoundary;
             cv::fillPoly(roi, pBoundaryPoints, &npts, 1, cv::Scalar(255));

             setGroundtruth(roi, iFrame);

             cv::imwrite("output.tif", roi);
         }
     }
 }
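
Two portability notes on this last example. cvSnakeImage() lives in OpenCV's legacy C API and was removed in the 3.x series, so the code only builds against 2.4-era headers. The pointer-to-pointer call to cv::fillPoly() mirrors that same old C API; builds that provide the InputArrayOfArrays overload can rasterize the contour without the intermediate pointers. A sketch, assuming such a build:

 #include <opencv2/core.hpp>
 #include <opencv2/imgproc.hpp>
 #include <vector>

 // Fill a tracked contour into a fresh zeroed mask (sketch).
 cv::Mat contourToMask(const std::vector<cv::Point> &boundary, const cv::Size &frameSize)
 {
     cv::Mat roi = cv::Mat::zeros(frameSize, CV_8UC1);
     std::vector<std::vector<cv::Point> > contours(1, boundary);
     cv::fillPoly(roi, contours, cv::Scalar(255));
     return roi;
 }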