Example #1
Model Model::create(string path){

    ostringstream s4, s5;

    // Register the new model in the database; constructing QSqlQuery with a
    // statement executes it immediately, so lastInsertId() is available below.
    s4 << "INSERT INTO Model(directory) VALUES('" + path + ".txt');";
    QSqlQuery query4(QString(s4.str().c_str()));
    int idModel = query4.lastInsertId().toInt();

    cout << s4.str() << endl;

    // Generate the CSV listing of training images for this model via a shell script
    string command = "bash createCSV.sh " + path + " > FaceRecognition/modelos.csv";
    cout << command << endl;
    system(command.c_str());

    vector<Mat> images;
    vector<int> labels;

    try{
        read_csv("FaceRecognition/modelos.csv", images, labels);
    }catch (cv::Exception& e) {
        cerr << "ERROR OPENING CSV FILE" << endl;
        exit(1);
    }

    s5 << "INSERT INTO ModelStudent("
          "id_model,"
          "id_student) VALUES";

    set<int> ids(labels.begin(), labels.end());
    map<int, int> pairs;

    for(auto it = ids.begin(); it != ids.end(); ++it){
        Alumno* a = Alumno::create("", string(path + "/" + to_string(*it)));
        pairs[(*it)] = a->getId();
        s5 << "(" << idModel << ", " << a->getId() << ")";
        if(next(it) == ids.end()) s5 << ";";
        else s5 << ", \n";
    }
    cout << s5.str() << endl;
    // Remap the original CSV labels to the database student ids
    vector<int> newLabels;
    for(size_t i = 0; i < labels.size(); i++){
        newLabels.push_back(pairs[labels[i]]);
    }

    // Normalise the training images: histogram equalisation, then resize to 48x48
    for(size_t i = 0; i < images.size(); i++){
        equalizeHist(images[i], images[i]);
        cv::resize(images[i], images[i], Size(48,48));
    }


    QSqlQuery query5(QString(s5.str().c_str()));

    // Eigenfaces recognizer: 0 = choose the number of components automatically,
    // 3000 = distance threshold used during prediction
    Model model = Model(createEigenFaceRecognizer(0, 3000));
    model->train(images, newLabels);
    model->save(string(path + ".txt"));
    cout << "END" << endl;

    return model;
}
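Both Model::create variants in this listing call a read_csv() helper that is not shown. A minimal sketch follows, modelled on the usual "path;label" format of the OpenCV FaceRecognizer tutorials; the separator and the exact output of createCSV.sh are assumptions.

static void read_csv(const string& filename, vector<Mat>& images,
                     vector<int>& labels, char separator = ';') {
    // Assumes <fstream>, <sstream> and <cstdlib> are included and the std/cv namespaces are in scope
    ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        CV_Error(CV_StsBadArg, "No valid input file was given, please check the filename.");
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);        // image path up to the separator
        getline(liness, classlabel);             // numeric label after the separator
        if (!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));   // load as grayscale
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}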
void ofxFaceRecognizer::setup(int method_used, int _maxFaces, bool bAlreadySavedModel, string folderName) {

    // Eigen takes much longer to load and to generate, and produces a much larger .yml file
    string method_names[3] = {"eigen","fisher","lbph"};
    
    // Create a FaceRecognizer and train it on the given images:
    methodId = method_used;
    methodName = method_names[method_used];
    if(methodId == 0){
        model = createEigenFaceRecognizer();
    }
    if(methodId == 1){
        model = createFisherFaceRecognizer();
    }
    if(methodId == 2){
        model = createLBPHFaceRecognizer();
    }
    
    maxFaces = _maxFaces;

    // By default the training images are expected to be 150x150 pixels;
    // loadTrainingImages() will reset these dimensions if necessary.
    image_width = 150;
    image_height = 150;

    // Load the training images
    loadTrainingImages(folderName,maxFaces);

    // Quit if there are not enough images to train or test with.
    if(allTrainingMats.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }

    string compiledDatabasePath = ofToDataPath("model_"+methodName+"_"+ofToString(maxFaces)+"_"+folderName+".yml");
   
    if(bAlreadySavedModel){
        cout<<"model .yml supposedly existing"<<endl;
        model->load(compiledDatabasePath);
        
        cout<<"loaded "<<maxFaces<<" faces with model "<<methodName<<endl;
    }else{
        cout<<"start training new model. this might take a very long time"<<endl;
        cout<<"compiledDatabasePath "<<compiledDatabasePath<<endl;
        cout<<"more so for fisher than eigen"<<endl;
        model->train(allTrainingMats, allTrainingLabels);
        model->save(ofToDataPath(compiledDatabasePath));
        cout<<"trained and saved .yml with "<<maxFaces<<" faces with model "<<methodName<<endl;
    }
    
}
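Neither setup path above shows how the trained or loaded model is queried afterwards. The sketch below illustrates the FaceRecognizer::predict() call, assuming the test face has already been converted to grayscale and resized to the model's training dimensions; the function name is illustrative.

int predictFaceLabel(const Ptr<FaceRecognizer>& model, const Mat& grayFace)
{
    int predictedLabel = -1;
    double confidence = 0.0;
    // predict() returns the closest training label and a distance-based
    // confidence (lower values mean a better match for Eigen/Fisher faces).
    model->predict(grayFace, predictedLabel, confidence);
    return predictedLabel;
}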
int main( int argc, char** argv )
{
	// Initial time measurement
	clock_t t0, t_load, t_loop, t_loop0;
	double dtime;
	t0 = clock();

	// Declaration of variables
	CascadeClassifier face_cascade;				// Cascade Classifier
	Mat captureFrame, grayscaleFrame;			// Captured and converted to gray Frames
	double x_face_pos, y_face_pos, area_face;	// Coordinates of the detected face
	vector< Rect_<int> > faces;					// Vector of faces
	#ifdef RASPBERRY
		RaspiCamCvCapture * captureDevice;		// Video input
	#else
		CvCapture * captureDevice;				// Video input
	#endif
	char sTmp[255];

#ifdef TRACE
	sprintf(sTmp,"\n Directorio de ejecucion del programa: ");
	trace (sTmp);
	cout << get_ProgramDirectory();
#endif

#ifdef RECOGNITION
	// Declaration of variables Face recognition
	string people[MAX_PEOPLE];					// Name of each person in the face recognition model
	int im_width, im_height;					// Height and width of the first image of the face recognition model
	int prediction_seuil;						// Prediction threshold
	Ptr<FaceRecognizer> model;					// Face recognition model

	// Prediction limit depending on the device
	#ifdef EIGENFACES
		prediction_seuil = 10;
	#else
		prediction_seuil = 1000;
	#endif

	// Model of face recognition
	#ifdef EIGENFACES
		model = createEigenFaceRecognizer();
	#else
		model = createFisherFaceRecognizer();
	#endif

	// Read measures file
	read_measures_file(im_width, im_height, '=');

	// Read people file
	read_people_file(people, '(', ')', '=');

	// Load model
	load_model_recognition(model);
	t_load = clock();

	#ifdef DEBUG
		dtime = difftime(t_load,t0);
		sprintf(sTmp,"\n (Face Tracking) tiempo de carga del modelo = ");
		debug(sTmp);
		cout << print_time(dtime);
	#endif

#endif

	// Video input depending on the device
	#ifdef RASPBERRY
		captureDevice = raspiCamCvCreateCameraCapture(0); // Index doesn't really matter
	#else
		captureDevice = cvCreateCameraCapture(0);
	#endif

	// Load of Haar Cascade
	if (!load_haar_cascade(face_cascade)) {	return -1;}

	// Create new window
	#ifdef SHOW
		cvNamedWindow("Face tracking", 1);
	#endif

	do {
		t_loop0 = clock();

		#ifdef RASPBERRY
				IplImage* image = raspiCamCvQueryFrame(captureDevice);	// Get images from the video input
		#else
				IplImage* image = cvQueryFrame(captureDevice);			// Get images from the video input
		#endif
		captureFrame = cvarrToMat(image);			// Convert images to Mat
		cvtColor(captureFrame, grayscaleFrame, CV_RGB2GRAY);	// Convert the image to gray scale

		// Detection and Face Recognition
		face_detection(face_cascade, grayscaleFrame, captureFrame, &faces, x_face_pos, y_face_pos, area_face);

		#ifdef RECOGNITION
				// Detection and Face Recognition
				face_recognition(people, grayscaleFrame, captureFrame, &faces, im_width, im_height, model,
								prediction_seuil, x_face_pos, y_face_pos, area_face);
		#endif

		// Display results
		#ifdef SHOW
				imshow("Face tracking", captureFrame);
		#endif

		t_loop = clock();
		#ifdef DEBUG
			dtime = difftime(t_loop,t_loop0);
			sprintf(sTmp,"\n (Face Tracking) tiempo del bucle del reconocimiento de cara = ");
			debug(sTmp);
			cout << print_time(dtime);
		#endif
	} while(cvWaitKey(10) < 0);

	// Destroy window
	#ifdef SHOW
		cvDestroyWindow("Face tracking");
	#endif

	#ifdef TRACE
		trace ("\n");
	#endif

	#ifdef RASPBERRY
		raspiCamCvReleaseCapture(&captureDevice);
	#else
		cvReleaseCapture(&captureDevice);
	#endif

	return 0;
}
Example #4
	//////////////////// FaceRecognizer ////////////////////
	ICLASS_API FaceRecognizer* __stdcall Create_EigenFaceRecognizer(int num_components, double threshold)
	{
		Ptr<FaceRecognizer> r = createEigenFaceRecognizer(num_components, threshold);
		// addref() keeps the recognizer alive after the local Ptr goes out of scope,
		// so the raw pointer handed back to the caller remains valid.
		r.addref();
		return r;
	}
void PCAFaceRecognition() {
#define NUMBER_OF_FACES 10
#define NUMBER_OF_IMAGES_PER_FACE 3
    vector<Mat> known_face_images;
    vector<int> known_labels;
    vector<Mat> unknown_face_images;
    vector<int> unknown_labels;
    // Load greyscale face images (which are from http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html)
	char file_name[40];
	Mat original_images,row_so_far,image_so_far,temp_image1;
	int face_number = 1;
	for (; face_number<=NUMBER_OF_FACES; face_number++)
	{
		for (int image_number = 1; image_number<=NUMBER_OF_IMAGES_PER_FACE; image_number++)
		{
			sprintf(file_name,"Media/att_faces/s%d/%d.pgm",face_number,image_number);
			Mat current_image = imread(file_name,0);
			if (image_number>1)
			{
				known_face_images.push_back(current_image);
				known_labels.push_back(face_number);
			}
			else
			{
				// Keep the last image of each face as a test case.
				unknown_face_images.push_back(current_image);
				unknown_labels.push_back(face_number);
			}
			cvtColor(current_image, current_image, CV_GRAY2BGR);
			if (image_number == 2)
			{
				if (face_number%10 == 1)
				{
					if (face_number > 1)
					{
						if (face_number == 11)
							original_images = row_so_far.clone();
						else
							original_images = JoinImagesVertically( original_images, "", row_so_far, "", 1 );
					}
					row_so_far = current_image.clone();
				}
				else
				{
					char image_number_string[10],previous_image_number_string[10];
					sprintf(previous_image_number_string,"%d",face_number-1);
					sprintf(image_number_string,"%d",face_number);
					row_so_far = JoinImagesHorizontally( row_so_far, (face_number%10==2)?previous_image_number_string:"", current_image, image_number_string, 1 );
				}
			}
		}
	}
	if (face_number <= 11)
		original_images = row_so_far.clone();
	else original_images = JoinImagesVertically( original_images, "", row_so_far, "", 1 );
	imshow("Known face images", original_images);
	imwrite("pca_unknown_faces.bmp",original_images);
    Ptr<FaceRecognizer> face_recogniser = createEigenFaceRecognizer();
    face_recogniser->train(known_face_images, known_labels);
	char previous_face_number_string[100]="";
	char face_number_string[100]="";
	int correct_count = 0;
	for (face_number = 0; face_number < (int)unknown_face_images.size(); face_number++)
	{
		int predicted_face_number = 0;
		double recognition_confidence = 0.0;
		face_recogniser->predict(unknown_face_images[face_number],predicted_face_number,recognition_confidence);
		if (unknown_labels[face_number]==predicted_face_number)
			correct_count++;
		strcpy(previous_face_number_string,face_number_string);
		cvtColor(unknown_face_images[face_number], temp_image1, CV_GRAY2BGR);
		sprintf(face_number_string,"%d (%.0f)",predicted_face_number,recognition_confidence);
		Point location(2,15);
		putText( temp_image1, face_number_string, location, FONT_HERSHEY_SIMPLEX, 0.4, unknown_labels[face_number]==predicted_face_number?Scalar( 0,255,0 ):Scalar( 0,0,255 ) );
		if (face_number%10 == 0)
		{
			if (face_number > 10)
				image_so_far = JoinImagesVertically( image_so_far, "", row_so_far, "", 1 );
			else image_so_far = row_so_far.clone();
			row_so_far = temp_image1.clone();
		}
		else 
		{
			row_so_far = JoinImagesHorizontally( row_so_far, "", temp_image1, "", 1 );
		}
	}
	if (face_number > 10)
		image_so_far = JoinImagesVertically( image_so_far, "", row_so_far, "", 1 );
	else image_so_far = row_so_far.clone();
	char output[300];
	sprintf(output,"OVERALL Recognised %d/%d (with %d training image%s of %d subjects)",correct_count,unknown_face_images.size(),NUMBER_OF_IMAGES_PER_FACE-1,(NUMBER_OF_IMAGES_PER_FACE-1==1)?"":"s",NUMBER_OF_FACES);
	Point location(10,image_so_far.rows-10);
	putText( image_so_far, output, location, FONT_HERSHEY_SIMPLEX, 0.4, Scalar( 255,0,0 ) );
	imshow("Recognised faces using PCA (Eigenfaces)", image_so_far);
}
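JoinImagesHorizontally() and JoinImagesVertically() used above are utility routines supplied alongside this example rather than OpenCV functions. A rough sketch of the horizontal join (label drawing omitted) is given below under that assumption; the name and spacing handling are illustrative only.

Mat JoinHorizontallySketch(const Mat& left, const Mat& right, int spacing = 1)
{
    // Canvas tall enough for the larger image, wide enough for both plus a gap
    int rows = std::max(left.rows, right.rows);
    Mat result(rows, left.cols + spacing + right.cols, left.type(), Scalar::all(255));
    // Copy each image into its region of the canvas
    left.copyTo(result(Rect(0, 0, left.cols, left.rows)));
    right.copyTo(result(Rect(left.cols + spacing, 0, right.cols, right.rows)));
    return result;
}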
Example #6
Model Model::create(vector<Alumno *> students){

    ostringstream s, s2;

    // Ask the user where to save the trained model
    QString sDir = QFileDialog::getSaveFileName(0, "Model Name", "/home");

    s << "INSERT INTO Model("
         "directory) VALUES('" << sDir.toStdString().c_str() << "');";

    QSqlQuery query(QString(s.str().c_str()));

    cout << s.str() << endl;

    int idModel = query.lastInsertId().toInt();

    s2 << "INSERT INTO ModelStudent("
          "id_student,"
          "id_model) VALUES";

    system("echo '' > FaceRecognition/modelos.csv");

    for(size_t i = 0; i < students.size(); i++){

        // Append each student's images to the CSV, labelled with the student id
        string com ="bash createCSV.sh " + students[i]->getDirectory() + " " +
                to_string(students[i]->getId()) + " >> FaceRecognition/modelos.csv";
        cout << com << endl;
        system(com.c_str());

        s2 << "(" << students[i]->getId() << ", " << idModel << ")";
        if(i == students.size() - 1){
            s2 << ";";
        } else s2 << ", \n";
    }

    cout << s2.str() << endl;
    QSqlQuery query2(QString(s2.str().c_str()));

    vector<Mat> images;
    vector<int> labels;

    try{
        read_csv("FaceRecognition/modelos.csv", images, labels);
    }catch (cv::Exception& e) {
        cerr << "ERROR OPENING CSV FILE" << endl;
        exit(1);
    }

    // Normalise the training images: histogram equalisation, then resize to 48x48
    for(size_t i = 0; i < images.size(); i++){
        equalizeHist(images[i], images[i]);
        cv::resize(images[i], images[i], Size(48,48));
    }

    // Eigenfaces recognizer: 0 = choose the number of components automatically,
    // 3000 = distance threshold used during prediction
    Model model = Model(createEigenFaceRecognizer(0, 3000));
    model->train(images, labels);
    model->save(sDir.toStdString());
    model.setId(idModel);
    model.setDirectory(sDir.toStdString());
    model.setStudents(students);

    cout << "END" << endl;

    return model;

}