Code example #1
File: cvface.cpp  Project: codedhead/facerec
Ptr<FaceRecognizer> Trainer::load(const char* fname,int type)
{
	// Create a recognizer of the requested type, then load the trained model.
	Ptr<FaceRecognizer> model;
	if(type==0)
		model=createFisherFaceRecognizer();
	else
		model=createLBPHFaceRecognizer();

	model->load(fname);

	// The class names are stored next to the model in a "<fname>.labels" text file.
	char label_fname[MAX_PATH];
	sprintf(label_fname,"%s.labels",fname);
	FILE* fp=fopen(label_fname,"rt");
	if(fp==0)
	{
		printf("Failed to load the .labels file, please re-train the model\n");
		return NULL;
	}
	int class_count=0;
	fscanf(fp,"%d",&class_count);
	fgetc(fp); // skip the newline after the count

	names.clear();
	char buf[256];
	for(int i=0;i<class_count;++i)
	{
		fscanf(fp,"%255s",buf);
		names.push_back(buf);
	}
	fclose(fp);

	return model;
}
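For reference, the "<fname>.labels" sidecar read here (and written by Trainer::train in code example #7 below) is a plain text file: the number of classes on the first line, followed by one name per line. A small, purely hypothetical example:

3
alice
bob
carol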
Code example #2
void FacialFeatureRecognizerTest::compareSmileFrownSimilarFacesTest()
{
	// Two expressions of the same person should still compare as similar faces.
	Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
	FacialFeatureRecognizer recognizer(model, 0, NULL, NULL, NULL);
	QString trainingFile("/home/zane/Documents/COS301/training.xml");
	recognizer.loadTrainingFromXML(trainingFile);

	Mat face1 = imread("../../testFiles/FaceRec/barack_smile.jpg", CV_LOAD_IMAGE_UNCHANGED);
	Mat face2 = imread("../../testFiles/FaceRec/barack_frown.jpg", CV_LOAD_IMAGE_UNCHANGED);

	string faceCascade = "/home/zane/Documents/COS301/MainProject/testFiles/haarcascade_frontalface_alt2.xml";
	Filter* faceDetect = new FaceDetectFilter(faceCascade);
	Filter* preProc = new PreProcessingFilter(140, 150);

	ImageData* data1 = new ImageData(face1, 0);
	data1 = faceDetect->filter(data1);
	data1 = preProc->filter(data1);

	ImageData* data2 = new ImageData(face2, 0);
	data2 = faceDetect->filter(data2);
	data2 = preProc->filter(data2);

	// Distances at or below this threshold count as the same person.
	double expected = 5800;
	double actual = recognizer.compareFaces(data1->faces[0], data2->faces[0]);

	QVERIFY(actual <= expected);
}
Code example #3
File: TrackFace.cpp  Project: duguyue100/TrackFace
void TrackFace::training()
{
    // Train a Fisherface model on the collected images and labels, then save it to fn_features.
    model=createFisherFaceRecognizer();

    model->train(images, labels);

    model->save(fn_features);
}
Code example #4
void ofxFaceRecognizer::setup(int method_used, int _maxFaces, bool bAlreadySavedModel, string folderName) {

    // Eigenfaces takes much longer to load and to generate, and it produces a much larger .yml file.
    string method_names[3] = {"eigen","fisher","lbph"};
    
    // Create a FaceRecognizer and train it on the given images:
    methodId = method_used;
    methodName = method_names[method_used];
    if(methodId == 0){
        model = createEigenFaceRecognizer();
    }
    if(methodId == 1){
        model = createFisherFaceRecognizer();
    }
    if(methodId == 2){
        model = createLBPHFaceRecognizer();
    }
    
    //if(_maxFaces > 0){
    
    maxFaces = _maxFaces;
   
 
    // By default the training images are expected to be 150x150 pixels;
    // loadTrainingImages() will reset these values if the images differ.
    image_width = 150;
    image_height = 150;
    // Load the training images
    loadTrainingImages(folderName,maxFaces);
    
    
    string compiledDatabasePath = ofToDataPath("model_"+methodName+"_"+ofToString(maxFaces)+"_"+folderName+".yml");
   
    // Fail early if there are not enough images to train on.
    if(allTrainingMats.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }

    if(bAlreadySavedModel){
        cout<<"loading existing model .yml"<<endl;
        model->load(compiledDatabasePath);

        cout<<"loaded "<<maxFaces<<" faces with model "<<methodName<<endl;
    }else{
        cout<<"start training new model. this might take a very long time"<<endl;
        cout<<"(more so for fisher than eigen)"<<endl;
        cout<<"compiledDatabasePath "<<compiledDatabasePath<<endl;
        // compiledDatabasePath was already resolved with ofToDataPath() above.
        model->train(allTrainingMats, allTrainingLabels);
        model->save(compiledDatabasePath);
        cout<<"trained and saved .yml with "<<maxFaces<<" faces with model "<<methodName<<endl;
    }
    

}
Code example #5
DWORD WINAPI FaceRecog::FaceRecogThread()
{
	vector<Mat> images;
	vector<int> labels;
	vector<string> names;

	// Read in the data (fails if no valid input filename is given, but you'll get an error message):
    try {
        Read_CSV(CSVFileName, images, labels, names);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << CSVFileName << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }

	// Get the unique person names corresponding to the labels
	getUniqStrs(names, m_uniq_names);
	
	m_imwidth = images[0].cols;
	m_imheight = images[0].rows;

	m_pmodel = createFisherFaceRecognizer();
	m_pmodel->train(images, labels);
	//m_pmodel->load("C:\\Users\\yuner\\Desktop\\Fisher_at.yml");

	m_haar_cascade.load(HaarCascadeFileName);

	//Capture a frame and process it in a loop
	while (1)
	{
		processFrame();
		Sleep(16);
	}

	return 0;
}
Code example #6
File: facerecognize.cpp  Project: vbirds/FaceColect
facerecognize::facerecognize()
{
    this->model =  createFisherFaceRecognizer(20);
    //this->model = createLBPHFaceRecognizer(1, 5, 5, 5);
    this->faceDetec = new facedetec();
    this->loaded = false;
    this->readcsved = false;

    this->im_width = 92;
    this->im_height = 112;

    this->output_folder = "./face.xml";

    // Map numeric labels to person initials
    mapint[1] = "WL";
    mapint[2] = "JHZ";
    mapint[3] = "XSY";
    mapint[4] = "ZZC";
    mapint[5] = "CZM";
    mapint[6] = "XD";
    mapint[7] = "YYF";
    mapint[8] = "WYQ";
    mapint[9] = "YY";

}
Code example #7
File: cvface.cpp  Project: codedhead/facerec
void Trainer::train(const char* saveto,int type)
{
	// Create a recognizer of the requested type.
	Ptr<FaceRecognizer> model;
	if(type==0)
		model=createFisherFaceRecognizer();
	else
		model=createLBPHFaceRecognizer();

	printf("\nbegin training...\n");
	model->train(images, labels);
	model->save(saveto);

	// Write the class names to a "<saveto>.labels" file next to the model.
	char label_fname[MAX_PATH];
	sprintf(label_fname,"%s.labels",saveto);
	FILE* fp=fopen(label_fname,"wt");
	if(fp==0)
	{
		printf("Failed to write \"%s\"\n",label_fname);
		return;
	}
	fprintf(fp,"%d\n",(int)names.size());
	for(int i=0,iLen=(int)names.size();i<iLen;++i)
	{
		fprintf(fp,"%s\n",names[i].c_str());
	}
	fclose(fp);
	printf("Trained model saved to \"%s\"\n",saveto);
}
Code example #8
int main( int argc, char** argv )
{
	// Initial time measurement
	clock_t t0, t_load, t_loop, t_loop0;
	double dtime;
	t0 = clock();

	// Declaration of variables
	CascadeClassifier face_cascade;				// Cascade Classifier
	Mat captureFrame, grayscaleFrame;			// Captured and converted to gray Frames
	double x_face_pos, y_face_pos, area_face;	// Coordinates of the detected face
	vector< Rect_<int> > faces;					// Vector of faces
	#ifdef RASPBERRY
		RaspiCamCvCapture * captureDevice;		// Video input
	#else
		CvCapture * captureDevice;				// Video input
	#endif
	char sTmp[255];

#ifdef TRACE
	sprintf(sTmp,"\n Directorio de ejecucion del programa: ");
	trace (sTmp);
	cout << get_ProgramDirectory();
#endif

#ifdef RECOGNITION
	// Declaration of variables Face recognition
	string people[MAX_PEOPLE];					// Each person in the face recognition model
	int im_width, im_height;					// Height and width of the 1st image of the face recognition model
	int prediction_seuil;						// Prediction threshold
	Ptr<FaceRecognizer> model;					// Face recognition model

	// Prediction threshold depending on the recognition method
	#ifdef EIGENFACES
		prediction_seuil = 10;
	#else
		prediction_seuil = 1000;
	#endif

	// Model of face recognition
	#ifdef EIGENFACES
		model = createEigenFaceRecognizer();
	#else
		model = createFisherFaceRecognizer();
	#endif

	// Read measures file
	read_measures_file(im_width, im_height, '=');

	// Read people file
	read_people_file(people, '(', ')', '=');

	// Load model
	load_model_recognition(model);
	t_load = clock();

	#ifdef DEBUG
		dtime = difftime(t_load,t0);
		sprintf(sTmp,"\n (Face Tracking) tiempo de carga del modelo = ");
		debug(sTmp);
		cout << print_time(dtime);
	#endif

#endif

	// Video input depending on the device
	#ifdef RASPBERRY
		captureDevice = raspiCamCvCreateCameraCapture(0); // Index doesn't really matter
	#else
		captureDevice = cvCreateCameraCapture(0);
	#endif

	// Load of Haar Cascade
	if (!load_haar_cascade(face_cascade)) {	return -1;}

	// Create new window
	SHOW cvNamedWindow("Face tracking", 1);

	do {
		t_loop0 = clock();

		#ifdef RASPBERRY
				IplImage* image = raspiCamCvQueryFrame(captureDevice);	// Get images from the video input
		#else
				IplImage* image = cvQueryFrame(captureDevice);			// Get images from the video input
		#endif
		captureFrame = cvarrToMat(image);			// Convert images to Mat
		cvtColor(captureFrame, grayscaleFrame, CV_RGB2GRAY);	// Convert the image to gray scale

		// Face detection
		face_detection(face_cascade, grayscaleFrame, captureFrame, &faces, x_face_pos, y_face_pos, area_face);

		#ifdef RECOGNITION
				// Face recognition
				face_recognition(people, grayscaleFrame, captureFrame, &faces, im_width, im_height, model,
								prediction_seuil, x_face_pos, y_face_pos, area_face);
		#endif

		// Display results
		#ifdef SHOW
				imshow("Face tracking", captureFrame);
		#endif

		t_loop = clock();
		#ifdef DEBUG
			dtime = difftime(t_loop,t_loop0);
			sprintf(sTmp,"\n (Face Tracking) tiempo del bucle del reconocimiento de cara = ");
			debug(sTmp);
			cout << print_time(dtime);
		#endif
	} while(cvWaitKey(10) < 0);

	// Destroy window
	#ifdef SHOW
		cvDestroyWindow("Face tracking");
	#endif

	#ifdef TRACE
		trace ("\n");
	#endif

	#ifdef RASPBERRY
		raspiCamCvReleaseCapture(&captureDevice);
	#else
		cvReleaseCapture(&captureDevice);
	#endif

	return 0;
}
Code example #9
GenderRecognizer::GenderRecognizer(void)
{
	// Load a pre-trained Fisherface gender classifier
	model = createFisherFaceRecognizer();
	model->load("data\\gender_classifier_25imgs.xml");
}
Code example #10
	ICLASS_API FaceRecognizer* __stdcall Create_FisherFaceRecognizer(int num_components, double threshold)
	{
		Ptr<FaceRecognizer> r = createFisherFaceRecognizer(num_components, threshold);
		// Bump the reference count so the object outlives the local smart pointer;
		// the caller takes ownership of the returned raw pointer.
		r.addref();
		return r;
	}
Code example #11
File: genderDetection.cpp  Project: xqjiang/cvproject
Ptr<FaceRecognizer> gender_detection(string fn_csv)
{
    string output_folder;
    
    //string fn_csv = string("/Users/xueqianjiang/Desktop/male.txt");
    
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    
    // This part would add an EigenFaceRecognizer to reduce the dimensionality further
    // int height = images[0].rows;
    // PCA model
    //Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    // Train the PCA model without loss of information
    //model->train(images, labels);
    // Save the results of the training
    // model->save("eigenface.yml");
    // Extract the eigenvalues of the eigenfaces, ranked from largest to smallest
    //Mat eigenvalues = model->getMat("eigenvalues");
    // Extract the eigenvectors from the model; they are stored in the same order as the eigenvalues
    //Mat W = model->getMat("eigenvectors");
    // Keep the first 121 eigenvectors. The code does not show the reasoning, but 121 was chosen
    // deliberately: first, in my experiments the first 121 eigenvalues account for more than 97%
    // of the total eigenvalue sum; second, 121 = 11^2, so each projection can be reshaped into an
    // 11x11 2-D image matrix and handed to the Fisherface recognizer.
    //  int xth = 121;
    // After PCA
    //vector<Mat> reduceDemensionimages;
    // Choose the first xth eigenvectors and discard the rest
    // Mat evs = Mat(W, Range::all(), Range(0, xth));
    
    //for(int i=0;i<images.size();i++)
    //{
    //  Mat mean= model->getMat("mean");
    // Subspace projection
    //Mat projection = subspaceProject(W, mean, images[i].reshape(1,1));
    // Reduce the dimensionality and save the projected images
    //reduceDemensionimages.push_back(projection.reshape(1,sqrt(xth)));
    // }
    
    //     Ptr<FaceRecognizer> fishermodel = createFisherFaceRecognizer();
    //fishermodel->train(reduceDemensionimages,labels);
    
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    model->train(images, labels);
    return model;
}
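The commented-out section in this example outlines a PCA pre-reduction step in front of the Fisherface training. Below is a minimal, illustrative sketch of that idea (not part of the original project), using the OpenCV 2.x contrib calls the comments already reference (getMat, subspaceProject) and the 121-component choice discussed there:

// Sketch only: PCA pre-reduction before Fisherfaces, assuming OpenCV 2.x contrib APIs.
Ptr<FaceRecognizer> pca = createEigenFaceRecognizer();
pca->train(images, labels);                          // full PCA, no components discarded yet
Mat W    = pca->getMat("eigenvectors");              // columns sorted by decreasing eigenvalue
Mat mean = pca->getMat("mean");

int xth = 121;                                       // 121 = 11^2, so projections reshape to 11x11
Mat evs = Mat(W, Range::all(), Range(0, xth));       // keep only the first 121 eigenvectors

vector<Mat> reduced;
for (size_t i = 0; i < images.size(); ++i) {
    Mat projection = subspaceProject(evs, mean, images[i].reshape(1, 1));
    reduced.push_back(projection.reshape(1, 11));    // 1x121 row vector -> 11x11 "image"
}

Ptr<FaceRecognizer> fishermodel = createFisherFaceRecognizer();
fishermodel->train(reduced, labels);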