Example #1
void CImageProcess::ANN_Train(float (*trainData)[MAX_TRAIN_COLS], int t_r, int t_c,
                              float (*obj)[MAX_OBJ_COLS], int o_r, int o_c)
{
	// m_bpANN is a CvANN_MLP member of CImageProcess.

	// Back-propagation training parameters: stop after 5000 iterations or once
	// the error change falls below 0.01.
	CvANN_MLP_TrainParams param;
	param.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 5000, 0.01);
	param.train_method = CvANN_MLP_TrainParams::BACKPROP;
	param.bp_dw_scale = 0.1;
	param.bp_moment_scale = 0.1;

	// Network topology: t_c inputs, one hidden layer of o_c neurons, o_c outputs.
	Mat layerSize = (Mat_<int>(1, 3) << t_c, o_c, o_c);
	m_bpANN.create(layerSize, CvANN_MLP::SIGMOID_SYM);

	// Wrap the caller's buffers without copying. Mat assumes each row holds t_c
	// (resp. o_c) contiguous floats, so this is only correct when
	// MAX_TRAIN_COLS == t_c and MAX_OBJ_COLS == o_c.
	Mat trainingDataMat(t_r, t_c, CV_32FC1, trainData);
	Mat labelsMat(o_r, o_c, CV_32FC1, obj);

	m_bpANN.train(trainingDataMat, labelsMat, Mat(), Mat(), param);
	m_bpANN.save("./sources/mlp.xml");
}
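The saved network can later be reloaded and used for prediction. A minimal sketch (illustrative only; featureVector stands for a hypothetical array of t_c floats describing one sample):

	// Illustrative only: reload the network saved by ANN_Train and classify one sample.
	CvANN_MLP bpInden;
	bpInden.load("./sources/mlp.xml");
	Mat sample(1, t_c, CV_32FC1, featureVector);  // featureVector: hypothetical t_c floats
	Mat response;                                 // receives the o_c output activations
	bpInden.predict(sample, response);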
Example #2
int cv_ann()
{
	//Setup the BPNetwork  
	CvANN_MLP bp;   
	// Set up BPNetwork's parameters  
	CvANN_MLP_TrainParams params;  
	params.train_method=CvANN_MLP_TrainParams::BACKPROP;  // back-propagation (BP) training algorithm
	params.bp_dw_scale=0.1;
	params.bp_moment_scale=0.1;

	// Set up training data
	float labels[10][2] = {{0.9,0.1},{0.1,0.9},{0.9,0.1},{0.1,0.9},{0.9,0.1},{0.9,0.1},{0.1,0.9},{0.1,0.9},{0.9,0.1},{0.9,0.1}};
	// The samples are labelled 0.1/0.9 rather than 0/1 because the sigmoid output lies strictly between
	// 0 and 1: it only approaches those values as the input tends to -inf/+inf, and never reaches them.
	Mat labelsMat(10, 2, CV_32FC1, labels);

	float trainingData[10][2] = { {11,12},{111,112}, {21,22}, {211,212},{51,32}, {71,42}, {441,412},{311,312}, {41,62}, {81,52} };
	Mat trainingDataMat(10, 2, CV_32FC1, trainingData);
	Mat layerSizes=(Mat_<int>(1,5) << 2, 2, 2, 2, 2); // 5 layers: input, 3 hidden layers and output, 2 perceptrons each
	bp.create(layerSizes,CvANN_MLP::SIGMOID_SYM); // use the sigmoid as the activation function
	bp.train(trainingDataMat, labelsMat, Mat(),Mat(), params);  // train the network

	// Data for visual representation  
	int width = 512, height = 512;  
	Mat image = Mat::zeros(height, width, CV_8UC3);  
	Vec3b green(0,255,0), blue (255,0,0);  
	// Show the decision regions
	for (int i = 0; i < image.rows; ++i)
	{
		for (int j = 0; j < image.cols; ++j)  
		{  
			// Feature vector for the pixel at column j, row i (x = j, y = i),
			// matching the (x, y) convention of the training data.
			Mat sampleMat = (Mat_<float>(1,2) << j, i);
			Mat responseMat;
			bp.predict(sampleMat, responseMat);
			float* p = responseMat.ptr<float>(0);
			if (p[0] > p[1])
			{
				image.at<Vec3b>(i, j) = green;
			}
			else
			{
				image.at<Vec3b>(i, j) = blue;
			}
		}  
	}
	// Show the training data  
	int thickness = -1;  
	int lineType = 8;  
	circle( image, Point(111,  112), 5, Scalar(  0,   0,   0), thickness, lineType); 
	circle( image, Point(211,  212), 5, Scalar(  0,   0,   0), thickness, lineType);  
	circle( image, Point(441,  412), 5, Scalar(  0,   0,   0), thickness, lineType);  
	circle( image, Point(311,  312), 5, Scalar(  0,   0,   0), thickness, lineType);  
	circle( image, Point(11,  12), 5, Scalar(255, 255, 255), thickness, lineType);  
	circle( image, Point(21, 22), 5, Scalar(255, 255, 255), thickness, lineType);       
	circle( image, Point(51,  32), 5, Scalar(255, 255, 255), thickness, lineType);  
	circle( image, Point(71, 42), 5, Scalar(255, 255, 255), thickness, lineType);       
	circle( image, Point(41,  62), 5, Scalar(255, 255, 255), thickness, lineType);  
	circle( image, Point(81, 52), 5, Scalar(255, 255, 255), thickness, lineType);       

	imwrite("result.png", image);        // save the image   

	imshow("BP Simple Example", image); // show it to the user  
	waitKey(0); 
	return 0;
}
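As a quick sanity check (not part of the original example), the trained network can be run on one of its own training samples; if training converged, the two outputs should come out near the 0.9/0.1 targets. A sketch, assuming <iostream> is included:

	// Sketch: classify the first training sample {11,12} and print both outputs.
	Mat testSample = (Mat_<float>(1, 2) << 11, 12);
	Mat testResponse;
	bp.predict(testSample, testResponse);
	std::cout << testResponse.at<float>(0, 0) << " "
	          << testResponse.at<float>(0, 1) << std::endl;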
Example #3
int main(int argc, char *argv[]) {

    float labels[4] = {1.0, 1.0, -1.0, -1.0};
    cv::Mat labelsMat(4, 1, CV_32FC1, labels);
    float trainingData[4][2] = {{501, 10}, {255, 10},
                                {501, 255}, {10, 501}};
    cv::Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
    
    svm_parameter param;
    param.svm_type = C_SVC;
    param.kernel_type = LINEAR;
    param.degree = 3;
    param.gamma = 0;
    param.coef0 = 0;
    param.nu = 0.5;
    param.cache_size = 100;
    param.C = 1;
    param.eps = 1e-6;
    param.p = 0.1;
    param.shrinking = 1;
    param.probability = 1;
    param.nr_weight = 0;
    param.weight_label = NULL;
    param.weight = NULL;
    
    svm_problem svm_prob_vector = libSVMWrapper(
       trainingDataMat, labelsMat, param);
    // svm_check_parameter returns NULL when the parameters are valid, otherwise
    // an error message. svm_train allocates the model itself.
    struct svm_model *model = NULL;
    const char *error_msg = svm_check_parameter(&svm_prob_vector, &param);
    if (error_msg) {
       std::cout << "ERROR: " << error_msg << std::endl;
    } else {
       model = svm_train(&svm_prob_vector, &param);
    }
    if (model == NULL) {
       return -1;
    }

    bool is_compute_probability = true;
    std::string model_file_name = "svm";
    bool save_model = true;
    if (save_model) {
       // svm_save_model is a C function: it reports failure through its return
       // value (non-zero), not by throwing an exception.
       if (svm_save_model(model_file_name.c_str(), model) == 0) {
          std::cout << "Model file saved successfully." << std::endl;
       } else {
          std::cout << "Failed to save model file " << model_file_name << std::endl;
       }
    }


    bool is_probability_model = svm_check_probability_model(model);
    int svm_type = svm_get_svm_type(model);
    int nr_class = svm_get_nr_class(model);  // number of classes
    double *prob_estimates = new double[nr_class];

    cv::Vec3b green(0, 255, 0);
    cv::Vec3b blue(255, 0, 0);
    int width = 512, height = 512;
    cv::Mat image = cv::Mat::zeros(height, width, CV_8UC3);
    for (int i = 0; i < image.rows; ++i) {
       for (int j = 0; j < image.cols; ++j) {
          cv::Mat sampleMat = (cv::Mat_<float>(1, 2) << j, i);
              
          int dims = sampleMat.cols;
          // libSVM expects a terminating node with index = -1, so allocate dims + 1.
          svm_node* test_pt = new svm_node[dims + 1];
          for (int k = 0; k < dims; k++) {
             test_pt[k].index = k + 1;
             test_pt[k].value = static_cast<double>(sampleMat.at<float>(0, k));
          }
          test_pt[dims].index = -1;

          float response = 0.0f;
          if (is_probability_model && is_compute_probability) {
             response = svm_predict_probability(model, test_pt, prob_estimates);
          } else {
             response = svm_predict(model, test_pt);
          }
          
          /*
          std::cout << "Predict: " << response << std::endl;
          for (int y = 0; y < nr_class; y++) {
             std::cout << prob_estimates[y] << "  ";
          }
          std::cout << std::endl;
          */
          
          // Read prob_estimates only when probabilities were actually computed;
          // otherwise fall back to the predicted label.
          bool is_class_one = (is_probability_model && is_compute_probability)
                                 ? (prob_estimates[0] > 0.5) : (response == 1);
          if (is_class_one) {
             image.at<cv::Vec3b>(i, j) = green;
          } else {
             image.at<cv::Vec3b>(i, j) = blue;
          }
          delete[] test_pt;  // release the per-pixel node array
       }
    }
    cv::imshow("image", image);
    cv::waitKey(0);
    return 0;
}
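The example never releases its libSVM resources. A cleanup along these lines could go just before return 0 (a sketch; how svm_prob_vector's arrays were allocated depends on libSVMWrapper, which is not shown):

    delete[] prob_estimates;
    svm_free_and_destroy_model(&model);   // frees the model allocated by svm_train
    svm_destroy_param(&param);            // frees any weight arrays in the parameters
    // svm_prob_vector.x and svm_prob_vector.y would also need to be released,
    // depending on how libSVMWrapper allocated them.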
Example #4
void TrackFace::on_oneClassClassifier_clicked()
{
    TrackFace::capture.open(0);

    string windowName="Family-Stranger Classifier";
    cv::namedWindow(windowName.c_str(), cv::WINDOW_AUTOSIZE);
    cv::moveWindow(windowName.c_str(), window_x, window_y);

    cv::Mat trainingImages;

    // Flatten every training image into a single row of the training matrix.
    for (size_t i = 0; i < images.size(); i++)
        trainingImages.push_back(images[i].reshape(1, 1));

    const unsigned int noFeatures = trainingImages.rows;

    cout << noFeatures << endl;

    // First two thirds of the samples are labelled "family" (+1), the last third "stranger" (-1).
    // std::vector replaces the variable-length array, which is a compiler extension, not standard C++.
    std::vector<float> labelImages(noFeatures);
    for (unsigned int i = 0; i < noFeatures / 3 * 2; i++) labelImages[i] = 1.0f;
    for (unsigned int i = noFeatures / 3 * 2; i < noFeatures; i++) labelImages[i] = -1.0f;

    // Scale 8-bit pixel values into roughly [-1, 1) before training.
    trainingImages.convertTo(trainingImages, CV_32FC1);
    trainingImages/=128.0;
    trainingImages-=1;

    cv::Mat labelsMat(noFeatures, 1, CV_32FC1, &labelImages[0]);

    cv::SVMParams params;
    params.svm_type=cv::SVM::NU_SVC;
    params.kernel_type=cv::SVM::RBF;
    params.nu=0.4;
    params.gamma=100;
    params.term_crit=cv::TermCriteria(CV_TERMCRIT_ITER, (int)1e7, 1e-6);

    cv::SVM svm;

    svm.train(trainingImages, labelsMat, cv::Mat(), cv::Mat(), params);

    while (true)
    {
        cv::Mat frame, buffer;
        if (!capture.isOpened()) break;

        capture >> buffer;
        cv::resize(buffer, frame,Size(buffer.cols/2,buffer.rows/2),0,0,INTER_LINEAR);

        vector<Rect_<int> > faces=haar_faces(frame);

        for (size_t i=0;i<faces.size();i++)
        {
            cv::Mat face_resized=resizeRecognitionFace(frame, faces[i]);
            face_resized=face_resized.reshape(1,1);
            face_resized.convertTo(face_resized, CV_32FC1);
            // Apply the same scaling that was used on the training data.
            face_resized/=128.0;
            face_resized-=1;
            float response=svm.predict(face_resized);
            string box_text;

            cout << response << endl;

            if (response==1.0)
            {
                box_text=format("Prediction is family");
                drawFace(frame, faces[i], box_text);
            }
            else
            {
                box_text=format("Prediction is stranger");
                drawFace(frame, faces[i], box_text);
            }
        }

        cv::imshow(windowName.c_str(), frame);
        // Exit on ESC.
        if (cv::waitKey(5)==27)
        {
            capture.release();
            cv::destroyWindow(windowName.c_str());
            break;
        }
    }
}
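Because on_oneClassClassifier_clicked retrains the SVM on every call, one possible refinement (a sketch using the cv::SVM save/load methods from the same OpenCV 2.x API; the file name is hypothetical) is to persist the trained classifier and reload it in later runs:

    // Sketch: persist the trained classifier, then reload it in a later run.
    svm.save("family_stranger_svm.yml");      // hypothetical file name

    cv::SVM svmLoaded;
    svmLoaded.load("family_stranger_svm.yml");
    // svmLoaded.predict(face_resized) can then replace svm.predict(face_resized) above.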
Example #5
void SupportVectorMachineDemo(Mat& class1_samples, char* class1_name, Mat& class2_samples, char* class2_name, Mat& unknown_samples)
{
    float labels[MAX_SAMPLES];
    float training_data[MAX_SAMPLES][2];
	CvSVM SVM;

    // Image for visual representation of (2-D) feature space
    int width = MAX_FEATURE_VALUE+1, height = MAX_FEATURE_VALUE+1;
    Mat feature_space = Mat::zeros(height, width, CV_8UC3);

	int number_of_samples = 0;
	// Loops three times:
	//  1st time - extracts feature values for class 1
	//  2nd time - extracts feature values for class 2 AND trains SVM
	//  3rd time - extracts feature values for unknowns AND predicts their classes using SVM
	for (int current_class = 1; current_class<=UNKNOWN_CLASS; current_class++)
	{
		Mat gray_image,binary_image;
		if (current_class == 1)
			cvtColor(class1_samples, gray_image, CV_BGR2GRAY);
		else if (current_class == 2)
			cvtColor(class2_samples, gray_image, CV_BGR2GRAY);
		else cvtColor(unknown_samples, gray_image, CV_BGR2GRAY);        
		threshold(gray_image,binary_image,128,255,THRESH_BINARY_INV);

	    vector<vector<Point>> contours;
		vector<Vec4i> hierarchy;
		findContours(binary_image,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
		Mat contours_image = Mat::zeros(binary_image.size(), CV_8UC3);
		contours_image = Scalar(255,255,255);
		// Do some processing on all contours (objects and holes!)
		vector<vector<Point>> hulls(contours.size());
		vector<vector<int>> hull_indices(contours.size());
		vector<vector<Vec4i>> convexity_defects(contours.size());
		vector<Moments> contour_moments(contours.size());
		for (int contour_number=0; (contour_number>=0) && !contours.empty(); contour_number=hierarchy[contour_number][0])
		{
			if (contours[contour_number].size() > 10)
			{
				convexHull(contours[contour_number], hulls[contour_number]);
				convexHull(contours[contour_number], hull_indices[contour_number]);
				convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
				contour_moments[contour_number] = moments( contours[contour_number] );
				// Draw the shape and features
				Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
				drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
				char output[500];
				double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
				// Draw the convex hull
				drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
				// Highlight any convexities
				int largest_convexity_depth=0;
				for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
				{
					if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
						largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
					if (convexity_defects[contour_number][convexity_index][3] > 256*2)
					{
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
					}
				}
				// Compute moments and a measure of the deepest convexity
				double hu_moments[7];
				HuMoments( contour_moments[contour_number], hu_moments );
				double diameter = ((double) contours[contour_number].size())/PI;
				double convexity_depth = ((double) largest_convexity_depth)/256.0;
				double convex_measure = convexity_depth/diameter;
				int class_id = current_class;
				float feature[2] = { (float) convex_measure*((float) MAX_FEATURE_VALUE), (float) hu_moments[0]*((float) MAX_FEATURE_VALUE) };
				if (feature[0] > ((float) MAX_FEATURE_VALUE)) feature[0] = ((float) MAX_FEATURE_VALUE);
				if (feature[1] > ((float) MAX_FEATURE_VALUE)) feature[1] = ((float) MAX_FEATURE_VALUE);
				if (current_class == UNKNOWN_CLASS)
				{
					// Try to predict the class
					Mat sampleMat = (Mat_<float>(1,2) << feature[0], feature[1]);
					float prediction = SVM.predict(sampleMat);
					class_id = (prediction == 1.0) ? 1 : (prediction == -1.0) ? 2 : 0;
				}
				const char* current_class_name = (class_id==1) ? class1_name : (class_id==2) ? class2_name : "Unknown";

				sprintf(output,"Class=%s, Features %.2f, %.2f", current_class_name, feature[0]/((float) MAX_FEATURE_VALUE), feature[1]/((float) MAX_FEATURE_VALUE));
				Point location( contours[contour_number][0].x-40, contours[contour_number][0].y-3 );
				putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
				if (current_class == UNKNOWN_CLASS)
				{
				}
				else if (number_of_samples < MAX_SAMPLES)
				{
					labels[number_of_samples] = (float) ((current_class == 1) ? 1.0 : -1.0);
					training_data[number_of_samples][0] = feature[0];
					training_data[number_of_samples][1] = feature[1];
					number_of_samples++;
				}
			}
		}
		if (current_class == 1)
		{
			Mat temp_output = contours_image.clone();
			imshow(class1_name, temp_output );
		}
		else if (current_class == 2)
		{
			Mat temp_output2 = contours_image.clone();
			imshow(class2_name, temp_output2 );

			// Now that features for both classes have been determined, train the SVM
			Mat labelsMat(number_of_samples, 1, CV_32FC1, labels);
			Mat trainingDataMat(number_of_samples, 2, CV_32FC1, training_data);
			// Set up SVM's parameters
			CvSVMParams params;
			params.svm_type    = CvSVM::C_SVC;
			params.kernel_type = CvSVM::POLY;
			params.degree = 1;
			params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
			// Train the SVM
			SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

			// Show the SVM classifier for all possible feature values
			Vec3b green(192,255,192), blue (255,192,192);
			// Show the decision regions given by the SVM
			for (int i = 0; i < feature_space.rows; ++i)
				for (int j = 0; j < feature_space.cols; ++j)
				{
					Mat sampleMat = (Mat_<float>(1,2) << j,i);
					float prediction = SVM.predict(sampleMat);
					if (prediction == 1)
						feature_space.at<Vec3b>(i,j) = green;
					else if (prediction == -1)
					    feature_space.at<Vec3b>(i,j)  = blue;
				}
			// Show the training data (as dark circles)
			for(int sample=0; sample < number_of_samples; sample++)
				if (labels[sample] == 1.0)
					circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 0, 128, 0 ), -1, 8);
				else circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 128, 0, 0 ), -1, 8);
			// Highlight the support vectors (in red)
			int num_support_vectors = SVM.get_support_vector_count();
			for (int support_vector_index = 0; support_vector_index < num_support_vectors; ++support_vector_index)
			{
				const float* v = SVM.get_support_vector(support_vector_index);
				circle( feature_space,  Point( (int) v[0], (int) v[1]),   3,  Scalar(0, 0, 255));
			}
			imshow("SVM feature space", feature_space);
		}
		else if (current_class == 3)
		{
			imshow("Classification of unknowns", contours_image );
		}
	}
}
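A minimal driver sketch for the demo above, assuming an OpenCV 2.x build and that the surrounding project defines MAX_SAMPLES, MAX_FEATURE_VALUE, UNKNOWN_CLASS and PI; the image file names here are hypothetical:

// Illustrative only: load one sample image per class plus an image of unknown
// objects, then run the SVM demo on them.
int main()
{
	Mat class1_samples = imread("class1_samples.png");   // hypothetical file names
	Mat class2_samples = imread("class2_samples.png");
	Mat unknown_samples = imread("unknown_samples.png");
	if (class1_samples.empty() || class2_samples.empty() || unknown_samples.empty())
		return -1;
	SupportVectorMachineDemo(class1_samples, (char*)"Class 1",
	                         class2_samples, (char*)"Class 2", unknown_samples);
	waitKey(0);
	return 0;
}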