bool NeuralDetector::Train(const std::vector<Image_t> &images)
{
    const int images_count = (int)images.size();
    cv::Mat trainData(images_count, ROWS * COLS, CV_32FC1);
    cv::Mat trainOutput(images_count, 1, CV_32FC1);

    for (int i = 0; i < images_count; ++i) {
        // Reject unlabeled samples before touching the matrices.
        if (images[i].info == SI_UNDEF) {
            return false;
        }
        // Assumes the image is already CV_32FC1 with ROWS * COLS elements.
        images[i].image.reshape(0, 1).copyTo(trainData.row(i));
        trainOutput.at<float>(i, 0) = (images[i].info == SI_EXBNZ ? 1.0f : -1.0f);
    }
    machineBrain.train(
       trainData,
       trainOutput,
       cv::Mat(),
       cv::Mat(),
       CvANN_MLP_TrainParams(
           cvTermCriteria(
               CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
               MAX_ITER,
               MLP_EPSILON
               ),
           CvANN_MLP_TrainParams::BACKPROP,
           PARAM1,
           PARAM2
           )
       );
    machineBrain.save("savednet.txt");
    std::cout << "Saved\n";
    return true;
}
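A prediction counterpart is not shown in the original; the following is a minimal sketch, assuming the same machineBrain member and ROWS/COLS constants (the Predict name and the zero threshold, mirroring the +1/-1 training targets, are illustrative):

bool NeuralDetector::Predict(const cv::Mat &image)
{
    cv::Mat sample;
    image.reshape(0, 1).convertTo(sample, CV_32FC1); // one row of ROWS * COLS floats
    cv::Mat response(1, 1, CV_32FC1);
    machineBrain.predict(sample, response);
    return response.at<float>(0, 0) > 0.0f;          // > 0 reads as SI_EXBNZ, <= 0 as the other class
}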
	/**
	 * Train the network on Matrix/Labels and save the model.
	 */
	void NN::train(){
		cout << Matrix->width << " - " << Matrix->height << " - " << Length << " - " << Labels->height << endl;
		int layer_sz[] = { Matrix->cols, int(Matrix->cols/4), Length };
		CvMat layer_sizes =	cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
		NNModel.create( &layer_sizes, CvANN_MLP::SIGMOID_SYM );
		NNModel.train( Matrix, Labels, 0, 0,
			CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,10000,0.001),
			CvANN_MLP_TrainParams::RPROP,0.01));
		cout << Length << endl;
		save("NNModel");
	}
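For completeness, a hedged sketch of the matching prediction step; this predict method is not in the source, and it simply takes the argmax over the Length output neurons:

	int NN::predict(const cv::Mat &sample){
		cv::Mat response(1, Length, CV_32FC1);
		NNModel.predict(sample, response);
		cv::Point maxLoc;
		cv::minMaxLoc(response, 0, 0, 0, &maxLoc);	// strongest output wins
		return maxLoc.x;	// predicted class index
	}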
Example #3
std::string TrainProcessor::processInput(const std::string &input) {
	cv::Mat samples, categories;
	if (!fillSamples(input, samples, categories)) {
		return "Unable to load samples from file";
	}

	const int inputSize = myCvPCA.eigenvalues.rows;
	const int outputSize = myClassesList.length();

	cv::Mat inputs;
	myCvPCA.project(samples, inputs);

	cv::Mat outputs(samples.rows, outputSize, CV_32FC1);
	outputs = 0.0f;
	for (int i = 0; i < categories.rows; ++i) {
		char cat = categories.at<unsigned char>(i, 0);
		int index = (int) myClassesList.find(cat);
		outputs.at<float>(i, index) = 1.0f;
	}

	int layers = (myLayersCount > 0) ? myLayersCount : std::max(3, (int)(inputSize * myLayersScale));

	std::cout << std::endl;
	std::cout << "Layers number = " << layers << std::endl;

	cv::Mat layerSizes(1, layers, CV_32SC1);
	--layers;

	std::cout << "Layer sizes: " << inputSize;
	layerSizes.at<int>(0, 0) = inputSize;

	for (int i = 1; i < layers; ++i) {
		const float scale = myLayersSizeScale + (1.0f - myLayersSizeScale) * (i-1) / (layers-1);
		const int sz = (int)(scale * (inputSize + (outputSize - inputSize) * i / layers));
		std::cout << " " << sz;
		layerSizes.at<int>(0, i) = sz;
	}

	std::cout << " " << outputSize << std::endl;
	layerSizes.at<int>(0, layers) = outputSize;

	std::cout << std::endl;
	double timer = (double)cv::getTickCount();

	myCvMLP.create(layerSizes, CvANN_MLP::SIGMOID_SYM, 1.0, 1.0);
	myCvMLP.train(inputs, outputs, cv::Mat(), cv::Mat(), CvANN_MLP_TrainParams(), 0);

	timer = (double)cv::getTickCount() - timer;
	std::cout << "Training time = " << (timer / cv::getTickFrequency()) << " s" << std::endl;
	std::cout << std::endl;

	return "";
}
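At classification time the same PCA projection has to be applied before the MLP sees a sample. A hypothetical sketch (this classify method is not part of the source; it assumes the same myCvPCA, myCvMLP, and myClassesList members):

char TrainProcessor::classify(const cv::Mat &sample) const {
	cv::Mat projected, response;
	myCvPCA.project(sample, projected);        // project into the trained subspace
	myCvMLP.predict(projected, response);
	cv::Point maxLoc;
	cv::minMaxLoc(response, 0, 0, 0, &maxLoc); // strongest output wins
	return myClassesList[maxLoc.x];            // inverse of the find() used during training
}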
Example #4
//==============================================================================
void ANN::train(Mat_<float> &trainData, vector<uchar> &labels, int numClasses)
{
    cout << "ANN training..." << endl;

    attributesPerSample = trainData.cols;
    numberOfClasses = numClasses;
    int numberOfSamples = trainData.rows;

    Mat layers = Mat(1, 3, CV_32SC1);
    layers.at<int>(0,0) = attributesPerSample; // input
    layers.at<int>(0,1) = 2; // hidden
    layers.at<int>(0,2) = numberOfClasses; // output


    // Create the network using a sigmoid function
    //--------------------------------------------------------------------------
    nnetwork = new CvANN_MLP();
    nnetwork->create(layers, CvANN_MLP::SIGMOID_SYM, 1.0, 1.0);


    // Prepare labels in required format
    //--------------------------------------------------------------------------
    Mat_<float> trainLabels = Mat_<float>::zeros(numberOfSamples, numberOfClasses);

    // Fill in the matrix with labels
    for(int i = 0; i < numberOfSamples; i++) {
        trainLabels(i, labels[i]) = 1.0;
    }

    // Set the training parameters
    //--------------------------------------------------------------------------
    CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(
        // Terminate the training after 10 iterations or once the change in
        // the network weights drops below 0.001, whichever comes first
        cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.001),

        // Use back-propagation for training
        CvANN_MLP_TrainParams::BACKPROP,

        // Coefficients for back-propagation training
        0.1,
        0.1
    );

    // Train the neural network
    //--------------------------------------------------------------------------
    int iters = nnetwork->train(trainData, trainLabels, Mat(), Mat(), params);

    cout << "Number of iterations: " << iters << endl;
}
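A minimal prediction sketch under the same assumptions as train() above (the nnetwork pointer and numberOfClasses member); the predicted class is the index of the strongest output neuron:

int ANN::predict(const Mat_<float> &sample)
{
    Mat response(1, numberOfClasses, CV_32FC1);
    nnetwork->predict(sample, response);
    Point maxLoc;
    minMaxLoc(response, 0, 0, 0, &maxLoc); // column of the strongest output
    return maxLoc.x;
}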
Example #5
void ANN::parametricTrain(cv::Mat_<float> &trainData, std::vector<uchar> &labels, int iter, vector<int> & nodes, int hidden){
    // Prepare labels in required format
    //--------------------------------------------------------------------------
    Mat_<float> trainLabels = Mat_<float>::zeros(trainData.rows, this->numberOfClasses);
    // Fill in the matrix with labels
    for(int i = 0; i < trainData.rows; i++) {
        trainLabels(i, labels[i]) = 1.0;
    }

    //cout << trainLabels << endl;

    // Set the training parameters
    //--------------------------------------------------------------------------
    CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(
        // Terminate the training after `iter` iterations or once the change
        // in the network weights drops below the (very small) epsilon
        cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, iter, 0.00000001),

        // Use back-propagation for training
        CvANN_MLP_TrainParams::BACKPROP,

        // Coefficients for back-propagation training
        0.1,
        0.1
    );

    // Train the neural network
    //--------------------------------------------------------------------------
    int iters = nnetwork->train(trainData, trainLabels, Mat(), Mat(), params);

#if INFO
    cout << "Number of iterations: " << iters << endl;
#endif

    stringstream ss;
    ss << iters << "_" << hidden << "_Classes_";
    for(size_t i = 0; i < nodes.size(); i++){
        ss << nodes[i] << "_";
    }
    ss << "nodes";
    string name = ss.str();
    name = "iter_" + name;
    //this->saveTofile((char*)name.c_str());
}
static
int build_mlp_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat train_data;
    CvMat* responses = 0;
    CvMat* mlp_response = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i, j;
    double train_hr = 0, test_hr = 0;
    CvANN_MLP mlp;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);

    // Create or load MLP classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        mlp.load( filename_to_load );
        ntrain_samples = 0;
        if( !mlp.get_layer_count() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", data_filename );
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // MLP does not support categorical variables explicitly.
        // So, instead of the output class label, we will use
        // a binary vector of <class_count> components for training and,
        // therefore, MLP will give us a vector of "probabilities" at the
        // prediction stage
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        CvMat* new_responses = cvCreateMat( ntrain_samples, class_count, CV_32F );

        // 1. unroll the responses
        printf( "Unrolling the responses...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            int cls_label = cvRound(responses->data.fl[i]) - 'A';
            float* bit_vec = (float*)(new_responses->data.ptr + i*new_responses->step);
            for( j = 0; j < class_count; j++ )
                bit_vec[j] = 0.f;
            bit_vec[cls_label] = 1.f;
        }
        cvGetRows( data, &train_data, 0, ntrain_samples );

        // 2. train classifier
        int layer_sz[] = { data->cols, 100, 100, class_count };
        CvMat layer_sizes =
            cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
        mlp.create( &layer_sizes );
        printf( "Training the classifier (may take a few minutes)...\n");
        mlp.train( &train_data, new_responses, 0, 0,
            CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
#if 1
            CvANN_MLP_TrainParams::BACKPROP,0.001));
#else
            CvANN_MLP_TrainParams::RPROP,0.05));
#endif
        cvReleaseMat( &new_responses );
        printf("\n");
    }

    mlp_response = cvCreateMat( 1, class_count, CV_32F );

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class;
        CvMat sample;
        cvGetRow( data, &sample, i );
        CvPoint max_loc = {0,0};
        mlp.predict( &sample, mlp_response );
        cvMinMaxLoc( mlp_response, 0, 0, 0, &max_loc, 0 );
        best_class = max_loc.x + 'A';

        int r = fabs((double)best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr = ntrain_samples > 0 ? train_hr/(double)ntrain_samples : 1.;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    // Save classifier to file if needed
    if( filename_to_save )
        mlp.save( filename_to_save );

    cvReleaseMat( &mlp_response );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
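A hypothetical driver for the function above; the default file name is a placeholder for a data file in the UCI letter-recognition format (16 features per sample, class label first):

int main( int argc, char** argv )
{
    char* data_filename = argc > 1 ? argv[1] : (char*)"letter-recognition.data";
    char* filename_to_save = argc > 2 ? argv[2] : 0;   // optional: where to save the trained net
    return build_mlp_classifier( data_filename, filename_to_save, 0 );
}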
Example #7
//---------------------------------------------------------
bool COpenCV_NNet::On_Execute(void)
{
	//-------------------------------------------------
	bool					b_updateWeights, b_noInputScale, b_noOutputScale, b_NoData;
	int						i_matType, i_layers, i_maxIter, i_neurons, i_areasClassId, i_trainFeatTotalCount, *i_outputFeatureIdxs, i_outputFeatureCount, i_Grid, x, y, i_evalOut, i_winner;
	double					d_alpha, d_beta, d_eps;
	DATA_TYPE				e_dataType;
	TRAINING_METHOD			e_trainMet;
	ACTIVATION_FUNCTION		e_actFunc;
	CSG_Table				*t_Weights, *t_Indices, *t_TrainInput, *t_EvalInput, *t_EvalOutput;
	CSG_Parameter_Grid_List	*gl_TrainInputs;
	CSG_Grid				*g_EvalOutput, *g_EvalOutputCert;
	CSG_Shapes				*s_TrainInputAreas;
	CSG_Parameters			*p_TrainFeatures;
	TSG_Point				p;
	CvMat					*mat_Weights, *mat_Indices, **mat_data, *mat_neuralLayers, mat_layerSizesSub, *mat_EvalInput, *mat_EvalOutput;	// todo: mat_indices to respect input indices, mat_weights for initialization
	CvANN_MLP_TrainParams	tp_trainParams;
	CvANN_MLP				model;

	b_updateWeights		= Parameters("UPDATE_WEIGHTS"							)->asBool();
	b_noInputScale		= Parameters("NO_INPUT_SCALE"							)->asBool();
	b_noOutputScale		= Parameters("NO_OUTPUT_SCALE"							)->asBool();
	i_layers			= Parameters("NNET_LAYER"								)->asInt();
	i_neurons			= Parameters("NNET_NEURONS"								)->asInt();
	i_maxIter			= Parameters("MAX_ITER"									)->asInt();
	i_areasClassId		= Parameters("TRAIN_INPUT_AREAS_CLASS_FIELD"			)->asInt();
	e_dataType			= (DATA_TYPE)Parameters("DATA_TYPE"						)->asInt();
	e_trainMet			= (TRAINING_METHOD)Parameters("TRAINING_METHOD"			)->asInt();
	e_actFunc			= (ACTIVATION_FUNCTION)Parameters("ACTIVATION_FUNCTION"	)->asInt();
	d_alpha				= Parameters("ALPHA"									)->asDouble();
	d_beta				= Parameters("BETA"										)->asDouble();
	d_eps				= Parameters("EPSILON"									)->asDouble();
	t_Weights			= Parameters("WEIGHTS"									)->asTable();
	t_Indices			= Parameters("INDICES"									)->asTable();
	t_TrainInput		= Parameters("TRAIN_INPUT_TABLE"						)->asTable();
	t_EvalInput			= Parameters("EVAL_INPUT_TABLE"							)->asTable();
	t_EvalOutput		= Parameters("EVAL_OUTPUT_TABLE"						)->asTable();
	p_TrainFeatures		= Parameters("TRAIN_FEATURES_TABLE"						)->asParameters();
	gl_TrainInputs		= Parameters("TRAIN_INPUT_GRIDS"						)->asGridList();
	g_EvalOutput		= Parameters("EVAL_OUTPUT_GRID_CLASSES"					)->asGrid();
	g_EvalOutputCert	= Parameters("EVAL_OUTPUT_GRID_CERTAINTY"				)->asGrid();
	s_TrainInputAreas	= Parameters("TRAIN_INPUT_AREAS"						)->asShapes();

	// Fixed matrix type (TODO: analyze what to do for other types of data, e.g. images)
	i_matType = CV_32FC1;

	//-------------------------------------------------
	if (e_dataType == TABLE)
	{	
		// We are working with TABLE data
		if( t_TrainInput->Get_Count() == 0 || p_TrainFeatures->Get_Count() == 0 )
		{
			Error_Set(_TL("Select an input table and at least one output feature!"));
			return( false );
		}

		// Count the total number of available features
		i_trainFeatTotalCount = t_TrainInput->Get_Field_Count();

		// Count the number of selected output features
		i_outputFeatureIdxs = (int *)SG_Calloc(i_trainFeatTotalCount, sizeof(int));
		i_outputFeatureCount = 0;
	
		for(int i=0; i<p_TrainFeatures->Get_Count(); i++)
		{
			if( p_TrainFeatures->Get_Parameter(i)->asBool() )
			{
				i_outputFeatureIdxs[i_outputFeatureCount++] = CSG_String(p_TrainFeatures->Get_Parameter(i)->Get_Identifier()).asInt();
			}
		}

		// Update the number of training features
		i_trainFeatTotalCount = i_trainFeatTotalCount-i_outputFeatureCount;

		if( i_outputFeatureCount <= 0 )
		{
			Error_Set(_TL("Select at least one output feature!"));
			return( false );
		}

		// Now convert the input and output training data into OpenCV matrix objects
		mat_data = GetTrainAndOutputMatrix(t_TrainInput, i_matType, i_outputFeatureIdxs, i_outputFeatureCount);
	}
	else
	{
		// TODO: Add some grid validation logic
		i_trainFeatTotalCount = gl_TrainInputs->Get_Count();
		i_outputFeatureCount = s_TrainInputAreas->Get_Count();

		// Convert the data from the grids into matrix form
		mat_data = GetTrainAndOutputMatrix(gl_TrainInputs, i_matType, s_TrainInputAreas, i_areasClassId, g_EvalOutput, g_EvalOutputCert);
	}

	//-------------------------------------------------
	// Add two additional layers to the network topology (the 0-th layer for input and the last one for output)
	i_layers = i_layers + 2;
	mat_neuralLayers = cvCreateMat(i_layers, 1, CV_32SC1);
	cvGetRows(mat_neuralLayers, &mat_layerSizesSub, 0, i_layers);
	
	//Setting the number of neurons on each layer
	for (int i = 0; i < i_layers; i++)
	{
		if (i == 0)
		{
			// The first layer needs the same size (number of neurons) as the number of columns in the training data
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_trainFeatTotalCount));
		}
		else if (i == i_layers-1)
		{
			// The last layer needs the same size (number of neurons) as the number of output columns
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_outputFeatureCount));
		}
		else
		{
			// On every other layer set the layer size selected by the user
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_neurons));	
		}
	}

	//-------------------------------------------------
	// Create the training params object
	tp_trainParams = CvANN_MLP_TrainParams();
	tp_trainParams.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, i_maxIter, d_eps);

	// Check which training method was selected and set corresponding params
	if(e_trainMet == RPROP)
	{
		// Set all RPROP specific params
		tp_trainParams.train_method = CvANN_MLP_TrainParams::RPROP;
		tp_trainParams.rp_dw0 = Parameters("RP_DW0"				)->asDouble();
		tp_trainParams.rp_dw_plus = Parameters("RP_DW_PLUS"		)->asDouble();
		tp_trainParams.rp_dw_minus = Parameters("RP_DW_MINUS"	)->asDouble();
		tp_trainParams.rp_dw_min = Parameters("RP_DW_MIN"		)->asDouble();
		tp_trainParams.rp_dw_max = Parameters("RP_DW_MAX"		)->asDouble();
	}
	else
	{
		// Set all BPROP specific params
		tp_trainParams.train_method = CvANN_MLP_TrainParams::BACKPROP;
		tp_trainParams.bp_dw_scale = Parameters("BP_DW_SCALE"			)->asDouble();
		tp_trainParams.bp_moment_scale = Parameters("BP_MOMENT_SCALE"	)->asDouble();
	}
	
	//-------------------------------------------------
	// Create the model (depending on the activation function)
	if(e_actFunc == SIGMOID)
	{
		model.create(mat_neuralLayers);
	}
	else
	{
		model.create(mat_neuralLayers, CvANN_MLP::GAUSSIAN, d_alpha, d_beta);
	}

	//-------------------------------------------------
	// Now train the network

	// TODO: Integrate init weights and indices for record selection
	// mat_Weights  = GetMatrix(t_Weights, i_matType);
	// mat_Indices = GetMatrix(t_Indices, i_matType);
	
	//model.train(mat_TrainInput, mat_TrainOutput, NULL, NULL, tp_trainParams);
	model.train(mat_data[0], mat_data[1], NULL, NULL, tp_trainParams);

	//-------------------------------------------------
	// Predict data
	if (e_dataType == TABLE)
	{
		// Get the evaluation/test matrix from the eval table
		mat_EvalInput = GetEvalMatrix(t_EvalInput, i_matType);
	}
	else
	{
		// Train and eval data overlap in grid mode
		mat_EvalInput = GetEvalMatrix(gl_TrainInputs, i_matType);
	}

	// Prepare output matrix
	mat_EvalOutput = cvCreateMat(mat_EvalInput->rows, i_outputFeatureCount, i_matType);

	// Start prediction
	model.predict(mat_EvalInput, mat_EvalOutput);

	Message_Add(_TL("Successfully trained the network and predicted the values. Here comes the output."));
	
	//-------------------------------------------------
	// Save and print results
	if (e_dataType == TABLE)
	{
		// DEBUG -> Save results to output table and print results
		for (int i = 0; i < i_outputFeatureCount; i++)
		{
			t_EvalOutput->Add_Field(CSG_String(t_TrainInput->Get_Field_Name(i_outputFeatureIdxs[i])), SG_DATATYPE_Float);
		}
	
		for (int i = 0; i < mat_EvalOutput->rows; i++)
		{
			CSG_Table_Record* tr_record = t_EvalOutput->Add_Record();

			for (int j = 0; j < i_outputFeatureCount; j++)
			{
				float f_targetValue = mat_EvalOutput->data.fl[i*i_outputFeatureCount+j];
				tr_record->Set_Value(j, f_targetValue);
			}
		}
	}
	else
	{
		// Fill the output table
		for (int i = 0; i < i_outputFeatureCount; i++)
		{
			// TODO: Get the class name
			t_EvalOutput->Add_Field(CSG_String::Format(SG_T("CLASS_%d"), i), SG_DATATYPE_Float);
		}
	
		for (int i = 0; i < mat_EvalOutput->rows; i++)
		{
			CSG_Table_Record* tr_record = t_EvalOutput->Add_Record();

			for (int j = 0; j < i_outputFeatureCount; j++)
			{
				float f_targetValue = mat_EvalOutput->data.fl[i*i_outputFeatureCount+j];
				tr_record->Set_Value(j, f_targetValue);
			}
		}

		i_evalOut = 0;

		// Fill the output grid
		for(y=0, p.y=Get_YMin(); y<Get_NY() && Set_Progress(y); y++, p.y+=Get_Cellsize())
		{
			for(x=0, p.x=Get_XMin(); x<Get_NX(); x++, p.x+=Get_Cellsize())
			{
				for(i_Grid=0, b_NoData=false; i_Grid<gl_TrainInputs->Get_Count() && !b_NoData; i_Grid++)
				{
					// If there is one grid that has no data in this point p, then set the no data flag
					if( gl_TrainInputs->asGrid(i_Grid)->is_NoData(x, y) )
					{
						b_NoData = true;
					}
				}

				if (!b_NoData)
				{
					// We have data in all grids, so this is a point that was predicted
					// Get the winner class for this point and set it to the output grid
					float f_targetValue = 0;
					i_winner = 0;	// default to the first class in case no output exceeds zero

					for (int j = 0; j < i_outputFeatureCount; j++)
					{
						if (mat_EvalOutput->data.fl[i_evalOut*i_outputFeatureCount+j] > f_targetValue)
						{
							// The current value is higher than the previous best, so remember this class
							f_targetValue = mat_EvalOutput->data.fl[i_evalOut*i_outputFeatureCount+j];
							i_winner = j;
						}
					}

					// Now finally set the values to the grids
					g_EvalOutput->Set_Value(x, y, i_winner);
					g_EvalOutputCert->Set_Value(x, y, f_targetValue);

					i_evalOut++;
				}
			}
		}
	}

	return( true );
}
// Read the training data and train the network.
void trainMachine()
{
    int i;
    //The number of training samples. 
    int train_sample_count = 130;
    
    //The training data matrix. 
    float td[130][61];
    
    //Read the training file
    FILE *fin;
    fin = fopen("data/sonar_train.csv", "r");
    if (fin == NULL)
    {
        printf("Could not open data/sonar_train.csv\n");
        return;
    }
    
    //Create the matrices    
    //Input data samples. Matrix of order (train_sample_count x 60)
    CvMat* trainData = cvCreateMat(train_sample_count, 60, CV_32FC1);
    
    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);
    
    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);
    
    //The matrix representation of our ANN. We'll have four layers.
    CvMat* neuralLayers = cvCreateMat(4, 1, CV_32SC1);
    
    //Setting the number of neurons on each layer of the ANN
    /* 
     We have in Layer 1: 60 neurons (60 inputs)
     Layer 2: 150 neurons (hidden layer)
     Layer 3: 225 neurons (hidden layer)
     Layer 4: 1 neuron (1 output)
     */
    cvSet1D(neuralLayers, 0, cvScalar(60));
    cvSet1D(neuralLayers, 1, cvScalar(150));
    cvSet1D(neuralLayers, 2, cvScalar(225));
    cvSet1D(neuralLayers, 3, cvScalar(1));
    
    //Read and populate the samples.
    for (i=0;i<train_sample_count;i++)
        fscanf(fin,"%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f",
               &td[i][0],&td[i][1],&td[i][2],&td[i][3],&td[i][4],&td[i][5],&td[i][6],&td[i][7],&td[i][8],&td[i][9],&td[i][10],&td[i][11],&td[i][12],&td[i][13],&td[i][14],&td[i][15],&td[i][16],&td[i][17],&td[i][18],&td[i][19],&td[i][20],&td[i][21],&td[i][22],&td[i][23],&td[i][24],&td[i][25],&td[i][26],&td[i][27],&td[i][28],&td[i][29],&td[i][30],&td[i][31],&td[i][32],&td[i][33],&td[i][34],&td[i][35],&td[i][36],&td[i][37],&td[i][38],&td[i][39],&td[i][40],&td[i][41],&td[i][42],&td[i][43],&td[i][44],&td[i][45],&td[i][46],&td[i][47],&td[i][48],&td[i][49],&td[i][50],&td[i][51],&td[i][52],&td[i][53],&td[i][54],&td[i][55],&td[i][56],&td[i][57],&td[i][58],&td[i][59],&td[i][60]);
    
    //we are done reading the file, so close it
    fclose(fin);
    
    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //inputs
        for (int j = 0; j < 60; j++) 
            cvSetReal2D(trainData, i, j, td[i][j]);
    
        //Output
        cvSet1D(trainClasses, i, cvScalar(td[i][60]));
        //Weight (setting everything to 1)
        cvSet1D(sampleWts, i, cvScalar(1));
    }
    
    //Create our ANN.
    ann.create(neuralLayers);
    cout << "training...\n";
    //Train it with our data.
    ann.train(
        trainData,
        trainClasses,
        sampleWts,
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                100000,
                0.000001),
            CvANN_MLP_TrainParams::BACKPROP,
            0.01,
            0.05));
}
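To run the trained sonar network on a new sample, something like the following hedged sketch would do; predictSample is not part of the original, and it assumes the same global ann object:

float predictSample(const float sample[60])
{
    CvMat* input  = cvCreateMat(1, 60, CV_32FC1);
    CvMat* output = cvCreateMat(1, 1, CV_32FC1);
    for (int j = 0; j < 60; j++)
        cvSetReal2D(input, 0, j, sample[j]);
    ann.predict(input, output);
    float result = (float)cvGet1D(output, 0).val[0]; // single output neuron
    cvReleaseMat(&input);
    cvReleaseMat(&output);
    return result;
}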
Example #9
// Read the training data and train the network.
void trainMachine()
{
    int i;
    //The number of training samples.
    int train_sample_count;

    //The training data matrix. 
    //Note that we are limiting the number of training data samples to 10000 here.
    //The data sample consists of two inputs and an output. That's why 3.
    float td[10000][3];

    //Read the training file
    /*
       A sample file's contents (say we are training the network to generate
       the mean of two numbers) would be:

       5
       12 16 14
       10 5  7.5
       8  10 9
       5  4  4.5
       12 6  9

     */
    FILE *fin;
    fin = fopen("train.txt", "r");

    //Get the number of samples.
    fscanf(fin, "%d", &train_sample_count);
    printf("Found training file with %d samples...\n", train_sample_count);

    //Create the matrices

    //Input data samples. Matrix of order (train_sample_count x 2)
    CvMat* trainData = cvCreateMat(train_sample_count, 2, CV_32FC1);

    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The matrix representation of our ANN. We'll have four layers.
    CvMat* neuralLayers = cvCreateMat(4, 1, CV_32SC1);

    CvMat trainData1, trainClasses1, neuralLayers1, sampleWts1;

    cvGetRows(trainData, &trainData1, 0, train_sample_count);
    cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count);
    cvGetRows(sampleWts, &sampleWts1, 0, train_sample_count);
    cvGetRows(neuralLayers, &neuralLayers1, 0, 4);

    //Setting the number of neurons on each layer of the ANN
    /* 
       We have in Layer 1: 2 neurons (2 inputs)
       Layer 2: 3 neurons (hidden layer)
       Layer 3: 3 neurons (hidden layer)
       Layer 4: 1 neuron (1 output)
     */
    cvSet1D(&neuralLayers1, 0, cvScalar(2));
    cvSet1D(&neuralLayers1, 1, cvScalar(3));
    cvSet1D(&neuralLayers1, 2, cvScalar(3));
    cvSet1D(&neuralLayers1, 3, cvScalar(1));

    //Read and populate the samples.
    for (i=0;i<train_sample_count;i++)
        fscanf(fin,"%f %f %f",&td[i][0],&td[i][1],&td[i][2]);

    fclose(fin);

    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //Input 1
        cvSetReal2D(&trainData1, i, 0, td[i][0]);
        //Input 2
        cvSetReal2D(&trainData1, i, 1, td[i][1]);
        //Output
        cvSet1D(&trainClasses1, i, cvScalar(td[i][2]));
        //Weight (setting everything to 1)
        cvSet1D(&sampleWts1, i, cvScalar(1));
    }

    //Create our ANN.
    machineBrain.create(neuralLayers); //sigmoid activation with default alpha and beta (the two activation-function parameters)

    //Train it with our data.
    machineBrain.train(
        trainData,      //inputs
        trainClasses,   //desired outputs
        sampleWts,      //per-sample weights
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, //type: CV_TERMCRIT_ITER, CV_TERMCRIT_EPS, or a combination of the two
                10000000,   //maximum number of iterations
                0.00000001  //required accuracy: the weight change between two iterations
                ),
            CvANN_MLP_TrainParams::BACKPROP, //back-propagation algorithm
            0.01, //explicitly tunable parameters: the learning rate (alpha)
            0.05  //and the momentum term
        )
    );
}
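A prediction helper in the same C-API style might look like this; a hedged sketch only, since Predict and the buffer names are not from the source, and the same global machineBrain is assumed:

float Predict(float a, float b)
{
    float inputBuf[2] = { a, b };
    float outputBuf[1] = { 0.0f };
    CvMat input  = cvMat(1, 2, CV_32FC1, inputBuf);  // headers over stack buffers
    CvMat output = cvMat(1, 1, CV_32FC1, outputBuf);
    machineBrain.predict(&input, &output);
    return outputBuf[0];                             // the network's estimate of the mean
}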
Example #10
void CvANN_MLP::read_params( CvFileStorage* fs, CvFileNode* node )
{
    //CV_FUNCNAME( "CvANN_MLP::read_params" );

    __BEGIN__;

    const char* activ_func_name = cvReadStringByName( fs, node, "activation_function", 0 );
    CvFileNode* tparams_node;

    if( activ_func_name )
        activ_func = strcmp( activ_func_name, "SIGMOID_SYM" ) == 0 ? SIGMOID_SYM :
                     strcmp( activ_func_name, "IDENTITY" ) == 0 ? IDENTITY :
                     strcmp( activ_func_name, "GAUSSIAN" ) == 0 ? GAUSSIAN : 0;
    else
        activ_func = cvReadIntByName( fs, node, "activation_function" );

    f_param1 = cvReadRealByName( fs, node, "f_param1", 0 );
    f_param2 = cvReadRealByName( fs, node, "f_param2", 0 );
    
    set_activ_func( activ_func, f_param1, f_param2 );
    
    min_val = cvReadRealByName( fs, node, "min_val", 0. );
    max_val = cvReadRealByName( fs, node, "max_val", 1. );
    min_val1 = cvReadRealByName( fs, node, "min_val1", 0. );
    max_val1 = cvReadRealByName( fs, node, "max_val1", 1. );

    tparams_node = cvGetFileNodeByName( fs, node, "training_params" );
    params = CvANN_MLP_TrainParams();

    if( tparams_node )
    {
        const char* tmethod_name = cvReadStringByName( fs, tparams_node, "train_method", "" );
        CvFileNode* tcrit_node;

        if( strcmp( tmethod_name, "BACKPROP" ) == 0 )
        {
            params.train_method = CvANN_MLP_TrainParams::BACKPROP;
            params.bp_dw_scale = cvReadRealByName( fs, tparams_node, "dw_scale", 0 );
            params.bp_moment_scale = cvReadRealByName( fs, tparams_node, "moment_scale", 0 );
        }
        else if( strcmp( tmethod_name, "RPROP" ) == 0 )
        {
            params.train_method = CvANN_MLP_TrainParams::RPROP;
            params.rp_dw0 = cvReadRealByName( fs, tparams_node, "dw0", 0 );
            params.rp_dw_plus = cvReadRealByName( fs, tparams_node, "dw_plus", 0 );
            params.rp_dw_minus = cvReadRealByName( fs, tparams_node, "dw_minus", 0 );
            params.rp_dw_min = cvReadRealByName( fs, tparams_node, "dw_min", 0 );
            params.rp_dw_max = cvReadRealByName( fs, tparams_node, "dw_max", 0 );
        }

        tcrit_node = cvGetFileNodeByName( fs, tparams_node, "term_criteria" );
        if( tcrit_node )
        {
            params.term_crit.epsilon = cvReadRealByName( fs, tcrit_node, "epsilon", -1 );
            params.term_crit.max_iter = cvReadIntByName( fs, tcrit_node, "iterations", -1 );
            params.term_crit.type = (params.term_crit.epsilon >= 0 ? CV_TERMCRIT_EPS : 0) +
                                   (params.term_crit.max_iter >= 0 ? CV_TERMCRIT_ITER : 0);
        }
    }

    __END__;
}
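read_params() is not called directly; it runs when a stored model is deserialized. A minimal usage sketch (the file name is a placeholder):

void loadNetwork()
{
    CvANN_MLP mlp;
    mlp.load( "mlp.yml" );   // CvStatModel::load() ends up invoking read_params()
    printf( "Loaded network with %d layers\n", mlp.get_layer_count() );
}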
// Read the training data and train the network.
void trainMachine()
{
    int i;
    //The number of training samples.
    int train_sample_count;

    //The training data matrix.
    //Note that we are limiting the number of training data samples to 3000 here.
    //Each data sample consists of six inputs and an output. That's why 7.
    //td is the matrix the samples are loaded into
    float td[3000][7];

    //Read the training file
    /*
     A sample file's contents (from the original two-input "mean of two
     numbers" example; this variant reads 6 inputs + 1 output per line) would be:

     5
     12 16 14
     10 5  7.5
     8  10 9
     5  4  4.5
     12 6  9

     */
    FILE *fin;
    fin = fopen("train.txt", "r");

    //Get the number of samples.
    fscanf(fin, "%d", &train_sample_count);
    printf("Found training file with %d samples...\n", train_sample_count);

    //Create the matrices

    //Input data samples. Matrix of order (train_sample_count x 6)
    CvMat* trainData = cvCreateMat(train_sample_count, 6, CV_32FC1);

    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The matrix representation of our ANN. We'll have two layers.
    CvMat* neuralLayers = cvCreateMat(2, 1, CV_32SC1);

    CvMat trainData1, trainClasses1, neuralLayers1, sampleWts1;

    cvGetRows(trainData, &trainData1, 0, train_sample_count);
    cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count);
    cvGetRows(sampleWts, &sampleWts1, 0, train_sample_count);
    cvGetRows(neuralLayers, &neuralLayers1, 0, 2);

    //Setting the number of neurons on each layer of the ANN
    /*
     We have in Layer 1: 6 neurons (6 inputs)
                Layer 2: 1 neuron  (1 output)
     (the hidden layers from the original example are commented out below)
     */
    cvSet1D(&neuralLayers1, 0, cvScalar(6));
    //cvSet1D(&neuralLayers1, 1, cvScalar(3));
    //cvSet1D(&neuralLayers1, 2, cvScalar(3));
    cvSet1D(&neuralLayers1, 1, cvScalar(1));

    //Read and populate the samples (6 inputs + 1 output per line).
    for (i=0; i<train_sample_count; i++)
        fscanf(fin,"%f %f %f %f %f %f %f",
               &td[i][0],&td[i][1],&td[i][2],&td[i][3],&td[i][4],&td[i][5],&td[i][6]);

    fclose(fin);

    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //Inputs 1 through 6
        cvSetReal2D(&trainData1, i, 0, td[i][0]);
        cvSetReal2D(&trainData1, i, 1, td[i][1]);
        cvSetReal2D(&trainData1, i, 2, td[i][2]);
        cvSetReal2D(&trainData1, i, 3, td[i][3]);
        cvSetReal2D(&trainData1, i, 4, td[i][4]);
        cvSetReal2D(&trainData1, i, 5, td[i][5]);
        //Output
        cvSet1D(&trainClasses1, i, cvScalar(td[i][6]));
        //Weight (setting everything to 1)
        cvSet1D(&sampleWts1, i, cvScalar(1));
    }

    //Create our ANN.
    machineBrain.create(neuralLayers);

    //Train it with our data.
    //See the Machine learning reference at http://www.seas.upenn.edu/~bensapp/opencvdocs/ref/opencvref_ml.htm#ch_ann
    machineBrain.train(
        trainData,
        trainClasses,
        sampleWts,
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                100000,
                1.0
            ),
            CvANN_MLP_TrainParams::BACKPROP,
            0.01,
            0.05
        )
    );
}