Code Example #1
File: LevelCorrection.cpp Project: fughz/frayer
unsigned _stdcall UpdateImageThread(void* data)
{
	LPLEVEL_CORR_DATA pLCorrData = (LPLEVEL_CORR_DATA)data;
	double gamma = pLCorrData->gamma;
	int min_level = pLCorrData->min_level;
	int max_level = pLCorrData->max_level;
	int min_out_val = pLCorrData->min_out_val;
	int max_out_val = pLCorrData->max_out_val;
	int edit_channel_index = pLCorrData->edit_channel_index;
	CvMat* BGRA_LUT = pLCorrData->BGRA_LUT;
	RECT mask_rect = pLCorrData->mask_rect;
	EditNode* pEditNode = pLCorrData->pEditNode;
	EditLayerHandle* pEditLayerHandle = pLCorrData->pEditLayerHandle;

	int x,y;
	double v;
	double d;
	double tmp = FUNC_GAMMA(gamma);
	for(x=0; x<256; x++){
		v = pow((double)x/255, tmp);
		y = v*255.0;
		if(y < min_level) y = min_level;
		if(y > max_level) y = max_level;
		
		y = 255.0 * (y - min_level) / (max_level - min_level);
		d = ((double)y - min_level)/(max_level - min_level);
		y = d*(max_out_val - min_out_val) + min_out_val;
		//set look up table
		switch(edit_channel_index){
			case LC_RGB:
				cvSet1D(BGRA_LUT, x, cvScalar(y, y, y, x));
				break;
			case LC_R:
				cvSet1D(BGRA_LUT, x, cvScalar(x, x, y, x));
				break;
			case LC_G:
				cvSet1D(BGRA_LUT, x, cvScalar(x, y, x, x));
				break;
			case LC_B:
				cvSet1D(BGRA_LUT, x, cvScalar(y, x, x, x));
				break;
		}
	}

	pEditNode->edit_img.LUT(
		mask_rect.left,
		mask_rect.top,
		mask_rect.right - mask_rect.left,
		mask_rect.bottom - mask_rect.top,
		&(pEditNode->undo_img),
		mask_rect.left,
		mask_rect.top,
		BGRA_LUT);
	pEditLayerHandle->Update(&mask_rect);
	return 0;
}
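The same pattern works with the plain OpenCV C API: fill a 256-entry CvMat with cvSet1D, then remap the image with cvLUT. Below is a minimal single-channel sketch, assuming a simple gamma curve in place of the project's FUNC_GAMMA and level clamping; the file names and gamma value are placeholders.

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <math.h>

int main(void)
{
    IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!src) return 1;
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    // build the 256-entry lookup table: out = (in/255)^(1/gamma) * 255
    double gamma = 2.2;
    CvMat* lut = cvCreateMat(256, 1, CV_8UC1);
    for (int x = 0; x < 256; x++) {
        int y = (int)(pow(x / 255.0, 1.0 / gamma) * 255.0 + 0.5);
        cvSet1D(lut, x, cvScalarAll(y));
    }

    cvLUT(src, dst, lut);  // remap every pixel through the table

    cvSaveImage("gamma.png", dst);
    cvReleaseMat(&lut);
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    return 0;
}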
Code Example #2
/**
 * A method for appending an element to the end of a column vector.
 *
 * @param src A pointer to the source vector.
 * @param value Value to be appended to the end.
 *
 * @return Returns a pointer to the new, longer vector.
 */
CvMat* LibFaceUtils::addScalar(CvMat* src, CvScalar value)
{
    CvMat* result = cvCreateMat(src->rows+1,1,src->type);
    int i;

    for (i=0 ; i < src->rows ; ++i)
    {
        cvSet1D(result, i, cvGet1D(src, i));
    }

    cvSet1D(result, src->rows,value);

    return result;
}
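A hypothetical caller of addScalar, to make the ownership explicit: the function deep-copies src into a freshly allocated (rows+1) x 1 matrix, so both vectors must eventually be released. The values below are arbitrary.

CvMat* v = cvCreateMat(2, 1, CV_64FC1);
cvSet1D(v, 0, cvScalar(1.0));
cvSet1D(v, 1, cvScalar(2.0));

CvMat* w = LibFaceUtils::addScalar(v, cvScalar(3.0));  // new 3x1 vector
printf("%f\n", cvGet1D(w, 2).val[0]);                  // prints 3.000000

cvReleaseMat(&v);  // addScalar neither frees nor reuses the source
cvReleaseMat(&w);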
Code Example #3
File: classifierMLP.cpp Project: barak/mldemos
void ClassifierMLP::Train(std::vector< fvec > samples, ivec labels)
{
	u32 sampleCnt = samples.size();
	if(!sampleCnt) return;
	DEL(mlp);
	dim = samples[0].size();

	CvMat *layers;
//	if(neuronCount == 3) neuronCount = 2; // don't ask me why but 3 neurons mess up everything...

	if(!layerCount || neuronCount < 2)
	{
		layers = cvCreateMat(2,1,CV_32SC1);
		cvSet1D(layers, 0, cvScalar(dim));
		cvSet1D(layers, 1, cvScalar(1));
	}
	else
	{
		layers = cvCreateMat(2+layerCount,1,CV_32SC1);
		cvSet1D(layers, 0, cvScalar(dim));
		cvSet1D(layers, layerCount+1, cvScalar(1));
		FOR(i, layerCount) cvSet1D(layers, i+1, cvScalar(neuronCount));
	}

	u32 *perm = randPerm(sampleCnt);

	CvMat *trainSamples = cvCreateMat(sampleCnt, dim, CV_32FC1);
	CvMat *trainLabels = cvCreateMat(labels.size(), 1, CV_32FC1);
	CvMat *sampleWeights = cvCreateMat(samples.size(), 1, CV_32FC1);
	FOR(i, sampleCnt)
	{
		FOR(d, dim) cvSetReal2D(trainSamples, i, d, samples[perm[i]][d]);
		cvSet1D(trainLabels, i, cvScalar(labels[perm[i]]));
		cvSet1D(sampleWeights, i, cvScalar(1));
	}
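The snippet above is cut off before the network is created and trained. Under the snippet's own names, the remaining steps with OpenCV's CvANN_MLP would typically look like the following sketch; this is a hypothetical completion with placeholder parameter values, not the actual mldemos code.

	// hypothetical completion -- parameter values are placeholders
	mlp = new CvANN_MLP();
	mlp->create(layers, CvANN_MLP::SIGMOID_SYM, 1, 1);
	CvANN_MLP_TrainParams params(
		cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.001),
		CvANN_MLP_TrainParams::BACKPROP, 0.1, 0.1);
	mlp->train(trainSamples, trainLabels, sampleWeights, 0, params);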
Code Example #4
File: quantify.c Project: haldai/Logic-Vision
// use k-means to reduce color number
MyQuantifiedImage* kmeansQuantification(IplImage* img, int tableSize) {

    // step 1: transfer image to kmeans samples
    int sample_count = img->height * img->width;
    CvMat* samples = cvCreateMat(sample_count, 1, CV_32FC3);
    CvRNG rng = cvRNG(0xffffffff);

    int idx = 0;
    for (int i = 0; i < img->height; i++) {
	for (int j = 0; j < img->width; j++) {
	    cvSet1D(samples, idx++, cvGet2D(img, i, j));
	}
    }
    
    // step 2: apply kmeans;
    CvMat* labels = cvCreateMat(sample_count, 1, CV_32SC1);
    CvMat* centers = cvCreateMat(tableSize, 1, CV_32FC3);
    cvSetZero(labels);
    cvSetZero(centers);
    
    cvKMeans2(samples, tableSize, labels,
	      cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 
			     10, CV_KMEANS_ACC), 
	      CV_KMEANS_ATTEMPTS, &rng,
	      CV_KMEANS_PP_CENTERS, centers, 0); // flag = KMEANS_PP_CENTERS

    // step 3: rebuild the image
    IplImage* quantImg = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 3);
    CvMat* labelImg = cvCreateMat(img->height, img->width, CV_32SC1);
    cvSetZero(quantImg);
    cvSetZero(labelImg);
    
    idx = 0;
    for (int i = 0; i < img->height; i++) {
	for (int j = 0; j < img->width; j++) {
	    int cluster_idx = labels->data.i[idx++];
	    CvScalar color = cvGet1D(centers, cluster_idx);
	    cvSet2D(quantImg, i, j, color);
	    cvSetReal2D(labelImg, i, j, (double) cluster_idx);
	}
    }

    MyQuantifiedImage* re = malloc(sizeof(MyQuantifiedImage));
    re->labelMat = labelImg;
    re->qImg = quantImg;
    re->tableSize = tableSize;
    
    CvScalar* colorTable = calloc(tableSize, sizeof(CvScalar));
    for (int i = 0; i < tableSize; i++) {
	colorTable[i] = cvGet1D(centers, i);
    }
    re->colorTable = colorTable;

    // release intermediates that are not part of the returned struct
    cvReleaseMat(&samples);
    cvReleaseMat(&labels);
    cvReleaseMat(&centers);

    return re;
}
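A hypothetical driver for kmeansQuantification: quantize an image to 8 colors and save the result. Since the cluster centers come straight from 8-bit pixels, the 32-bit float output still holds values in 0..255, so a plain cvConvertScale suffices before saving. Names and paths are placeholders.

int main(void)
{
    IplImage* img = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
    if (!img) return 1;

    MyQuantifiedImage* q = kmeansQuantification(img, 8);

    IplImage* out = cvCreateImage(cvGetSize(q->qImg), IPL_DEPTH_8U, 3);
    cvConvertScale(q->qImg, out, 1.0, 0);  // 32F -> 8U, values already 0..255
    cvSaveImage("quantized.png", out);

    cvReleaseImage(&out);
    cvReleaseImage(&q->qImg);
    cvReleaseMat(&q->labelMat);
    free(q->colorTable);
    free(q);
    cvReleaseImage(&img);
    return 0;
}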
Code Example #5
/**
 *  Method for accessing a column of a matrix.
 *
 *  @param src A pointer to a matrix.
 *  @param col Index of the column to be returned.
 *
 *  @return Returns a pointer to a new vector holding a copy of the column.
 */
CvMat* LibFaceUtils::getColoumn(CvMat* src, int col)
{
    int i;
    CvMat* result = cvCreateMat(src->rows, 1, src->type);

    for (i = 0 ; i < src->rows ; ++i)
    {
        cvSet1D(result, i, cvGet2D(src, i, col));
    }

    return result;
}
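When a deep copy is not needed, the C API's cvGetCol produces a header that references the source data directly, with no allocation. A minimal sketch, given a CvMat* src as above (the column index is a placeholder):

CvMat col_header;
CvMat* col = cvGetCol(src, &col_header, 3);  // view of column 3; shares data with src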
Code Example #6
/**
 * Method for reshaping a matrix into a single column vector.
 *
 * @param src A pointer to the matrix to be reshaped.
 *
 * @return Returns the pointer to the reshaped vector.
 */
CvMat* LibFaceUtils::reshape(CvMat* src)
{
    int i, j, t = 0;
    CvMat* row  = cvCreateMat(src->rows*src->cols, 1, src->type);

    for (i=0; i < src->rows; ++i)
    {
        for (j=0; j < src->cols; ++j)
        {
            cvSet1D(row, t, cvGet2D(src, i, j));
            t++;
        }
    }

    return row;
}
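cvReshape offers a copy-free alternative: it reinterprets the matrix through a new header over the same data, in the same row-major order the loop above traverses. A sketch, given a CvMat* src:

CvMat header;
CvMat* col = cvReshape(src, &header, 0, src->rows * src->cols);  // (rows*cols) x 1 view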
Code Example #7
/**
 *  Method for calculating the mean value (of channel 0) of every row.
 *
 *  @param src A pointer to a matrix.
 *
 *  @return Returns a pointer to a column vector containing the mean of every row.
 */
CvMat* LibFaceUtils::mean(CvMat* src)
{
    int    i, j;
    double mean;
    CvMat* result = cvCreateMat(src->rows, 1, src->type);

    for (i = 0; i < src->rows; ++i)
    {
        mean = 0;
        for (j = 0; j < src->cols; ++j)
        {
            mean = mean + cvGet2D(src, i, j).val[0];
        }
        mean = mean / src->cols;

        cvSet1D(result, i, cvScalarAll(mean));
    }

    return result;
}
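The same per-row mean (of channel 0) is also available from the built-in cvReduce. A sketch, given a single-channel CvMat* src:

CvMat* result = cvCreateMat(src->rows, 1, CV_64FC1);
cvReduce(src, result, 1, CV_REDUCE_AVG);  // dim=1: collapse each row to its mean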
Code Example #8
//---------------------------------------------------------------
// Function : cv_ColorExtraction
// Overview : Color extraction
// Arguments: src_img   = input image (8-bit, 3 channels)
//          : dst_img   = output image (8-bit, 3 channels)
//          : code      = color space conversion code (CV_BGR2HSV, CV_BGR2Lab, etc.)
//          : ch1_lower = lower threshold for ch1
//          : ch1_upper = upper threshold for ch1
//          : ch2_lower = lower threshold for ch2
//          : ch2_upper = upper threshold for ch2
//          : ch3_lower = lower threshold for ch3
//          : ch3_upper = upper threshold for ch3
// Returns  : none
// Notes    : If lower <= upper, pixels with lower <= value <= upper are extracted;
//          : if lower >  upper, pixels with value <= upper or value >= lower are extracted.
//---------------------------------------------------------------
void cv_ColorExtraction(IplImage* src_img, IplImage* dst_img,
                        int code,
                        int ch1_lower, int ch1_upper,
                        int ch2_lower, int ch2_upper,
                        int ch3_lower, int ch3_upper
                       ) {

    int i, k;

    IplImage *Color_img;
    IplImage *ch1_img, *ch2_img, *ch3_img;
    IplImage *Mask_img;

    int lower[3];
    int upper[3];
    int val[3];

    CvMat *lut;

    //Color conversion based on code
    Color_img = cvCreateImage(cvGetSize(src_img), src_img->depth, src_img->nChannels);
    cvCvtColor(src_img, Color_img, code);

    //Create a 3-channel LUT
    lut    = cvCreateMat(256, 1, CV_8UC3);

    lower[0] = ch1_lower;
    lower[1] = ch2_lower;
    lower[2] = ch3_lower;

    upper[0] = ch1_upper;
    upper[1] = ch2_upper;
    upper[2] = ch3_upper;

    for (i = 0; i < 256; i++) {
        for (k = 0; k < 3; k++) {
            if (lower[k] <= upper[k]) {
                if ((lower[k] <= i) && (i <= upper[k])) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            } else {
                if ((i <= upper[k]) || (lower[k] <= i)) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            }
        }
        //Set the LUT entry
        cvSet1D(lut, i, cvScalar(val[0], val[1], val[2]));
    }

    //Apply the LUT to all 3 channels (binarizing each channel)
    cvLUT(Color_img, Color_img, lut);
    cvReleaseMat(&lut);

    //Allocate an IplImage for each channel
    ch1_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch2_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch3_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);

    //Split the binarized image into its individual channels
    cvSplit(Color_img, ch1_img, ch2_img, ch3_img, NULL);

    //AND all 3 channels together to create the mask image.
    Mask_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    cvAnd(ch1_img, ch2_img, Mask_img);
    cvAnd(Mask_img, ch3_img, Mask_img);

    //Copy the masked region of the input image (src_img) to the output image (dst_img)
    cvZero(dst_img);
    cvCopy(src_img, dst_img, Mask_img);

    //Release resources
    cvReleaseImage(&Color_img);
    cvReleaseImage(&ch1_img);
    cvReleaseImage(&ch2_img);
    cvReleaseImage(&ch3_img);
    cvReleaseImage(&Mask_img);

}
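A hypothetical call extracting reddish pixels in HSV space. OpenCV stores 8-bit hue in 0..180, and red straddles the wrap-around point, so lower > upper selects exactly the wrapped range the header comment describes. Thresholds and the file name are placeholders.

IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
IplImage* dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
cv_ColorExtraction(src, dst, CV_BGR2HSV,
                   170, 10,     // ch1 (H): wrapped range around red
                   100, 255,    // ch2 (S)
                   100, 255);   // ch3 (V)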
Code Example #9
File: opencv_nnet.cpp Project: johanvdw/saga-debian
//---------------------------------------------------------
bool COpenCV_NNet::On_Execute(void)
{
	//-------------------------------------------------
	bool					b_updateWeights, b_noInputScale, b_noOutputScale, b_NoData;
	int						i_matType, i_layers, i_maxIter, i_neurons, i_areasClassId, i_trainFeatTotalCount, *i_outputFeatureIdxs, i_outputFeatureCount, i_Grid, x, y, i_evalOut, i_winner;
	double					d_alpha, d_beta, d_eps;
	DATA_TYPE				e_dataType;
	TRAINING_METHOD			e_trainMet;
	ACTIVATION_FUNCTION		e_actFunc;
	CSG_Table				*t_Weights, *t_Indices, *t_TrainInput, *t_EvalInput, *t_EvalOutput;
	CSG_Parameter_Grid_List	*gl_TrainInputs;
	CSG_Grid				*g_EvalOutput, *g_EvalOutputCert;
	CSG_Shapes				*s_TrainInputAreas;
	CSG_Parameters			*p_TrainFeatures;
	TSG_Point				p;
	CvMat					*mat_Weights, *mat_Indices, **mat_data, *mat_neuralLayers, mat_layerSizesSub, *mat_EvalInput, *mat_EvalOutput;	// todo: mat_indices to respect input indices, mat_weights for initialization
	CvANN_MLP_TrainParams	tp_trainParams;
	CvANN_MLP				model;

	b_updateWeights		= Parameters("UPDATE_WEIGHTS"							)->asBool();
	b_noInputScale		= Parameters("NO_INPUT_SCALE"							)->asBool();
	b_noOutputScale		= Parameters("NO_OUTPUT_SCALE"							)->asBool();
	i_layers			= Parameters("NNET_LAYER"								)->asInt();
	i_neurons			= Parameters("NNET_NEURONS"								)->asInt();
	i_maxIter			= Parameters("MAX_ITER"									)->asInt();
	i_areasClassId		= Parameters("TRAIN_INPUT_AREAS_CLASS_FIELD"			)->asInt();
	e_dataType			= (DATA_TYPE)Parameters("DATA_TYPE"						)->asInt();
	e_trainMet			= (TRAINING_METHOD)Parameters("TRAINING_METHOD"			)->asInt();
	e_actFunc			= (ACTIVATION_FUNCTION)Parameters("ACTIVATION_FUNCTION"	)->asInt();
	d_alpha				= Parameters("ALPHA"									)->asDouble();
	d_beta				= Parameters("BETA"										)->asDouble();
	d_eps				= Parameters("EPSILON"									)->asDouble();
	t_Weights			= Parameters("WEIGHTS"									)->asTable();
	t_Indices			= Parameters("INDICES"									)->asTable();
	t_TrainInput		= Parameters("TRAIN_INPUT_TABLE"						)->asTable();
	t_EvalInput			= Parameters("EVAL_INPUT_TABLE"							)->asTable();
	t_EvalOutput		= Parameters("EVAL_OUTPUT_TABLE"						)->asTable();
	p_TrainFeatures		= Parameters("TRAIN_FEATURES_TABLE"						)->asParameters();
	gl_TrainInputs		= Parameters("TRAIN_INPUT_GRIDS"						)->asGridList();
	g_EvalOutput		= Parameters("EVAL_OUTPUT_GRID_CLASSES"					)->asGrid();
	g_EvalOutputCert	= Parameters("EVAL_OUTPUT_GRID_CERTAINTY"				)->asGrid();
	s_TrainInputAreas	= Parameters("TRAIN_INPUT_AREAS"						)->asShapes();

	// Fixed matrix type (TODO: Analyze what to do for other types of data (i.e. images))
	i_matType = CV_32FC1;

	//-------------------------------------------------
	if (e_dataType == TABLE)
	{	
		// We are working with TABLE data
		if( t_TrainInput->Get_Count() == 0 || p_TrainFeatures->Get_Count() == 0 )
		{
			Error_Set(_TL("Select an input table and at least one output feature!"));
			return( false );
		}

		// Count the total number of available features
		i_trainFeatTotalCount = t_TrainInput->Get_Field_Count();

		// Count the number of selected output features
		i_outputFeatureIdxs = (int *)SG_Calloc(i_trainFeatTotalCount, sizeof(int));
		i_outputFeatureCount = 0;
	
		for(int i=0; i<p_TrainFeatures->Get_Count(); i++)
		{
			if( p_TrainFeatures->Get_Parameter(i)->asBool() )
			{
				i_outputFeatureIdxs[i_outputFeatureCount++] = CSG_String(p_TrainFeatures->Get_Parameter(i)->Get_Identifier()).asInt();
			}
		}

		// Update the number of training features
		i_trainFeatTotalCount = i_trainFeatTotalCount-i_outputFeatureCount;

		if( i_outputFeatureCount <= 0 )
		{
			Error_Set(_TL("Select at least one output feature!"));
			return( false );
		}

		// Now convert the input and output training data into OpenCV matrix objects
		mat_data = GetTrainAndOutputMatrix(t_TrainInput, i_matType, i_outputFeatureIdxs, i_outputFeatureCount);
	}
	else
	{
		// TODO: Add some grid validation logic
		i_trainFeatTotalCount = gl_TrainInputs->Get_Count();
		i_outputFeatureCount = s_TrainInputAreas->Get_Count();

		// Convert the data from the grid into the matrix form
		mat_data = GetTrainAndOutputMatrix(gl_TrainInputs, i_matType, s_TrainInputAreas, i_areasClassId, g_EvalOutput, g_EvalOutputCert);
	}

	//-------------------------------------------------
	// Add two additional layers to the network topology (the 0-th layer for input and the last one for output)
	i_layers = i_layers + 2;
	mat_neuralLayers = cvCreateMat(i_layers, 1, CV_32SC1);
	cvGetRows(mat_neuralLayers, &mat_layerSizesSub, 0, i_layers);
	
	//Setting the number of neurons on each layer
	for (int i = 0; i < i_layers; i++)
	{
		if (i == 0)
		{
			// The first layer needs the same size (number of neurons) as the number of columns in the training data
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_trainFeatTotalCount));
		}
		else if (i == i_layers-1)
		{
			// The last layer needs the same size (number of neurons) as the number of output columns
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_outputFeatureCount));
		}
		else
		{
			// On every other layer set the layer size selected by the user
			cvSet1D(&mat_layerSizesSub, i, cvScalar(i_neurons));	
		}
	}

	//-------------------------------------------------
	// Create the training params object
	tp_trainParams = CvANN_MLP_TrainParams();
	tp_trainParams.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, i_maxIter, d_eps);

	// Check which training method was selected and set corresponding params
	if(e_trainMet == RPROP)
	{
		// Set all RPROP specific params
		tp_trainParams.train_method = CvANN_MLP_TrainParams::RPROP;
		tp_trainParams.rp_dw0 = Parameters("RP_DW0"				)->asDouble();
		tp_trainParams.rp_dw_plus = Parameters("RP_DW_PLUS"		)->asDouble();
		tp_trainParams.rp_dw_minus = Parameters("RP_DW_MINUS"	)->asDouble();
		tp_trainParams.rp_dw_min = Parameters("RP_DW_MIN"		)->asDouble();
		tp_trainParams.rp_dw_max = Parameters("RP_DW_MAX"		)->asDouble();
	}
	else
	{
		// Set all BPROP specific params
		tp_trainParams.train_method = CvANN_MLP_TrainParams::BACKPROP;
		tp_trainParams.bp_dw_scale = Parameters("BP_DW_SCALE"			)->asDouble();
		tp_trainParams.bp_moment_scale = Parameters("BP_MOMENT_SCALE"	)->asInt();
	}
	
	//-------------------------------------------------
	// Create the model (depending on the activation function)
	if(e_actFunc == SIGMOID)
	{
		model.create(mat_neuralLayers);
	}
	else
	{
		model.create(mat_neuralLayers, CvANN_MLP::GAUSSIAN, d_alpha, d_beta);
	}

	//-------------------------------------------------
	// Now train the network

	// TODO: Integrate init weights and indices for record selection
	// mat_Weights  = GetMatrix(t_Weights, i_matType);
	// mat_Indices = GetMatrix(t_Indices, i_matType);
	
	//model.train(mat_TrainInput, mat_TrainOutput, NULL, NULL, tp_trainParams);
	model.train(mat_data[0], mat_data[1], NULL, NULL, tp_trainParams);

	//-------------------------------------------------
	// Predict data
	if (e_dataType == TABLE)
	{
		// Get the evaluation/test matrix from the eval table
		mat_EvalInput = GetEvalMatrix(t_EvalInput, i_matType);
	}
	else
	{
		// Train and eval data overlap in grid mode
		mat_EvalInput = GetEvalMatrix(gl_TrainInputs, i_matType);
	}

	// Prepare output matrix
	mat_EvalOutput = cvCreateMat(mat_EvalInput->rows, i_outputFeatureCount, i_matType);

	// Start prediction
	model.predict(mat_EvalInput, mat_EvalOutput);

	Message_Add(_TL("Successfully trained the network and predicted the values. Here comes the output."));
	
	//-------------------------------------------------
	// Save and print results
	if (e_dataType == TABLE)
	{
		// DEBUG -> Save results to output table and print results
		for (int i = 0; i < i_outputFeatureCount; i++)
		{
			t_EvalOutput->Add_Field(CSG_String(t_TrainInput->Get_Field_Name(i_outputFeatureIdxs[i])), SG_DATATYPE_Float);
		}
	
		for (int i = 0; i < mat_EvalOutput->rows; i++)
		{
			CSG_Table_Record* tr_record = t_EvalOutput->Add_Record();

			for (int j = 0; j < i_outputFeatureCount; j++)
			{
				float f_targetValue = mat_EvalOutput->data.fl[i*i_outputFeatureCount+j];
				tr_record->Set_Value(j, f_targetValue);
			}
		}
	}
	else
	{
		// Fill the output table
		for (int i = 0; i < i_outputFeatureCount; i++)
		{
			// TODO: Get the class name
			t_EvalOutput->Add_Field(CSG_String::Format(SG_T("CLASS_%d"), i), SG_DATATYPE_Float);
		}
	
		for (int i = 0; i < mat_EvalOutput->rows; i++)
		{
			CSG_Table_Record* tr_record = t_EvalOutput->Add_Record();

			for (int j = 0; j < i_outputFeatureCount; j++)
			{
				float f_targetValue = mat_EvalOutput->data.fl[i*i_outputFeatureCount+j];
				tr_record->Set_Value(j, f_targetValue);
			}
		}

		i_evalOut = 0;

		// Fill the output grid
		for(y=0, p.y=Get_YMin(); y<Get_NY() && Set_Progress(y); y++, p.y+=Get_Cellsize())
		{
			for(x=0, p.x=Get_XMin(); x<Get_NX(); x++, p.x+=Get_Cellsize())
			{
				for(i_Grid=0, b_NoData=false; i_Grid<gl_TrainInputs->Get_Count() && !b_NoData; i_Grid++)
				{
					// If there is one grid that has no data in this point p, then set the no data flag
					if( gl_TrainInputs->asGrid(i_Grid)->is_NoData(x, y) )
					{
						b_NoData = true;
					}
				}

				if (!b_NoData)
				{
					// We have data in all grids, so this is a point that was predicted
					// Get the winner class for this point and set it to the output grid
					float f_targetValue = 0;

					for (int j = 0; j < i_outputFeatureCount; j++)
					{
						if (mat_EvalOutput->data.fl[i_evalOut*i_outputFeatureCount+j] > f_targetValue)
						{
							// The current value is higher than the last one, so let's memorize the current class
							f_targetValue = mat_EvalOutput->data.fl[i_evalOut*i_outputFeatureCount+j];
							i_winner = j;
						}
					}

					// Now finally set the values to the grids
					g_EvalOutput->Set_Value(x, y, i_winner);
					g_EvalOutputCert->Set_Value(x, y, f_targetValue);

					i_evalOut++;
				}
			}
		}
	}

	return( true );
}
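The cvGetRows/cvSet1D pairing above works because the sub-matrix is only a header onto the parent's data: writes through mat_layerSizesSub land directly in mat_neuralLayers. A standalone illustration of the pattern (not SAGA code):

CvMat* mat = cvCreateMat(4, 1, CV_32SC1);
CvMat sub;
cvGetRows(mat, &sub, 0, 4);      // header onto rows 0..3; no copy
cvSet1D(&sub, 0, cvScalar(60));  // writes mat->data.i[0]
cvReleaseMat(&mat);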
Code Example #10
// Read the training data and train the network.
void trainMachine()
{
    int i;
    //The number of training samples. 
    int train_sample_count = 130;
    
    //The training data matrix. 
    float td[130][61];
    
    //Read the training file
    FILE *fin;
    fin = fopen("data/sonar_train.csv", "r");
    
    //Create the matrices    
    //Input data samples. Matrix of order (train_sample_count x 60)
    CvMat* trainData = cvCreateMat(train_sample_count, 60, CV_32FC1);
    
    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);
    
    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);
    
    //The matrix representation of our ANN. We'll have four layers.
    CvMat* neuralLayers = cvCreateMat(4, 1, CV_32SC1);
    
    //Setting the number of neurons on each layer of the ANN
    /* 
     We have in Layer 1: 60 neurons (60 inputs)
     Layer 2: 150 neurons (hidden layer)
     Layer 3: 225 neurons (hidden layer)
     Layer 4: 1 neuron (1 output)
     */
    cvSet1D(neuralLayers, 0, cvScalar(60));
    cvSet1D(neuralLayers, 1, cvScalar(150));
    cvSet1D(neuralLayers, 2, cvScalar(225));
    cvSet1D(neuralLayers, 3, cvScalar(1));
    
    //Read and populate the samples.
    for (i=0;i<train_sample_count;i++)
        fscanf(fin,"%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f",
               &td[i][0],&td[i][1],&td[i][2],&td[i][3],&td[i][4],&td[i][5],&td[i][6],&td[i][7],&td[i][8],&td[i][9],&td[i][10],&td[i][11],&td[i][12],&td[i][13],&td[i][14],&td[i][15],&td[i][16],&td[i][17],&td[i][18],&td[i][19],&td[i][20],&td[i][21],&td[i][22],&td[i][23],&td[i][24],&td[i][25],&td[i][26],&td[i][27],&td[i][28],&td[i][29],&td[i][30],&td[i][31],&td[i][32],&td[i][33],&td[i][34],&td[i][35],&td[i][36],&td[i][37],&td[i][38],&td[i][39],&td[i][40],&td[i][41],&td[i][42],&td[i][43],&td[i][44],&td[i][45],&td[i][46],&td[i][47],&td[i][48],&td[i][49],&td[i][50],&td[i][51],&td[i][52],&td[i][53],&td[i][54],&td[i][55],&td[i][56],&td[i][57],&td[i][58],&td[i][59],&td[i][60]);
    
    //we are done reading the file, so close it
    fclose(fin);
    
    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //inputs
        for (int j = 0; j < 60; j++) 
            cvSetReal2D(trainData, i, j, td[i][j]);
    
        //Output
        cvSet1D(trainClasses, i, cvScalar(td[i][60]));
        //Weight (setting everything to 1)
        cvSet1D(sampleWts, i, cvScalar(1));
    }
    
    //Create our ANN.
    ann.create(neuralLayers);
    cout << "training...\n";
    //Train it with our data.
    ann.train(
        trainData,
        trainClasses,
        sampleWts,
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                100000,
                0.000001),
            CvANN_MLP_TrainParams::BACKPROP,
            0.01,
            0.05));
}
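A hypothetical companion to trainMachine above, pushing a single 60-feature sample through the trained network; ann is the global CvANN_MLP that trainMachine trains, and the helper name is made up.

void predictSample(const float sample[60])
{
    CvMat* input  = cvCreateMat(1, 60, CV_32FC1);
    CvMat* output = cvCreateMat(1, 1, CV_32FC1);

    for (int j = 0; j < 60; j++)
        cvSetReal2D(input, 0, j, sample[j]);

    ann.predict(input, output);
    printf("prediction: %f\n", cvGet1D(output, 0).val[0]);

    cvReleaseMat(&input);
    cvReleaseMat(&output);
}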
Code Example #11
File: macduff.cpp Project: floored/matcap
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img,
        CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );
        
    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);
        
    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];
    
    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }
    
    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);
    
    if( macbeth_img )
    {
        int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
        int threshold_type = CV_THRESH_BINARY_INV;
        int block_size = cvRound(
            MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
        fprintf(stderr,"Using %d as block size\n", block_size);
        
        double offset = 6;
        
        // do an adaptive threshold on each channel
        for(int i = 0; i < 3; i++) {
            cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
        }
        
        IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );
        
        // OR the binary threshold results together
        cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
        cvOr(macbeth_split_thresh[2],adaptive,adaptive);
        
        for(int i = 0; i < 3; i++) {
            cvReleaseImage( &(macbeth_split[i]) );
            cvReleaseImage( &(macbeth_split_thresh[i]) );
        }
                
        int element_size = (block_size/10)+2;
        fprintf(stderr,"Using %d as element size\n", element_size);
        
        // do an opening on the threshold image
        IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
        cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
        cvReleaseStructuringElement(&element);
        
        CvMemStorage* storage = cvCreateMemStorage(0);
        
        CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
        CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );
        
        // find contours in the threshold image
        CvSeq * contours = NULL;
        cvFindContours(adaptive,storage,&contours);
        
        int min_size = (macbeth_img->width*macbeth_img->height)/
            (MACBETH_SQUARES*100);
        
        if(contours) {
            int count = 0;
            
            for( CvSeq* c = contours; c != NULL; c = c->h_next) {
                CvRect rect = ((CvContour*)c)->rect;
                // only interested in contours with these restrictions
                if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                    // only interested in quad-like contours
                    CvSeq * quad_contour = find_quad(c, storage, min_size);
                    if(quad_contour) {
                        cvSeqPush( initial_quads, &quad_contour );
                        count++;
                        rect = ((CvContour*)quad_contour)->rect;
                        
                        CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);
                        
                        CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                        cvSeqPush( initial_boxes, &box );
                        
                        // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y);
                        
                        double min_distance = MAX_RGB_DISTANCE;
                        CvPoint closest_color_idx = cvPoint(-1,-1);
                        for(int y = 0; y < MACBETH_HEIGHT; y++) {
                            for(int x = 0; x < MACBETH_WIDTH; x++) {
                                double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                                if(distance < min_distance) {
                                    closest_color_idx.x = x;
                                    closest_color_idx.y = y;
                                    min_distance = distance;
                                }
                            }
                        }
                        
                        CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x];
                        // fprintf(stderr,"Closest color: %f %f %f (%d %d)\n",
                        //     closest_color.val[2],
                        //     closest_color.val[1],
                        //     closest_color.val[0],
                        //     closest_color_idx.x,
                        //     closest_color_idx.y
                        // );
                        
                        // cvDrawContours(
                        //     macbeth_img,
                        //     quad_contour,
                        //     cvScalar(255,0,0),
                        //     cvScalar(0,0,255),
                        //     0,
                        //     element_size
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     cvScalarAll(255),
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     closest_color,
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*4,
                        //     average,
                        //     -1
                        // );
                        // CvRect rect = contained_rectangle(box);
                        // cvRectangle(
                        //     macbeth_img,
                        //     cvPoint(rect.x,rect.y),
                        //     cvPoint(rect.x+rect.width, rect.y+rect.height),
                        //     cvScalarAll(0),
                        //     element_size
                        // );
                    }
                }
            }
            
            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                fprintf(stderr," (probably a Passport)\n");
                
                CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );
                
                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }
                
                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }
                
                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters, 
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,
                                           10, 1.0 ) );
        
                for(int i = 0; i < initial_quads->total; i++) {
                    CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
                    int cluster_idx = clusters->data.i[i];
                    
                    cvSeqPush( partitioned_quads[cluster_idx],
                               cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx],
                               cvGetSeqElem(initial_boxes, i) );

                    // cvCircle(
                    //     macbeth_img,
                    //     cvPointFrom32f(pt),
                    //     element_size*2,
                    //     cvScalar(255*cluster_idx,0,255-(255*cluster_idx)),
                    //     -1
                    // );
                }
                
                ColorChecker partitioned_checkers[2];
                
                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] =
                        find_colorchecker(partitioned_quads[i], partitioned_boxes[i],
                                      storage, macbeth_img, macbeth_original);
                }
                
                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];
                
                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            }
            else { // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                                  storage, macbeth_img, macbeth_original);
            }
            
            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);
            
            // print out the colorchecker info
            for(int y = 0; y < MACBETH_HEIGHT; y++) {            
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);
                    
                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);
            
        }
                
        cvReleaseMemStorage( &storage );
        
        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );
        
        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );

    return NULL;
}
Code Example #12
File: macduff.cpp Project: floored/matcap
ColorChecker find_colorchecker(CvSeq * quads, CvSeq * boxes, CvMemStorage *storage, IplImage *image, IplImage *original_image)
{
    CvPoint2D32f box_corners[4];
    bool passport_box_flipped = false;
    bool rotated_box = false;
    
    CvMat* points = cvCreateMat( boxes->total , 1, CV_32FC2 );
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));
        cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
    }
    CvBox2D passport_box = cvMinAreaRect2(points,storage);
    fprintf(stderr,"Box:\n\tCenter: %f,%f\n\tSize: %f,%f\n\tAngle: %f\n",passport_box.center.x,passport_box.center.y,passport_box.size.width,passport_box.size.height,passport_box.angle);
    if(passport_box.angle < 0.0) {
      passport_box_flipped = true;
    }
    
    cvBoxPoints(passport_box, box_corners);
    // for(int i = 0; i < 4; i++)
    // {
    //   fprintf(stderr,"Box corner %d: %d,%d\n",i,cvPointFrom32f(box_corners[i]).x,cvPointFrom32f(box_corners[i]).y);
    // }
    
    // cvBox(passport_box, image, cvScalarAll(128), 10);
    
    if(euclidean_distance(cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1])) <
       euclidean_distance(cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))) {
        fprintf(stderr,"Box is upright, rotating\n");
        rotate_box(box_corners);
        rotated_box = true && passport_box_flipped;
    }

    double horizontal_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1]))/(double)(MACBETH_WIDTH-1);
    double vertical_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))/(double)(MACBETH_HEIGHT-1);
    double horizontal_slope = (box_corners[1].y - box_corners[0].y)/(box_corners[1].x - box_corners[0].x);
    double horizontal_mag = sqrt(1+pow(horizontal_slope,2));
    double vertical_slope = (box_corners[3].y - box_corners[0].y)/(box_corners[3].x - box_corners[0].x);
    double vertical_mag = sqrt(1+pow(vertical_slope,2));
    double horizontal_orientation = box_corners[0].x < box_corners[1].x ? -1 : 1;
    double vertical_orientation = box_corners[0].y < box_corners[3].y ? -1 : 1;
        
    fprintf(stderr,"Spacing is %f %f\n",horizontal_spacing,vertical_spacing);
    fprintf(stderr,"Slope is %f %f\n", horizontal_slope,vertical_slope);
    
    int average_size = 0;
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));
        
        CvRect rect = contained_rectangle(box);
        average_size += MIN(rect.width, rect.height);
    }
    average_size /= boxes->total;
    
    fprintf(stderr,"Average contained rect size is %d\n", average_size);
    
    CvMat * this_colorchecker = cvCreateMat(MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC3);
    CvMat * this_colorchecker_points = cvCreateMat( MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC2 );
    
    // calculate the averages for our oriented colorchecker
    for(int x = 0; x < MACBETH_WIDTH; x++) {
        for(int y = 0; y < MACBETH_HEIGHT; y++) {
            CvPoint2D32f row_start;
            
            if ( ((image->origin == IPL_ORIGIN_BL) || !rotated_box) && !((image->origin == IPL_ORIGIN_BL) && rotated_box) )
            {
                row_start.x = box_corners[0].x + vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y + vertical_spacing * y * (vertical_slope / vertical_mag);
            }
            else
            {
                row_start.x = box_corners[0].x - vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y - vertical_spacing * y * (vertical_slope / vertical_mag);
            }
            
            CvRect rect = cvRect(0,0,average_size,average_size);
            
            rect.x = row_start.x - horizontal_spacing * x * ( 1 / horizontal_mag ) * horizontal_orientation;
            rect.y = row_start.y - horizontal_spacing * x * ( horizontal_slope / horizontal_mag ) * vertical_orientation;
            
            cvSet2D(this_colorchecker_points, y, x, cvScalar(rect.x,rect.y));
            
            rect.x = rect.x - average_size / 2;
            rect.y = rect.y - average_size / 2;
            
            // cvRectangle(
            //     image,
            //     cvPoint(rect.x,rect.y),
            //     cvPoint(rect.x+rect.width, rect.y+rect.height),
            //     cvScalarAll(0),
            //     10
            // );
            
            CvScalar average_color = rect_average(rect, original_image);
            
            cvSet2D(this_colorchecker,y,x,average_color);
        }
    }
    
    double orient_1_error = check_colorchecker(this_colorchecker);
    cvFlip(this_colorchecker,NULL,-1);
    double orient_2_error = check_colorchecker(this_colorchecker);
    
    fprintf(stderr,"Orientation 1: %f\n",orient_1_error);
    fprintf(stderr,"Orientation 2: %f\n",orient_2_error);
    
    if(orient_1_error < orient_2_error) {
        cvFlip(this_colorchecker,NULL,-1);
    }
    else {
        cvFlip(this_colorchecker_points,NULL,-1);
    }
    
    // draw_colorchecker(this_colorchecker,this_colorchecker_points,image,average_size);
    
    ColorChecker found_colorchecker;
    
    found_colorchecker.error = MIN(orient_1_error,orient_2_error);
    found_colorchecker.values = this_colorchecker;
    found_colorchecker.points = this_colorchecker_points;
    found_colorchecker.size = average_size;
    
    return found_colorchecker;
}
Code Example #13
// Read the training data and train the network.
void trainMachine()
{
    int i;

    //The number of training samples.
    int train_sample_count;

    //The training data matrix. 
    //Note that we are limiting the number of training data samples to 10000 here.
    //The data sample consists of two inputs and an output. That's why 3.
    float td[10000][3];

    //Read the training file
    /*
       A sample file contents(say we are training the network for generating 
       the mean given two numbers) would be:

       5
       12 16 14
       10 5  7.5
       8  10 9
       5  4  4.5
       12 6  9

     */
    FILE *fin;
    fin = fopen("train.txt", "r");

    //Get the number of samples.
    fscanf(fin, "%d", &train_sample_count);
    printf("Found training file with %d samples...\n", train_sample_count);

    //Create the matrices

    //Input data samples. Matrix of order (train_sample_count x 2)
    CvMat* trainData = cvCreateMat(train_sample_count, 2, CV_32FC1);

    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The matrix representation of our ANN. We'll have four layers.
    CvMat* neuralLayers = cvCreateMat(4, 1, CV_32SC1);

    CvMat trainData1, trainClasses1, neuralLayers1, sampleWts1;

    cvGetRows(trainData, &trainData1, 0, train_sample_count);
    cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count);
    cvGetRows(sampleWts, &sampleWts1, 0, train_sample_count);
    cvGetRows(neuralLayers, &neuralLayers1, 0, 4);

    //Setting the number of neurons on each layer of the ANN
    /* 
       We have in Layer 1: 2 neurons (2 inputs)
       Layer 2: 3 neurons (hidden layer)
       Layer 3: 3 neurons (hidden layer)
       Layer 4: 1 neuron (1 output)
     */
    cvSet1D(&neuralLayers1, 0, cvScalar(2));
    cvSet1D(&neuralLayers1, 1, cvScalar(3));
    cvSet1D(&neuralLayers1, 2, cvScalar(3));
    cvSet1D(&neuralLayers1, 3, cvScalar(1));

    //Read and populate the samples.
    for (i=0;i<train_sample_count;i++)
        fscanf(fin,"%f %f %f",&td[i][0],&td[i][1],&td[i][2]);

    fclose(fin);

    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //Input 1
        cvSetReal2D(&trainData1, i, 0, td[i][0]);
        //Input 2
        cvSetReal2D(&trainData1, i, 1, td[i][1]);
        //Output
        cvSet1D(&trainClasses1, i, cvScalar(td[i][2]));
        //Weight (setting everything to 1)
        cvSet1D(&sampleWts1, i, cvScalar(1));
    }

    //Create our ANN.
    machineBrain.create(neuralLayers);//sigmoid 0 0 (the two activation-function parameters)

    //Train it with our data.   
    machineBrain.train(
        trainData,//inputs
        trainClasses,//outputs
        sampleWts,//weights of the input samples
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,//criteria type: CV_TERMCRIT_ITER, CV_TERMCRIT_EPS, or a combination of both
                10000000,//maximum number of iterations
                0.00000001//required accuracy: the change in weights between two iterations
                ),
            CvANN_MLP_TrainParams::BACKPROP,//backpropagation algorithm
            0.01,//explicitly tunable parameter: the learning rate (alpha)
            0.05                      //the momentum term
        )
    );
}
Code Example #14
// Read the training data and train the network.
void trainMachine()
{
    int i;
    //The number of training samples.
    int train_sample_count;

    //The training data matrix.
    //Note that we are limiting the number of training data samples to 3000 here.
    //Each data sample consists of six inputs and an output. That's why 7.
    //td is the matrix where the samples are loaded
    float td[3000][7];

    //Read the training file
    /*
     A sample file contents(say we are training the network for generating
     the mean given two numbers) would be:

     5
     12 16 14
     10 5  7.5
     8  10 9
     5  4  4.5
     12 6  9

     */
    FILE *fin;
    fin = fopen("train.txt", "r");

    //Get the number of samples.
    fscanf(fin, "%d", &train_sample_count);
    printf("Found training file with %d samples...\n", train_sample_count);

    //Create the matrices

    //Input data samples. Matrix of order (train_sample_count x 6)
    CvMat* trainData = cvCreateMat(train_sample_count, 6, CV_32FC1);

    //Output data samples. Matrix of order (train_sample_count x 1)
    CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The weight of each training data sample. We'll later set all to equal weights.
    CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1);

    //The matrix representation of our ANN. We'll have two layers.
    CvMat* neuralLayers = cvCreateMat(2, 1, CV_32SC1);

    CvMat trainData1, trainClasses1, neuralLayers1, sampleWts1;

    cvGetRows(trainData, &trainData1, 0, train_sample_count);
    cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count);
    cvGetRows(sampleWts, &sampleWts1, 0, train_sample_count);
    cvGetRows(neuralLayers, &neuralLayers1, 0, 2);

    //Setting the number of neurons on each layer of the ANN
    /*
     We have in Layer 1: 6 neurons (6 inputs)
                Layer 2: 1 neuron  (1 output)
     (the hidden layers are commented out below)
     */
    cvSet1D(&neuralLayers1, 0, cvScalar(6));
    //cvSet1D(&neuralLayers1, 1, cvScalar(3));
    //cvSet1D(&neuralLayers1, 2, cvScalar(3));
    cvSet1D(&neuralLayers1, 1, cvScalar(1));

    //Read and populate the samples (6 inputs + 1 output per line).
    for (i=0; i<train_sample_count; i++)
        fscanf(fin,"%f %f %f %f %f %f %f",
               &td[i][0],&td[i][1],&td[i][2],&td[i][3],&td[i][4],&td[i][5],&td[i][6]);

    fclose(fin);

    //Assemble the ML training data.
    for (i=0; i<train_sample_count; i++)
    {
        //Input 1
        cvSetReal2D(&trainData1, i, 0, td[i][0]);
        //Input 2
        cvSetReal2D(&trainData1, i, 1, td[i][1]);
        cvSetReal2D(&trainData1, i, 2, td[i][2]);
        cvSetReal2D(&trainData1, i, 3, td[i][3]);
        cvSetReal2D(&trainData1, i, 4, td[i][4]);
        cvSetReal2D(&trainData1, i, 5, td[i][5]);
        //Output
        cvSet1D(&trainClasses1, i, cvScalar(td[i][6]));
        //Weight (setting everything to 1)
        cvSet1D(&sampleWts1, i, cvScalar(1));
    }

    //Create our ANN.
    machineBrain.create(neuralLayers);

    //Train it with our data.
    //See the Machine learning reference at http://www.seas.upenn.edu/~bensapp/opencvdocs/ref/opencvref_ml.htm#ch_ann
    machineBrain.train(
        trainData,
        trainClasses,
        sampleWts,
        0,
        CvANN_MLP_TrainParams(
            cvTermCriteria(
                CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                100000,
                1.0
            ),
            CvANN_MLP_TrainParams::BACKPROP,
            0.01,
            0.05
        )
    );
}