Example #1
/*
 * Internal train function 
 */
float fann_train_epoch_sarprop(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}

	fann_reset_MSE(ann);

	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_backpropagate_MSE(ann);
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
	}

	fann_update_weights_sarprop(ann, ann->sarprop_epoch, 0, ann->total_connections);

	++(ann->sarprop_epoch);

	return fann_get_MSE(ann);
}
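For context, a minimal driver for internal epoch functions like the one above, sketched against the public FANN API (the file name and layer sizes are hypothetical): fann_train_epoch() dispatches to the internal function matching the configured training algorithm, e.g. fann_train_epoch_sarprop() when FANN_TRAIN_SARPROP is selected.

#include <stdio.h>
#include "fann.h"

int main(void)
{
	/* Hypothetical file name, for illustration only. */
	struct fann_train_data *data = fann_read_train_from_file("train.data");
	struct fann *ann;
	unsigned int epoch;

	if(data == NULL)
		return 1;
	ann = fann_create_standard(3, data->num_input, 8, data->num_output);
	fann_set_training_algorithm(ann, FANN_TRAIN_SARPROP);

	/* Each call runs one epoch and returns the MSE it produced. */
	for(epoch = 1; epoch <= 100; epoch++)
	{
		float mse = fann_train_epoch(ann, data);
		if(epoch % 10 == 0)
			printf("Epoch %u: MSE %f\n", epoch, mse);
	}

	fann_destroy_train(data);
	fann_destroy(ann);
	return 0;
}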
Example #2
int main( int argc, char** argv )
{
	fann_type *calc_out;
	unsigned int i;
	int ret = 0;
	struct fann *ann;
	struct fann_train_data *data;
	printf("Creating network.\n");
	ann = fann_create_from_file("xor_float.net");
	if(!ann)
	{
		printf("Error creating ann --- ABORTING.\n");
		return 0;
	}
	fann_print_connections(ann);
	fann_print_parameters(ann);
	printf("Testing network.\n");
	data = fann_read_train_from_file("5K.txt");
	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
		fann_scale_input( ann, data->input[i] );
		calc_out = fann_run( ann, data->input[i] );
		fann_descale_output( ann, calc_out );
		printf("Result %f original %f error %f\n",
			calc_out[0], data->output[i][0],
			(float) fann_abs(calc_out[0] - data->output[i][0]));
	}
	printf("Cleaning up.\n");
	fann_destroy_train(data);
	fann_destroy(ann);
	return ret;
}
Example #3
const float CNeuroNetwok::Recognize(const _tstring& sPath)
{
	if (!m_bIsNetworkTeached)
		throw std::runtime_error("You should teach network first!");

	// Restore the neural network from the file
	m_pANN = fann_create_from_file(NETWORK_FILE_NAME);
	if (!m_pANN)
	{
		std::string sError = "Failed to load data from: ";
		sError += NETWORK_FILE_NAME;
		throw std::runtime_error(sError);
	}

	// Load the data from the specified file
	std::list< float > BmpData;
	AnalizeBMP(sPath, BmpData);

	// Convert to training-data format
	TTrainData TestData;
	TestData.push_back(std::pair< std::list< float >, bool > (BmpData, false));
	boost::scoped_ptr< fann_train_data > pTestData(MakeTrainData(TestData));

#ifdef _DEBUG
	// For debugging
	fann_save_train(pTestData.get(), "debug_data.dat");
#endif

	// Get the result
	fann_reset_MSE(m_pANN);

	fann_type * pResult = fann_test(m_pANN, pTestData->input[0], pTestData->output[0]);

	return *pResult;
}
Example #4
bool Trainer::Test(const InputVector<float>& input_vector,
          const OutputVector<float>& desired_output,
          float* square_error, std::size_t* bit_fail) {
  fann_reset_MSE(ann_);
  tmp_input_vector_ = input_vector;
  tmp_output_vector_ = desired_output;
  fann_test(ann_, &tmp_input_vector_[0], &tmp_output_vector_[0]);
  return GetMseAndBitFail(ann_, &square_error, &bit_fail);
}
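GetMseAndBitFail() is not shown above. Note that the call passes the addresses of the pointer parameters, so the helper presumably takes double pointers; a plausible sketch (hypothetical, assuming it merely forwards the statistics that fann_test() accumulated on the network):

#include <cstddef>
#include "fann.h"

// Hypothetical helper: copy the accumulated error statistics into the
// caller-supplied out-parameters.
static bool GetMseAndBitFail(struct fann* ann,
                             float** square_error,
                             std::size_t** bit_fail) {
  if (ann == nullptr || *square_error == nullptr || *bit_fail == nullptr)
    return false;
  **square_error = fann_get_MSE(ann);   // mean square error seen so far
  **bit_fail = fann_get_bit_fail(ann);  // outputs beyond the bit-fail limit
  return true;
}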
Example #5
void cunit_xor_test(void)
{
	fann_type *calc_out = NULL;
	unsigned int i;
	int ret = 0;

	struct fann *ann = NULL;
	struct fann_train_data *data = NULL;

#ifdef FIXEDFANN
	ann = fann_create_from_file("xor_fixed.net");
#else
	ann = fann_create_from_file("xor_float.net");
#endif

	CU_ASSERT_PTR_NOT_NULL_FATAL(ann);

#ifdef FIXEDFANN
	data = fann_read_train_from_file("xor_fixed.data");
#else
	data = fann_read_train_from_file("xor.data");
#endif

	CU_ASSERT_PTR_NOT_NULL_FATAL(data);

	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
		calc_out = fann_test(ann, data->input[i], data->output[i]);

		CU_ASSERT_PTR_NOT_NULL_FATAL(calc_out);

#ifdef FIXEDFANN
		/*printf("XOR test (%d, %d) -> %d, should be %d, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann));*/

		if((float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann) > 0.2)
		{
			CU_FAIL("XOR test failed.");
			ret = -1;
		}
#else
		/*printf("XOR test (%f, %f) -> %f, should be %f, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]));*/
#endif
	}

	fann_destroy_train(data);
	fann_destroy(ann);
}
Example #6
/*
 * Internal train function 
 */
float fann_train_epoch_incremental(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;

	fann_reset_MSE(ann);

	for(i = 0; i != data->num_data; i++)
	{
		fann_train(ann, data->input[i], data->output[i]);
	}

	return fann_get_MSE(ann);
}
/*
 * Test a set of training data and calculate the MSE
 */
FANN_EXTERNAL float FANN_API fann_test_data(struct fann *ann, struct fann_train_data *data)
{
    unsigned int i;

    fann_reset_MSE(ann);

    for(i = 0; i != data->num_data; i++)
    {
        fann_test(ann, data->input[i], data->output[i]);
    }

    return fann_get_MSE(ann);
}
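A minimal caller for fann_test_data(), with hypothetical file names; note that it resets the MSE itself before iterating, so no separate fann_reset_MSE() call is needed:

#include <stdio.h>
#include "fann.h"

int main(void)
{
	struct fann *ann = fann_create_from_file("trained.net");
	struct fann_train_data *data = fann_read_train_from_file("test.data");

	if(ann == NULL || data == NULL)
		return 1;

	/* Runs every sample through fann_test() and returns the MSE. */
	printf("Test MSE: %f\n", fann_test_data(ann, data));

	fann_destroy_train(data);
	fann_destroy(ann);
	return 0;
}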
Example #8
int main()
{
	const unsigned int num_layers = 3;
	const unsigned int num_neurons_hidden = 32;
	const float desired_error = (const float) 0.0001;
	const unsigned int max_epochs = 300;
	const unsigned int epochs_between_reports = 10;
	struct fann *ann;
	struct fann_train_data *train_data, *test_data;

	unsigned int i = 0;

	printf("Creating network.\n");

	train_data = fann_read_train_from_file("../datasets/mushroom.train");

	ann = fann_create_standard(num_layers,
					  train_data->num_input, num_neurons_hidden, train_data->num_output);

	printf("Training network.\n");

	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
	fann_set_activation_function_output(ann, FANN_SIGMOID_STEPWISE);

	/*fann_set_training_algorithm(ann, FANN_TRAIN_INCREMENTAL); */

	fann_train_on_data(ann, train_data, max_epochs, epochs_between_reports, desired_error);

	printf("Testing network.\n");

	test_data = fann_read_train_from_file("../datasets/mushroom.test");

	fann_reset_MSE(ann);
	for(i = 0; i < fann_length_train_data(test_data); i++)
	{
		fann_test(ann, test_data->input[i], test_data->output[i]);
	}
	
	printf("MSE error on test data: %f\n", fann_get_MSE(ann));

	printf("Saving network.\n");

	fann_save(ann, "mushroom_float.net");

	printf("Cleaning up.\n");
	fann_destroy_train(train_data);
	fann_destroy_train(test_data);
	fann_destroy(ann);

	return 0;
}
Example #9
float train_epoch_debug(struct fann *ann, struct fann_train_data* data, unsigned int iter)
{
	unsigned int i;
#if VERBOSE>=2
	static unsigned int j=0;
#endif

#if ! MIMO_FANN
	if (ann->prev_train_slopes==NULL)
		fann_clear_train_arrays(ann);
#endif

	fann_reset_MSE(ann);

	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_backpropagate_MSE(ann);
#if ! MIMO_FANN
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
#endif

#if VERBOSE>=3
		printf("   ** %d:%d **-AFTER-DELTAS UPDATE-----------------------------------\n", iter, i);
		print_deltas(ann, j++);
#endif

	}
#if VERBOSE>=2
	printf("   ** %d **-BEFORE-WEIGHTS-UPDATE------------------------------------\n", iter);
	print_deltas(ann, j++);
#endif

#if ! MIMO_FANN
#if USE_RPROP
	fann_update_weights_irpropm(ann, 0, ann->total_connections);
#else
	fann_update_weights_batch(ann, data->num_data, 0, ann->total_connections);
#endif
#else /* MIMO_FANN */
	fann_update_weights(ann);
#endif

#if VERBOSE>=1
	printf("   ** %d **-AFTER-WEIGHTS-UPDATE-------------------------------------\n", iter);
	print_deltas(ann, j++);
#endif

	return fann_get_MSE(ann);
}
Example #10
void NeuralNet::runNet(char* ptrDataFileName){
    struct fann_train_data *ptrDataTest = fann_read_train_from_file(ptrDataFileName);
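    // Note: fann_test_data() already resets the MSE internally (see the
    // fann_test_data() listing in Example #6), so the reset below is
    // redundant but harmless.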
    fann_reset_MSE(this->ptrNeuralNet);
    fann_test_data(this->ptrNeuralNet, ptrDataTest);
    printf("Mean Square Error: %f\n", fann_get_MSE(this->ptrNeuralNet));

    fann_type *calc_out;
    for(int i = 0; i < fann_length_train_data(ptrDataTest); i++){
        calc_out = fann_run(this->ptrNeuralNet, ptrDataTest->input[i]);
        cout << "Sample testing:  "<< calc_out[0] << " " << ptrDataTest->output[i][0] << " " << fann_abs(calc_out[0] - ptrDataTest->output[i][0]) << endl;
    }


    fann_destroy_train(ptrDataTest);
}
Example #11
int 
main(int argc, char **argv)
{
	fann_type *calc_out;
	unsigned int i, j;
	struct fann *ann;
	struct fann_train_data *data;

	if (argc < 2) 
	{
		fprintf(stderr, "Usage: %s testFile\n", argv[0]);
		exit(1);
	}

	printf("Opening network `%s'\n", ARQ_RNA);
	ann = fann_create_from_file(ARQ_RNA);

	if (!ann)
	{
		fprintf(stderr, "Error creating the ANN.\n");
		return (1); 
	}

	//fann_print_connections(ann);
	//fann_print_parameters(ann);

	printf("Testing the ANN.\n");

	data = fann_read_train_from_file(argv[1]);

	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);

		calc_out = fann_run(ann, data->input[i]);

		printf("Result: %f ",   calc_out[0]);
		printf("Original: %f ", data->output[i][0]);
		printf("Error: %f\n",   (float) fann_abs(calc_out[0] - data->output[i][0]));
	}
	printf("Cleaning up memory.\n");
	fann_destroy_train(data);
	fann_destroy(ann);

	return (0);
}
Example #12
float test_data_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb, vector< vector<fann_type> >& predicted_outputs)
{
	if(fann_check_input_output_sizes(ann, data) == -1)
		return 0;
	predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
	fann_reset_MSE(ann);
	vector<struct fann *> ann_vect(threadnumb);
	int i=0,j=0;

		//generate copies of the ann
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(j)
		{

			#pragma omp for schedule(static)
			for(i=0; i<(int)threadnumb; i++)
			{
				ann_vect[i]=fann_copy(ann);
			}

			//parallel computing of the updates

	        #pragma omp for schedule(static)
			for(i = 0; i < (int)data->num_data; ++i)
			{
				j=omp_get_thread_num();

				fann_type* temp_predicted_output=fann_test(ann_vect[j], data->input[i],data->output[i]);
				for(unsigned int k=0;k<data->num_output;++k)
				{
					predicted_outputs[i][k]=temp_predicted_output[k];
				}

			}
		}
	//merge of MSEs
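	//fann_get_MSE() divides MSE_value (the accumulated squared error) by
	//num_MSE (the number of output values seen), so summing both fields
	//across the per-thread copies yields the MSE over the whole data set.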
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
Example #13
/*
 * Internal train function 
 */
float fann_train_epoch_batch(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;

	fann_reset_MSE(ann);

	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_backpropagate_MSE(ann);
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
	}

	fann_update_weights_batch(ann, data->num_data, 0, ann->total_connections);

	return fann_get_MSE(ann);
}
Example #14
float train_epoch_incremental_mod(struct fann *ann, struct fann_train_data *data, vector< vector<fann_type> >& predicted_outputs)
{

	predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
	fann_reset_MSE(ann);

	for(unsigned int i = 0; i < data->num_data; ++i)
	{
		fann_type* temp_predicted_output=fann_run(ann, data->input[i]);
		for(unsigned int k=0;k<data->num_output;++k)
		{
			predicted_outputs[i][k]=temp_predicted_output[k];
		}

		fann_compute_MSE(ann, data->output[i]);

		fann_backpropagate_MSE(ann);

		fann_update_weights(ann);
	}

	return fann_get_MSE(ann);
}
Example #15
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;
	
	fann_reset_MSE(ann);

	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_update_slopes_batch(ann, ann->last_layer - 1, ann->last_layer - 1);
	}

	switch (ann->training_algorithm)
	{
		case FANN_TRAIN_RPROP:
			fann_update_weights_irpropm(ann, (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			break;
		case FANN_TRAIN_SARPROP:
			fann_update_weights_sarprop(ann, ann->sarprop_epoch, (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			++(ann->sarprop_epoch);
			break;
		case FANN_TRAIN_QUICKPROP:
			fann_update_weights_quickprop(ann, data->num_data,
										  (ann->last_layer - 1)->first_neuron->first_con,
										  ann->total_connections);
			break;
		case FANN_TRAIN_BATCH:
		case FANN_TRAIN_INCREMENTAL:
			fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG);
	}

	return fann_get_MSE(ann);
}
Example #16
FANN_EXTERNAL float FANN_API fann_train_epoch_batch_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb)
{
	/*vector<struct fann *> ann_vect(threadnumb);*/
	struct fann** ann_vect= (struct fann**) malloc(threadnumb * sizeof(struct fann*));
	int i=0,j=0;
	fann_reset_MSE(ann);

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{

		#pragma omp for schedule(static)
		for(i=0; i<(int)threadnumb; i++)
		{
			ann_vect[i]=fann_copy(ann);
		}

    //parallel computing of the updates

        #pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; i++)
		{
			j=omp_get_thread_num();
			if (ann->do_dropout) {
				fann_run_dropout(ann_vect[j], data->input[i]);
			}
			else {
				fann_run(ann_vect[j], data->input[i]);
			}
			fann_compute_MSE(ann_vect[j], data->output[i]);
			fann_backpropagate_MSE(ann_vect[j]);
			fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
		}
	}

    //parallel update of the weights
	{
		const unsigned int num_data=data->num_data;
		const unsigned int first_weight=0;
		const unsigned int past_end=ann->total_connections;
		fann_type *weights = ann->weights;
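		/* Averaged-gradient batch step: the per-thread slope sums are
		 * combined below and applied scaled by learning_rate / num_data. */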
		const fann_type epsilon = ann->learning_rate / num_data;
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel
		{
			#pragma omp for schedule(static)
				for(i=first_weight; i < (int)past_end; i++)
				{
					fann_type temp_slopes=0.0;
					unsigned int k;
					fann_type *train_slopes;
					for(k=0;k<threadnumb;++k)
					{
						train_slopes=ann_vect[k]->train_slopes;
						temp_slopes+= train_slopes[i];
						train_slopes[i]=0.0;
					}
					weights[i] += temp_slopes*epsilon;
				}
			}
	}
	//merge of MSEs
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	free(ann_vect);
	return fann_get_MSE(ann);
}
Example #17
float train_epoch_sarprop_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb, vector< vector<fann_type> >& predicted_outputs)
{

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}


		fann_reset_MSE(ann);
		predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
		vector<struct fann *> ann_vect(threadnumb);
		int i=0,j=0;

		//generate copies of the ann
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(j)
		{

			#pragma omp for schedule(static)
			for(i=0; i<(int)threadnumb; i++)
			{
				ann_vect[i]=fann_copy(ann);
			}

	    //parallel computing of the updates

	        #pragma omp for schedule(static)
			for(i = 0; i < (int)data->num_data; i++)
			{
				j=omp_get_thread_num();

				fann_type* temp_predicted_output=fann_run(ann_vect[j], data->input[i]);
				for(unsigned int k=0;k<data->num_output;++k)
				{
					predicted_outputs[i][k]=temp_predicted_output[k];
				}
				fann_compute_MSE(ann_vect[j], data->output[i]);
				fann_backpropagate_MSE(ann_vect[j]);
				fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
			}
		}

    {
    	fann_type *weights = ann->weights;
    	fann_type *prev_steps = ann->prev_steps;
    	fann_type *prev_train_slopes = ann->prev_train_slopes;
		const unsigned int first_weight=0;
		const unsigned int past_end=ann->total_connections;
		const unsigned int epoch=ann->sarprop_epoch;

    	fann_type next_step;

    	/* These should be set from variables */
    	const float increase_factor = ann->rprop_increase_factor;	/*1.2; */
    	const float decrease_factor = ann->rprop_decrease_factor;	/*0.5; */
    	/* TODO: why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */
    	const float delta_min = 0.000001f;
    	const float delta_max = ann->rprop_delta_max;	/*50.0; */
    	const float weight_decay_shift = ann->sarprop_weight_decay_shift; /* ld 0.01 = -6.644 */
    	const float step_error_threshold_factor = ann->sarprop_step_error_threshold_factor; /* 0.1 */
    	const float step_error_shift = ann->sarprop_step_error_shift; /* ld 3 = 1.585 */
    	const float T = ann->sarprop_temperature;


    	//merge of MSEs
    	for(i=0;i<(int)threadnumb;++i)
    	{
    		ann->MSE_value+= ann_vect[i]->MSE_value;
    		ann->num_MSE+=ann_vect[i]->num_MSE;
    	}

    	const float MSE = fann_get_MSE(ann);
    	const float RMSE = (float)sqrt(MSE);

    	/* for all weights; TODO: are biases included? */
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(next_step)
		{
			#pragma omp for schedule(static)
				for(i=first_weight; i < (int)past_end; i++)
				{
					/* TODO: confirm whether 1x10^-6 == delta_min is really better */
					const fann_type prev_step  = fann_max(prev_steps[i], (fann_type) 0.000001);	/* prev_step may not be zero because then the training will stop */

					/* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)*/

					fann_type temp_slopes=0.0;
					unsigned int k;
					fann_type *train_slopes;
					for(k=0;k<threadnumb;++k)
					{
						train_slopes=ann_vect[k]->train_slopes;
						temp_slopes+= train_slopes[i];
						train_slopes[i]=0.0;
					}
					temp_slopes= -temp_slopes - weights[i] * (fann_type)fann_exp2(-T * epoch + weight_decay_shift);

					next_step=0.0;

					/* TODO: is prev_train_slopes[i] 0.0 in the beginning? */
					const fann_type prev_slope = prev_train_slopes[i];

					const fann_type same_sign = prev_slope * temp_slopes;

					if(same_sign > 0.0)
					{
						next_step = fann_min(prev_step * increase_factor, delta_max);
						/* TODO: are the signs inverted? see differences between SARPROP paper and iRprop */
						if (temp_slopes < 0.0)
							weights[i] += next_step;
						else
							weights[i] -= next_step;
					}
					else if(same_sign < 0.0)
					{
						#ifndef RAND_MAX
						#define	RAND_MAX	0x7fffffff
						#endif
						if(prev_step < step_error_threshold_factor * MSE)
							next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (fann_type)fann_exp2(-T * epoch + step_error_shift);
						else
							next_step = fann_max(prev_step * decrease_factor, delta_min);

						temp_slopes = 0.0;
					}
					else
					{
						if(temp_slopes < 0.0)
							weights[i] += prev_step;
						else
							weights[i] -= prev_step;
					}

					/* update global data arrays */
					prev_steps[i] = next_step;
					prev_train_slopes[i] = temp_slopes;

				}
		}
    }

	++(ann->sarprop_epoch);

	//already computed before
	/*//merge of MSEs
	for(i=0;i<threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
	}*/
	//destroy the copies of the ann
	for(i=0; i<(int)threadnumb; i++)
	{
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
Example #18
float train_epoch_irpropm_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb)
{

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}

	//#define THREADNUM 1
	fann_reset_MSE(ann);

	vector<struct fann *> ann_vect(threadnumb);
	int i=0,j=0;

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{

		#pragma omp for schedule(static)
		for(i=0; i<(int)threadnumb; i++)
		{
			ann_vect[i]=fann_copy(ann);
		}

    //parallel computing of the updates


        #pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; i++)
		{
			j=omp_get_thread_num();
			fann_run(ann_vect[j], data->input[i]);
			fann_compute_MSE(ann_vect[j], data->output[i]);
			fann_backpropagate_MSE(ann_vect[j]);
			fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
		}
	}

	{
    	fann_type *weights = ann->weights;
    	fann_type *prev_steps = ann->prev_steps;
    	fann_type *prev_train_slopes = ann->prev_train_slopes;

    	fann_type next_step;

    	const float increase_factor = ann->rprop_increase_factor;	//1.2;
    	const float decrease_factor = ann->rprop_decrease_factor;	//0.5;
    	const float delta_min = ann->rprop_delta_min;	//0.0;
    	const float delta_max = ann->rprop_delta_max;	//50.0;
		const unsigned int first_weight=0;
		const unsigned int past_end=ann->total_connections;

		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(next_step)
		{
			#pragma omp for schedule(static)
				for(i=first_weight; i < (int)past_end; i++)
				{

		    		const fann_type prev_step = fann_max(prev_steps[i], (fann_type) 0.0001);	// prev_step may not be zero because then the training will stop

		    		fann_type temp_slopes=0.0;
					unsigned int k;
					fann_type *train_slopes;
					for(k=0;k<threadnumb;++k)
					{
						train_slopes=ann_vect[k]->train_slopes;
						temp_slopes+= train_slopes[i];
						train_slopes[i]=0.0;
					}

		    		const fann_type prev_slope = prev_train_slopes[i];

		    		const fann_type same_sign = prev_slope * temp_slopes;

		    		if(same_sign >= 0.0)
		    			next_step = fann_min(prev_step * increase_factor, delta_max);
		    		else
		    		{
		    			next_step = fann_max(prev_step * decrease_factor, delta_min);
		    			temp_slopes = 0;
		    		}

		    		if(temp_slopes < 0)
		    		{
		    			weights[i] -= next_step;
		    			if(weights[i] < -1500)
		    				weights[i] = -1500;
		    		}
		    		else
		    		{
		    			weights[i] += next_step;
		    			if(weights[i] > 1500)
		    				weights[i] = 1500;
		    		}

		    		// update global data arrays
		    		prev_steps[i] = next_step;
		    		prev_train_slopes[i] = temp_slopes;

				}
			}
	}

	//merge of MSEs
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
Example #19
float train_epoch_quickprop_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb, vector< vector<fann_type> >& predicted_outputs)
{

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}


		fann_reset_MSE(ann);
		predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
		vector<struct fann *> ann_vect(threadnumb);
		int i=0,j=0;

		//generate copies of the ann
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(j)
		{

			#pragma omp for schedule(static)
			for(i=0; i<(int)threadnumb; i++)
			{
				ann_vect[i]=fann_copy(ann);
			}

	    //parallel computing of the updates

	        #pragma omp for schedule(static)
			for(i = 0; i < (int)data->num_data; i++)
			{
				j=omp_get_thread_num();

				fann_type* temp_predicted_output=fann_run(ann_vect[j], data->input[i]);
				for(unsigned int k=0;k<data->num_output;++k)
				{
					predicted_outputs[i][k]=temp_predicted_output[k];
				}
				fann_compute_MSE(ann_vect[j], data->output[i]);
				fann_backpropagate_MSE(ann_vect[j]);
				fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
			}
		}

    {
    	fann_type *weights = ann->weights;
    	fann_type *prev_steps = ann->prev_steps;
    	fann_type *prev_train_slopes = ann->prev_train_slopes;
		const unsigned int first_weight=0;
		const unsigned int past_end=ann->total_connections;

    	fann_type w=0.0, next_step;

    	const float epsilon = ann->learning_rate / data->num_data;
    	const float decay = ann->quickprop_decay;	/*-0.0001;*/
    	const float mu = ann->quickprop_mu;	/*1.75; */
    	const float shrink_factor = (float) (mu / (1.0 + mu));

		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(w, next_step)
		{
			#pragma omp for schedule(static)
				for(i=first_weight; i < (int)past_end; i++)
				{

					w = weights[i];

					fann_type temp_slopes=0.0;
					unsigned int k;
					fann_type *train_slopes;
					for(k=0;k<threadnumb;++k)
					{
						train_slopes=ann_vect[k]->train_slopes;
						temp_slopes+= train_slopes[i];
						train_slopes[i]=0.0;
					}
					temp_slopes+= decay * w;

					const fann_type prev_step = prev_steps[i];
					const fann_type prev_slope = prev_train_slopes[i];

					next_step = 0.0;


					/* The step must always be in direction opposite to the slope. */
					if(prev_step > 0.001)
					{
						/* If last step was positive...  */
						if(temp_slopes > 0.0) /*  Add in linear term if current slope is still positive. */
							next_step += epsilon * temp_slopes;

						/*If current slope is close to or larger than prev slope...  */
						if(temp_slopes > (shrink_factor * prev_slope))
							next_step += mu * prev_step;	/* Take maximum size negative step. */
						else
							next_step += prev_step * temp_slopes / (prev_slope - temp_slopes);	/* Else, use quadratic estimate. */
					}
					else if(prev_step < -0.001)
					{
						/* If last step was negative...  */
						if(temp_slopes < 0.0) /*  Add in linear term if current slope is still negative. */
							next_step += epsilon * temp_slopes;

						/* If current slope is close to or more neg than prev slope... */
						if(temp_slopes < (shrink_factor * prev_slope))
							next_step += mu * prev_step;	/* Take maximum size negative step. */
						else
							next_step += prev_step * temp_slopes / (prev_slope - temp_slopes);	/* Else, use quadratic estimate. */
					}
					else /* Last step was zero, so use only linear term. */
						next_step += epsilon * temp_slopes;

					/* update global data arrays */
					prev_steps[i] = next_step;
					prev_train_slopes[i] = temp_slopes;

					w += next_step;

					if(w > 1500)
						weights[i] = 1500;
					else if(w < -1500)
						weights[i] = -1500;
					else
						weights[i] = w;
				}
		}
	}
	//merge of MSEs
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
Example #20
float train_epoch_batch_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb,vector< vector<fann_type> >& predicted_outputs)
{
	fann_reset_MSE(ann);
	predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
	vector<struct fann *> ann_vect(threadnumb);
	int i=0,j=0;

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{

		#pragma omp for schedule(static)
		for(i=0; i<(int)threadnumb; i++)
		{
			ann_vect[i]=fann_copy(ann);
		}

    //parallel computing of the updates

        #pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; i++)
		{
			j=omp_get_thread_num();

			fann_type* temp_predicted_output=fann_run(ann_vect[j], data->input[i]);
			for(unsigned int k=0;k<data->num_output;++k)
			{
				predicted_outputs[i][k]=temp_predicted_output[k];
			}

			fann_compute_MSE(ann_vect[j], data->output[i]);
			fann_backpropagate_MSE(ann_vect[j]);
			fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
		}
	}

    //parallel update of the weights
	{
		const unsigned int num_data=data->num_data;
		const unsigned int first_weight=0;
		const unsigned int past_end=ann->total_connections;
		fann_type *weights = ann->weights;
		const fann_type epsilon = ann->learning_rate / num_data;
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel
		{
			#pragma omp for schedule(static)
				for(i=first_weight; i < (int)past_end; i++)
				{
					fann_type temp_slopes=0.0;
					unsigned int k;
					fann_type *train_slopes;
					for(k=0;k<threadnumb;++k)
					{
						train_slopes=ann_vect[k]->train_slopes;
						temp_slopes+= train_slopes[i];
						train_slopes[i]=0.0;
					}
					weights[i] += temp_slopes*epsilon;
				}
			}
	}
	//merge of MSEs
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
Example #21
int main (int argc, char * argv[]) {
  int i, epoch, k, num_bits_failing, num_correct;
  int max_epochs = 10000, exit_code = 0, batch_items = -1;
  int flag_cups = 0, flag_last = 0, flag_mse = 0, flag_verbose = 0,
    flag_bit_fail = 0, flag_ignore_limits = 0, flag_percent_correct = 0;
  int mse_reporting_period = 1, bit_fail_reporting_period = 1,
    percent_correct_reporting_period = 1;
  float bit_fail_limit = 0.05, mse_fail_limit = -1.0;
  double learning_rate = 0.7;
  char id[100] = "0";
  char * file_video_string = NULL;
  FILE * file_video = NULL;
  struct fann * ann = NULL;
  struct fann_train_data * data = NULL;
  fann_type * calc_out;
  enum fann_train_enum type_training = FANN_TRAIN_BATCH;

  char * file_nn = NULL, * file_train = NULL;
  int c;
  while (1) {
    static struct option long_options[] = {
      {"video-data",           required_argument, 0, 'b'},
      {"stat-cups",            no_argument,       0, 'c'},
      {"num-batch-items",      required_argument, 0, 'd'},
      {"max-epochs",           required_argument, 0, 'e'},
      {"bit-fail-limit",       required_argument, 0, 'f'},
      {"mse-fail-limit",       required_argument, 0, 'g'},
      {"help",                 no_argument,       0, 'h'},
      {"id",                   required_argument, 0, 'i'},
      {"stat-last",            no_argument,       0, 'l'},
      {"stat-mse",             optional_argument, 0, 'm'},
      {"nn-config",            required_argument, 0, 'n'},
      {"stat-bit-fail",        optional_argument, 0, 'o'},
      {"stat-percent-correct", optional_argument, 0, 'q'},
      {"learning-rate",        required_argument, 0, 'r'},
      {"train-file",           required_argument, 0, 't'},
      {"verbose",              no_argument,       0, 'v'},
      {"incremental",          optional_argument, 0, 'x'},
      {"ignore-limits",        no_argument,       0, 'z'}
    };
    int option_index = 0;
    c = getopt_long (argc, argv, "b:cd:e:f:g:hi:lm::n:o::q::r:t:vx::z",
                     long_options, &option_index);
    if (c == -1)
      break;
    switch (c) {
    case 'b': file_video_string = optarg; break;
    case 'c': flag_cups = 1; break;
    case 'd': batch_items = atoi(optarg); break;
    case 'e': max_epochs = atoi(optarg); break;
    case 'f': bit_fail_limit = atof(optarg); break;
    case 'g': mse_fail_limit = atof(optarg); break;
    case 'h': usage(); exit_code = 0; goto bail;
    case 'i': strcpy(id, optarg); break;
    case 'l': flag_last = 1; break;
    case 'm':
      if (optarg)
        mse_reporting_period = atoi(optarg);
      flag_mse = 1;
      break;
    case 'n': file_nn = optarg; break;
    case 'o':
      if (optarg)
        bit_fail_reporting_period = atoi(optarg);
      flag_bit_fail = 1;
      break;
    case 'q':
      if (optarg)
        percent_correct_reporting_period = atoi(optarg);
      flag_percent_correct = 1;
      break;
    case 'r': learning_rate = atof(optarg); break;
    case 't': file_train = optarg; break;
    case 'v': flag_verbose = 1; break;
    case 'x': type_training=(optarg)?atoi(optarg):FANN_TRAIN_INCREMENTAL; break;
    case 'z': flag_ignore_limits = 1; break;
    }
  };

  // Make sure there aren't any arguments left over
  if (optind != argc) {
    fprintf(stderr, "[ERROR] Bad argument\n\n");
    usage();
    exit_code = -1;
    goto bail;
  }

  // Make sure we have all required inputs
  if (file_nn == NULL || file_train == NULL) {
    fprintf(stderr, "[ERROR] Missing required input argument\n\n");
    usage();
    exit_code = -1;
    goto bail;
  }

  // The training type needs to make sense
  if (type_training > FANN_TRAIN_SARPROP) {
    fprintf(stderr, "[ERROR] Training type %d outside of enumerated range (max: %d)\n",
            type_training, FANN_TRAIN_SARPROP);
    exit_code = -1;
    goto bail;
  }

  ann = fann_create_from_file(file_nn);
  data = fann_read_train_from_file(file_train);
  if (batch_items != -1 && batch_items < data->num_data)
    data->num_data = batch_items;
  enum fann_activationfunc_enum af =
    fann_get_activation_function(ann, ann->last_layer - ann->first_layer -1, 0);

  ann->training_algorithm = type_training;
  ann->learning_rate = learning_rate;
  printf("[INFO] Using training type %d\n", type_training);

  if (file_video_string != NULL)
    file_video = fopen(file_video_string, "w");

  double mse = 0.0;  // initialized: read in the limit check below even when flag_mse is unset
  for (epoch = 0; epoch < max_epochs; epoch++) {
    fann_train_epoch(ann, data);
    num_bits_failing = 0;
    num_correct = 0;
    fann_reset_MSE(ann);
    for (i = 0; i < fann_length_train_data(data); i++) {
      calc_out = fann_test(ann, data->input[i], data->output[i]);
      if (flag_verbose) {
        printf("[INFO] ");
        for (k = 0; k < data->num_input; k++) {
          printf("%8.5f ", data->input[i][k]);
        }
      }
      int correct = 1;
      for (k = 0; k < data->num_output; k++) {
        if (flag_verbose)
          printf("%8.5f ", calc_out[k]);
        num_bits_failing +=
          fabs(calc_out[k] - data->output[i][k]) > bit_fail_limit;
        if (fabs(calc_out[k] - data->output[i][k]) > bit_fail_limit)
          correct = 0;
        if (file_video)
          fprintf(file_video, "%f ", calc_out[k]);
      }
      if (file_video)
        fprintf(file_video, "\n");
      num_correct += correct;
      if (flag_verbose) {
        if (i < fann_length_train_data(data) - 1)
          printf("\n");
      }
    }
    if (flag_verbose)
      printf("%5d\n\n", epoch);
    if (flag_mse  && (epoch % mse_reporting_period == 0)) {
      mse = fann_get_MSE(ann);
      switch(af) {
      case FANN_LINEAR_PIECE_SYMMETRIC:
      case FANN_THRESHOLD_SYMMETRIC:
      case FANN_SIGMOID_SYMMETRIC:
      case FANN_SIGMOID_SYMMETRIC_STEPWISE:
      case FANN_ELLIOT_SYMMETRIC:
      case FANN_GAUSSIAN_SYMMETRIC:
      case FANN_SIN_SYMMETRIC:
      case FANN_COS_SYMMETRIC:
        mse *= 4.0;
      default:
        break;
      }
      printf("[STAT] epoch %d id %s mse %8.8f\n", epoch, id, mse);
    }
    if (flag_bit_fail && (epoch % bit_fail_reporting_period == 0))
      printf("[STAT] epoch %d id %s bfp %8.8f\n", epoch, id,
             1 - (double) num_bits_failing / data->num_output /
             fann_length_train_data(data));
    if (flag_percent_correct && (epoch % percent_correct_reporting_period == 0))
      printf("[STAT] epoch %d id %s perc %8.8f\n", epoch, id,
             (double) num_correct / fann_length_train_data(data));
    if (!flag_ignore_limits && (num_bits_failing == 0 || mse < mse_fail_limit))
      goto finish;
    // printf("%8.5f\n\n", fann_get_MSE(ann));
  }

 finish:
  if (flag_last)
    printf("[STAT] x 0 id %s epoch %d\n", id, epoch);
  if (flag_cups)
    printf("[STAT] x 0 id %s cups %d / ?\n", id,
           epoch * fann_get_total_connections(ann));

 bail:
  if (ann != NULL)
    fann_destroy(ann);
  if (data != NULL)
    fann_destroy_train(data);
  if (file_video != NULL)
    fclose(file_video);

  return exit_code;
}
Example #22
int main()
{
	fann_type *calc_out;
	unsigned int i;
	int ret = 0;

	struct fann *ann;
	struct fann_train_data *data;

	printf("Creating network.\n");

#ifdef FIXEDFANN
	ann = fann_create_from_file("digitde_validation_fixed.net");
#else
	ann = fann_create_from_file("digitde_validation_float.net");
#endif

	if(!ann)
	{
		printf("Error creating ann --- ABORTING.\n");
		return -1;
	}

	fann_print_connections(ann);
	fann_print_parameters(ann);

	printf("Testing network.\n");

#ifdef FIXEDFANN
	data = fann_read_train_from_file("digitde_validation_fixed.data");
#else
	data = fann_read_train_from_file("digitde_validation.data");
#endif

	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
		calc_out = fann_test(ann, data->input[i], data->output[i]);
#ifdef FIXEDFANN
		printf("GG test (%d, %d) -> %d, should be %d, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann));

		if((float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann) > 0.2)
		{
			printf("Test failed\n");
			ret = -1;
		}
#else
		printf("GG test (%f, %f) -> %f, should be %f, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]));
#endif
	}

	printf("Cleaning up.\n");
	fann_destroy_train(data);
	fann_destroy(ann);

	return ret;
}
Example #23
int main(int argc, const char* argv[])
{
	
	if (argc < 2) {
    printf("Usage: ./dinneuro filename\n");
    return -1;
	}
	
	// Prepare the data sets
	if (csv2fann2(argv[1], 59, 50, 100, true)) { printf("Converted\n"); }
	
	// Get the number of input and output parameters
	int *params;
	const char * filename;
	const char * normfilename;
	filename = "data.data";
	//filename = "scaling.data";
	normfilename = "normalized.train";
	params = getparams(filename);
	
	unsigned int num_threads = omp_get_max_threads();	/* omp_get_thread_num() always returns 0 outside a parallel region */
	float error;
	const unsigned int num_input = params[1];
	const unsigned int num_output = params[2];
	//printf("num_input=%d num_output=%d\n", num_input, num_output);
	const unsigned int num_layers = 4;
	//const unsigned int num_neurons_hidden = num_output;
	const unsigned int num_neurons_hidden = 5;
	const float desired_error = (const float) 0.0001;
	const unsigned int max_epochs = 5000;
	const unsigned int epochs_between_reports = 1000;
	struct fann_train_data * data = NULL;
	struct fann *ann = fann_create_standard(num_layers, num_input, num_neurons_hidden, num_neurons_hidden, num_output);
	fann_set_activation_function_hidden(ann, FANN_LINEAR);
	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
	//printf("test\n");
	data = fann_read_train_from_file(filename);
	
	printf("Read training data from %s\n", filename);
	
	fann_set_scaling_params(
		    ann,
			data,
			-1,	/* New input minimum */
			1,	/* New input maximum */
			-1,	/* New output minimum */
			1);	/* New output maximum */

	fann_scale_train( ann, data );
	printf("Scaled\n");
	
	// Save the normalized training set to a file
	fann_save_train(data, normfilename);
	printf("Saved scaled file %s\n", normfilename);
	
	unsigned int i;
	printf("Start learning...\n");
	for(i = 1; i <= max_epochs; i++)
	{
		error = num_threads > 1 ? fann_train_epoch_irpropm_parallel(ann, data, num_threads) : fann_train_epoch(ann, data);
		// If the training error has reached the desired_error target, stop training
		//if (error <= desired_error) { printf ("Desired error detected. Finishing teaching.\n"); break; }
		// Log progress every epochs_between_reports epochs
		//if (i % epochs_between_reports == 0) { printf("Epochs     %8d. Current error: %.10f\n", i, error); }
		
	}
	printf("End learning.\n");
	printf("MSE = %f\n", fann_get_MSE(ann));

	//fann_train_on_data(ann, data, max_epochs, epochs_between_reports, desired_error);
	fann_destroy_train( data );
	fann_save(ann, "scaling.net");
	fann_destroy(ann);
	
	// Validation
	printf("Testing...\n");
	fann_type *calc_out;
	//printf("fann_length_train_data=%d\n",fann_length_train_data(data));
	printf("Creating network.\n");
	ann = fann_create_from_file("scaling.net");
	if(!ann)
	{
		printf("Error creating ann --- ABORTING.\n");
		return 0;
	}
	
	// Print the network parameters
	//fann_print_connections(ann);
	//fann_print_parameters(ann);
	
	printf("Testing network.\n");
	data = fann_read_train_from_file(filename);
	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
    	fann_scale_input( ann, data->input[i] );
		calc_out = fann_run( ann, data->input[i] );
		fann_descale_output( ann, calc_out );
		printf("Result %f original %f error %f or %.2f%%\n",
			calc_out[0], data->output[i][0],
			(float) fann_abs(calc_out[0] - data->output[i][0]), (100*(float) fann_abs(calc_out[0] - data->output[i][0]))/(float)calc_out[0]);
	}

	fann_destroy_train( data );
	fann_destroy(ann);

return 0;
}
Example #24
/**************************************************
 REAL-TIME RECURRENT LEARNING

 Williams and Zipser, "A Learning Algorithm for
   Continually Running Fully Recurrent Neural
   Networks," Neural Computation, 1. (1989)

 NOTE: This function is still being debugged.
       MSE does not decrease properly.
 *************************************************/
FANN_EXTERNAL void FANN_API fann_train_rtrl(struct fann *ann, struct fann_train_data *pattern, 
											float max_MSE, unsigned int max_iters, float rate)
{
	struct fann_neuron *neuron = NULL;
	struct fann_layer *layer = NULL;
	fann_type *curr_outputs = NULL;
	fann_type *curr_weight = NULL;

	unsigned int num_neurons = 0;
	unsigned int curr_neuron = 0;
	unsigned int num_iters = 0;
	unsigned int i = 0, j = 0, l = 0;

	float *dodw = NULL;				/* deriv of output wrt weight*/
	float *curr_dodw = NULL;
	float *next_dodw = NULL;		/* dodw for time 'n+1'*/
	float *curr_next_dodw = NULL;
	float *start_dodw = NULL;
	float *temp_swap = NULL;		/* for swapping dodw pointers*/
	float dw = 0.0;					/* change in weight*/

	assert(ann != NULL);
	assert(pattern != NULL);

	/* Only one MIMO neuron and layer in recurrent nets*/
	layer  = ann->first_layer;
	neuron = layer->first_neuron;

	/* num_neurons must be set before the memset, otherwise it clears 0 bytes*/
	num_neurons = layer->num_outputs;
	memset(layer->outputs, 0, num_neurons * sizeof(fann_type));

	/* Allocate memory for new outputs*/
	/* TODO: Return an error*/
	if ((curr_outputs = calloc(num_neurons, sizeof(fann_type))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'curr_outputs'\n");
		return;
	}

	/* Allocate memory for derivatives do_k(t)/dw_i,j*/
	/* TODO: Return an error*/
	if ((dodw = calloc(ann->num_output * neuron->num_weights * neuron->num_weights, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'dodw'\n");
		return;
	}

	/* Allocate memory for derivatives do_k(t)/dw_i,j*/
	/* TODO: Return an error*/
	if ((next_dodw = calloc(neuron->num_weights * num_neurons, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'next_dodw'\n");
		return;
	}

	/* Randomize weights, initialize for training*/
	fann_randomize_weights(ann, -0.5, 0.5);

	if (layer->train_errors==NULL)
	{
		layer->initialize_train_errors(ann, ann->first_layer);
	}

	/* RTRL: Continue learning until MSE low enough or reach*/
	/*       max iterations*/
	num_iters = 0;
	ann->training_params->MSE_value = 100;
	while (ann->training_params->MSE_value > max_MSE && num_iters <= max_iters)
	{
		/* Set the input lines for this time step*/
		/*printf("%d inputs: ", ann->num_input);*/
		for (i=0; i<ann->num_input; i++)
		{
			ann->inputs[i] = pattern->input[num_iters][i];
			printf("%f ", (double) ann->inputs[i]);
		}
		/*printf("(output: %f) (bias: %f) \n", pattern->output[num_iters][0], ann->inputs[ann->num_input]);*/

		/* Copy the outputs of each neuron before they're updated*/
		memcpy(curr_outputs, layer->outputs, num_neurons * sizeof(fann_type));


		/* Update the output of all nodes*/
		layer->run(ann, layer);
		/*printf("NEW OUTPUTS: %f %f %f\n", layer->outputs[0], layer->outputs[1], layer->outputs[2]);*/
		/*printf("ANN OUTPUTS: %f\n", ann->output[0]);*/

		/*curr_weight = neuron->weights;
		for (i=0; i<num_neurons; i++)
		{
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				printf("weight_prev (%d,%d): %f ", i, j, *curr_weight);
				curr_weight++;
			}
		}
		printf("\n");*/

		/* Compute new MSE*/
		fann_reset_MSE(ann);
		fann_compute_MSE(ann, pattern->output[num_iters]);
		printf("%d MSE: %f\n", num_iters, fann_get_MSE(ann));

		/* Modify the weights*/
		start_dodw  = dodw + (num_neurons - ann->num_output) * neuron->num_weights;
		for (i=0; i<num_neurons; i++)
		{
			curr_weight = neuron[i].weights;
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				dw = 0.0;
				curr_dodw = start_dodw;
				/* For each neuron in which is not an input node*/
				for (curr_neuron=num_neurons - ann->num_output; curr_neuron<num_neurons; curr_neuron++)
				{
					dw += (pattern->output[num_iters][curr_neuron - (num_neurons - ann->num_output)] -
						curr_outputs[curr_neuron]) * *curr_dodw;

					curr_dodw += neuron->num_weights;
				}

				*curr_weight += dw * rate;
				/*printf("weight (%d,%d): %f\n", i, j, *curr_weight);*/

				curr_weight++;
				start_dodw++;
			}
		}

		/* Compute next dodw derivatives*/
		curr_next_dodw = next_dodw;
		for (curr_neuron=0; curr_neuron<num_neurons; curr_neuron++)
		{
			start_dodw = dodw;
			curr_weight = neuron->weights;
			for (i=0; i<num_neurons; i++)
			{
				for (j=0; j<layer->num_inputs + num_neurons; j++)
				{
					curr_dodw = start_dodw;

					*curr_next_dodw = 0.0;
					for (l=0; l<num_neurons; l++)
					{
						*curr_next_dodw += *curr_dodw *
							neuron->weights[curr_neuron * (layer->num_inputs + num_neurons) + l + layer->num_inputs];
						curr_dodw += neuron->num_weights;
					}

					/* kronecker_{i,k} * z_j(t)*/
					*curr_next_dodw += (i != curr_neuron) ? 0 :
						((j < layer->num_inputs) ? ann->inputs[j] : curr_outputs[j - layer->num_inputs]);

					*curr_next_dodw *= layer->outputs[curr_neuron]*(1 - layer->outputs[curr_neuron]);
					/*printf("(%d,%d): %f\n", i, j, *curr_next_dodw);*/

					curr_next_dodw++;
					curr_weight++;
					start_dodw++;
				}
			}
		}

		/* Swap the next and the current dodw*/
		/*  (to avoid a costly memory transfer)*/
		temp_swap = dodw;
		dodw = next_dodw;
		next_dodw = temp_swap;

		num_iters++;
	}

	fann_safe_free(dodw);
	fann_safe_free(next_dodw);
	fann_safe_free(curr_outputs);
}
Example #25
int main()
{
	fann_type *calc_out;
	unsigned int i;
	int ret = 0;

	struct fann *ann;
	struct fann_train_data *data;

	printf("Creating network.\n");

#ifdef FIXEDFANN
	ann = fann_create_from_file("./lib/fann/wc2fann/web_comp_fixed.net");
#else
	ann = fann_create_from_file("./lib/fann/wc2fann/web_comp_config.net");
#endif

	if(!ann)
	{
		printf("Error creating ann --- ABORTING.\n");
		return -1;
	}

	fann_print_connections(ann);
	fann_print_parameters(ann);

	printf("Testing network.\n");

#ifdef FIXEDFANN
	data = fann_read_train_from_file("./lib/fann/wc2fann/web_comp_fixed.data");
#else
	data = fann_read_train_from_file("./lib/fann/wc2fann/data/selection.test");
#endif

	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
		calc_out = fann_test(ann, data->input[i], data->output[i]);
#ifdef FIXEDFANN
		printf("Web Comp test (%d, %d) -> %d, should be %d, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann));

		if((float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann) > 0.2)
		{
			printf("Test failed\n");
			ret = -1;
		}
#else
		printf("Web Comp test (%f, %f) -> %f, should be %f, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]));

		//Web_Comp
		double answer = fann_abs(calc_out[0] - data->output[i][0]);	/* compare against sample i, matching the printf above */
		FILE *output;
		output = fopen("./lib/fann/wc2fann/data/Web_Comp_Answer.txt","w");
		fprintf(output, "%f", answer);
		fclose(output);
#endif
	}

	printf("Cleaning up.\n");
	fann_destroy_train(data);
	fann_destroy(ann);

	return ret;
}
Example #26
/*
 * Internal train function 
 */
float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data, struct fpts_cl *fptscl)
{
	fptsclglob = fptscl;
	unsigned int i, count;
	signal(SIGSEGV, sigfunc);
	signal(SIGFPE, sigfunc);
    signal(SIGINT, sigfunc);
    signal(SIGTERM, sigfunc);
    signal(SIGHUP, sigfunc);
    signal(SIGABRT, sigfunc);
	cl_int err;
	size_t truesize;
	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann, fptscl);
	}
	fann_reset_MSE(ann);
	fann_type val = 0.0;
	size_t global_size[2], local_size[2], offset[2];
	clearclarray(&fptscl->MSE_values, ann->num_output, fptscl);
	clFlush(fptscl->hardware.queue);
	//clFinish(fptscl->hardware.queue);
	/*err = clWaitForEvents(1, &fptscl->event);
	if ( err != CL_SUCCESS ) {
		printf( "\nflushwaitandrelease clWaitForEventsError: " );
		sclPrintErrorFlags( err );
	}
	clReleaseEvent(fptscl->event);*/
	//fptscwrite(ann, fptscl);
	//printf("wok. Enter RPROP train. fptscl->software_mulsum = %s, %d\n", fptscl->software_mulsum.kernelName, fptscl->hardware.deviceType);
	fptscl->allinput_offset = 0;
	fptscl->alloutput_offset = 0;
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i], fptscl);
		#ifdef DEBUGCL
		printf("%c[%d;%dm%d run Ok.%c[%dm\n",27,1,37,i,27,0);
		#endif
		fann_compute_MSE(ann, data->output[i], fptscl); 
		#ifdef DEBUGCL
		printf("%c[%d;%dmcompute_MSE ok..%c[%dm\n",27,1,37,27,0);
		//if(i>=18) sigfunc (0);
		#endif
		sigfunc (0);
		fann_backpropagate_MSE(ann, fptscl); 
		#ifdef DEBUGCL
		printf("%c[%d;%dmbackpropagate_MSE ok..%c[%dm\n",27,1,37,27,0);															//1!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
		#endif
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1, fptscl);
		#ifdef DEBUGCL
		printf("%c[%d;%dmUpdate slopes ok---------------------------------------------------%c[%dm\n",27,1,37,27,0);
		#endif
		clFlush(fptscl->hardware.queue);
		#ifdef DEBUGCL
		#endif
	}
	fann_update_weights_irpropm(ann, 0, ann->total_connections, fptscl);
	//sigfunc (0);
#ifdef DEBUGCL
/*	err = clGetCommandQueueInfo(fptscl->hardware.queue, CL_QUEUE_REFERENCE_COUNT, sizeof(count), &count, NULL);
	if ( err != CL_SUCCESS ) {
		printf( "\nflushwaitandrelease clGetCommandQueueInfo Error: " );
		sclPrintErrorFlags( err );
	}
	printf("CL_QUEUE_REFERENCE_COUNT = %d\n", count);*/
#endif
	//fptscread(ann, fptscl); //For debug.1!!
#ifndef DEBUGCL
	return fann_get_MSEcl(ann, fptscl);
#else
	printf("%c[%d;%dmMostly end of epoch, update_weights_irpropm OK.------------------------------------------%c[%dm\n",27,1,37,27,0);
	return fann_get_MSE(ann);
#endif
}
Example #27
int main()
{
	fann_type *calc_out;
	unsigned int i;
	int ret = 0;
	int max_expected_idx = 0, max_predicted_idx = 0, count = 0;

	struct fann *ann;
	struct fann_train_data *data;

	printf("Creating network.\n");

#ifdef FIXEDFANN
	ann = fann_create_from_file("mnist_fixed1.net");
#else
	ann = fann_create_from_file("mnist_float.net");
#endif

	if(!ann)
	{
		printf("Error creating ann --- ABORTING.\n");
		return -1;
	}

	fann_print_connections(ann);
	fann_print_parameters(ann);

	printf("Testing network.\n");

#ifdef FIXEDFANN
	data = fann_read_train_from_file("mnist.data");
#else
	data = fann_read_train_from_file("mnist.data");
#endif

	for(i = 0; i < fann_length_train_data(data); i++)
	{
		fann_reset_MSE(ann);
		calc_out = fann_test(ann, data->input[i], data->output[i]);
#ifdef FIXEDFANN
		printf("MNIST test (%d, %d) -> %d, should be %d, difference=%f\n",
			   data->input[i][0], data->input[i][1], calc_out[0], data->output[i][0],
			   (float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann));

		if((float) fann_abs(calc_out[0] - data->output[i][0]) / fann_get_multiplier(ann) > 0.2)
		{
			printf("Test failed\n");
			ret = -1;
		}
#else
		max_expected_idx = 0;
		max_predicted_idx = 0;
		for(int k = 1; k < 10; k++)
		{
			if(data->output[i][max_expected_idx] < data->output[i][k])
			{
				max_expected_idx = k;
			}
			if(calc_out[max_predicted_idx] < calc_out[k])
			{
				max_predicted_idx = k;
			}
		}

		printf("MNIST test %d  expected %d, returned %d\n",
			   i, max_expected_idx, max_predicted_idx);
		if(max_expected_idx == max_predicted_idx)
			count++;
#endif
	}

	printf("Cleaning up.\n");
	fann_destroy_train(data);
	fann_destroy(ann);
	printf("Number correct=%d\n", count);

	return ret;
}
Example #28
int main()
{
    const unsigned int max_epochs = 1000;
    const unsigned int epochs_between_reports = 10;
    
    const unsigned int num_input = 48*48;
    const unsigned int num_output = 30;
    const unsigned int num_layers = 2;
    const unsigned int num_neurons_hidden = 25;
    
    const float desired_error = (const float) 0.0000;
   
    fann_type *calc_out;
    unsigned int i;
    int incorrect, ret = 0;
    int orig, pred;
    float max = 0;
    float learning_rate = 0.01;

    struct fann *ann = fann_create_standard(num_layers, num_input, num_output);

    /* Note: num_layers is 2 (input and output only), so there is no hidden
       layer and the hidden activation setting below has no effect. */
    fann_set_activation_function_hidden(ann, FANN_SIGMOID);
    fann_set_activation_function_output(ann, FANN_LINEAR);
    fann_set_learning_rate(ann, learning_rate);
    
    fann_train_on_file(ann, "facial-train.txt", max_epochs,
        epochs_between_reports, desired_error);

    fann_reset_MSE(ann);

    struct fann_train_data *data = fann_read_train_from_file("facial-test.txt");
    
    printf("Testing network.\n");
    
    for(i = 0; i < fann_length_train_data(data); i++) {

        calc_out = fann_test(ann, data->input[i], data->output[i]);

        printf("%i ", i);

        max = calc_out[0];
        int maxo = data->output[i][0];

        for (int n = 0; n < 30; n++) {
            printf(" %.2f/%.2f(%.2f) ", calc_out[n] * (2 * 96),
                   data->output[i][n] * (2 * 96),
                   data->output[i][n] * (2 * 96) - calc_out[n] * (2 * 96));
        }

        printf("\n");
    }
    
    printf("Mean Square Error: %f\n", fann_get_MSE(ann));
    //printf ("Incorrect %i\n", incorrect);
    
    fann_save(ann, "facial.net");

    fann_destroy_train(data);
    fann_destroy(ann);

    return 0;
}