Example #1
0
/* Updates the weights of all cascade-training candidate neurons.
 *
 * The connection range covered runs from the first candidate neuron's
 * first connection through the last candidate's last connection plus
 * the candidate-to-output links.  Only batch-style algorithms are
 * valid here; batch/incremental raise FANN_E_CANT_USE_TRAIN_ALG.
 */
void fann_update_candidate_weights(struct fann *ann, unsigned int num_data)
{
	/* Candidates live one slot past the real neurons of the output
	 * layer (an empty neuron separates them). */
	struct fann_neuron *cand_begin = (ann->last_layer - 1)->last_neuron + 1;
	struct fann_neuron *cand_end = cand_begin + fann_get_cascade_num_candidates(ann) - 1;
	unsigned int from_con = cand_begin->first_con;
	unsigned int to_con = cand_end->last_con + ann->num_output;
	int algorithm = fann_get_training_algorithm(ann);

	if(algorithm == FANN_TRAIN_RPROP)
	{
		fann_update_weights_irpropm(ann, from_con, to_con);
	}
	else if(algorithm == FANN_TRAIN_SARPROP)
	{
		/* TODO: increase epoch? */
		fann_update_weights_sarprop(ann, fann_get_sarprop_epoch(ann), from_con, to_con);
	}
	else if(algorithm == FANN_TRAIN_QUICKPROP)
	{
		fann_update_weights_quickprop(ann, num_data, from_con, to_con);
	}
	else if(algorithm == FANN_TRAIN_BATCH || algorithm == FANN_TRAIN_INCREMENTAL)
	{
		fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG);
	}
}
Example #2
0
File: fann.c  Project: inigosola/lua-fann
/*! ann:get_training_algorithm()
 *# Retrieves the training algorithm:\n
 *# Valid algorithms are {{fann.FANN_TRAIN_INCREMENTAL}},
 *# {{fann.FANN_TRAIN_BATCH}}, {{fann.FANN_TRAIN_RPROP}} or
 *# {{fann.FANN_TRAIN_QUICKPROP}}
 *-
 */
static int ann_get_training_algorithm(lua_State *L)
{
	/* The first argument must be a neural-net userdatum. */
	struct fann **self = luaL_checkudata(L, 1, FANN_METATABLE);

	luaL_argcheck(L, self != NULL, 1, "'neural net' expected");

	/* Push the algorithm id as a single integer result. */
	lua_pushinteger(L, fann_get_training_algorithm(*self));
	return 1;
}
Example #3
0
/* Seeds a candidate neuron's incoming weights with random values and
 * resets the associated RPROP bookkeeping arrays.
 *
 * Connections in [first_con, last_con) are initialized; scale_factor
 * bounds the random range.  The connection from the input layer's bias
 * neuron gets a symmetric range, all others a non-negative one.
 */
void initialize_candidate_weights(struct fann *ann, unsigned int first_con, unsigned int last_con, float scale_factor)
{
	/* Index of the connection fed by the input layer's bias neuron. */
	unsigned int bias_weight =
		first_con + (ann->first_layer->last_neuron - ann->first_layer->first_neuron) - 1;
	fann_type initial_step = 0;
	unsigned int con;

	/* RPROP starts every weight with its configured delta-zero step. */
	if(fann_get_training_algorithm(ann) == FANN_TRAIN_RPROP)
		initial_step = fann_get_rprop_delta_zero(ann);

	for(con = first_con; con < last_con; con++)
	{
		if(con == bias_weight)
			ann->weights[con] = fann_rand(-scale_factor, scale_factor);
		else
			ann->weights[con] = fann_rand(0, scale_factor);

		/* Clear training history so the candidate starts fresh. */
		ann->rprop_params->train_slopes[con] = 0;
		ann->rprop_params->prev_steps[con] = initial_step;
		ann->rprop_params->prev_train_slopes[con] = 0;
	}
}
Example #4
0
/* Runs one epoch of output-weight training over `data`.
 *
 * Every pattern is forward-run, its error folded into the MSE, and the
 * output layer's slopes accumulated in batch mode; afterwards the
 * weights are adjusted once using the configured batch-style algorithm.
 * Plain batch and incremental training are not supported here and
 * report FANN_E_CANT_USE_TRAIN_ALG.
 *
 * Returns the mean square error accumulated over the epoch.
 */
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;
	
	fann_reset_MSE(ann);

	/* Accumulate error and slopes for the output layer only. */
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_update_slopes_batch(ann, ann->last_layer - 1, ann->last_layer - 1);
	}

	switch (fann_get_training_algorithm(ann))
	{
		case FANN_TRAIN_RPROP:
			fann_update_weights_irpropm(ann, (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			break;
		case FANN_TRAIN_SARPROP:
			fann_update_weights_sarprop(ann, fann_get_sarprop_epoch(ann), (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			fann_set_sarprop_epoch(ann, fann_get_sarprop_epoch(ann) + 1);
			break;
		case FANN_TRAIN_QUICKPROP:
			fann_update_weights_quickprop(ann, data->num_data,
										  (ann->last_layer - 1)->first_neuron->first_con,
										  ann->total_connections);
			break;
		case FANN_TRAIN_BATCH:
		case FANN_TRAIN_INCREMENTAL:
			fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG);
			break;	/* fix: terminate the last case so future cases can't fall through */
	}

	return fann_get_MSE(ann);
}
Example #5
0
// Returns the TrainingAlgorithm wrapper matching the algorithm id
// currently configured on the underlying FANN network.
TrainingAlgorithm Trainer::training_algorithm() const {
  const TrainingAlgorithm* found =
      TrainingAlgorithm::Get(fann_get_training_algorithm(ann_));
  BOOST_ASSERT(found != NULL); // should not happen
  return *found;
}
Example #6
0
int main(int argc, char *argv[])
{
	struct fann_train_data *dadosTreino, *dadosTeste;
	struct fann *ANN;
	fann_type *ANN_Answers;

	int *layers, i, j, aux;
	chromosome chromo;

	float erro = 0.0;

	checkArgs(argc, argv);
	buildChromosome(argv, &chromo);

	checkDatasetFiles();

	dadosTreino = fann_read_train_from_file(nomeArqTreino);

	layers = (int *) calloc(2+chromo.qntCamadasOcultas, sizeof(int));
	layers[0] = qntNeuroniosEntrada;
	layers[2+chromo.qntCamadasOcultas-1] = qntNeuroniosSaida;
	aux = chromo.neurOcultos;
	for (i=1; i < 2+chromo.qntCamadasOcultas-1 ; i++)
	{
		layers[i] = aux;
		aux = aux/2;
	}

	// CRIANDO A RNA:
	ANN = fann_create_standard_array(2+chromo.qntCamadasOcultas, layers);

	// TREINO
	fann_set_learning_rate(ANN, chromo.learnRate);
	fann_set_learning_momentum(ANN, chromo.moment);

	fann_set_activation_function_hidden( ANN, chromo.fcOculta );
	fann_set_activation_function_output( ANN, chromo.fcSaida  );
	fann_set_training_algorithm(ANN, chromo.algAprend );

	if (fann_get_training_algorithm(ANN) == FANN_TRAIN_QUICKPROP)
		fann_set_quickprop_decay(ANN, chromo.decay);

	// Em python, o treino ficava entre um try.
	// Se desse erro, escrevia "Resultado: 999.0" e exit
	fann_train_on_data(ANN, dadosTreino, chromo.epocasTreino, 50, desiredError);

	fann_destroy_train(dadosTreino);

	// TESTES:
	dadosTeste  = fann_read_train_from_file( nomeArqValidacao);

	// Em python, o teste também ficava entre um try.
	// Se desse erro, escrevia "Resultado: 999.0" e exit
	for(i = 0; i < fann_length_train_data(dadosTeste); i++)
	{
		ANN_Answers = fann_run(ANN, dadosTeste->input[i]);
		if (ANN_Answers == NULL)
		{
			printf("Resultado: 999.0\n");
			exit(2);
		}

		for (j=0; j < qntNeuroniosSaida; j++)
			erro += (float) powf(fann_abs(ANN_Answers[j] - dadosTeste->output[i][j]), 2);
	}
	printf("Resultado: %f\n", erro/(fann_length_train_data(dadosTeste)-1));

	fann_destroy_train(dadosTeste);

	saveANN(argc, argv, ANN);

	fann_destroy(ANN);
}