Code example #1
/* Free memory when neuron is destroyed */
FANN_EXTERNAL void FANN_API fann_neuron_destructor_fully_recurrent(struct fann_neuron* neuron)
{
	assert(neuron != NULL);

	fann_safe_free(neuron->weights);
	fann_safe_free(neuron->weights_deltas);
	fann_safe_free(neuron->sums);
}
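All of these examples lean on the fann_safe_free() helper. For reference, it is a small macro from fann_internal.h, essentially:

#define fann_safe_free(x) { if (x) { free(x); (x) = NULL; } }

Because the pointer is reset to NULL after freeing, a NULL field is ignored and freeing the same field twice is harmless.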
Code example #2
File: fann_train_data.c Project: Envek/fann
/*
 * deallocate the train data structure. 
 */
FANN_EXTERNAL void FANN_API fann_destroy_train(struct fann_train_data *data)
{
	if(data == NULL)
		return;
	if(data->input != NULL)
		fann_safe_free(data->input[0]);
	if(data->output != NULL)
		fann_safe_free(data->output[0]);
	fann_safe_free(data->input);
	fann_safe_free(data->output);
	fann_safe_free(data);
}
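A minimal usage sketch (the file name train.data is assumed): data returned by fann_read_train_from_file() must be released exactly once with fann_destroy_train().

#include "fann.h"

int main(void)
{
	struct fann_train_data *data = fann_read_train_from_file("train.data");
	if (data == NULL)
		return 1;
	/* ... train a network with the data ... */
	fann_destroy_train(data);	/* frees the input/output arrays and the struct */
	return 0;
}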
Code example #3
File: fann_sparse.c Project: DontLookAtMe/fann-mrnn
FANN_EXTERNAL void FANN_API fann_sparse_neuron_destructor(struct fann_neuron* neuron)
{
	struct fann_neuron_private_data_connected_any_any *priv = (struct fann_neuron_private_data_connected_any_any *) neuron->private_data;
	fann_safe_free(neuron->weights);
	fann_safe_free(neuron->weights_deltas);
	fann_safe_free(neuron->sums);
	fann_safe_free(priv->prev_weights_deltas);
	fann_safe_free(priv->prev_steps);
	fann_safe_free( ((struct fann_sparse_neuron_private_data*) neuron->private_data)->mask );
	fann_safe_free( ((struct fann_sparse_neuron_private_data*) neuron->private_data)->generic );
	fann_safe_free(neuron->private_data);

}
Code example #4
/* Deallocate a layer */
FANN_EXTERNAL void FANN_API fann_layer_destructor_fully_recurrent(struct fann_layer* layer)
{
	struct fann_neuron *neuron_it;

	assert(layer != NULL);
	assert(layer->first_neuron != NULL);
	assert(layer->last_neuron  != NULL);

	/* Deallocate memory for each neuron in this layer */
	for (neuron_it=layer->first_neuron; neuron_it!=layer->last_neuron; neuron_it++)
	{
		if (neuron_it->destructor != NULL)
		{
			neuron_it->destructor(neuron_it);
		}
	}

	fann_safe_free(layer->first_neuron);
	fann_safe_free(layer->train_errors);
	fann_safe_free(layer->outputs);
}
Code example #5
/* Runs a single layer for one iteration */
FANN_EXTERNAL void FANN_API fann_layer_run_fully_recurrent(
	struct fann *ann, struct fann_layer* layer)
{
	struct fann_neuron * last_neuron = layer->last_neuron;
	struct fann_neuron * neuron_it;
	unsigned int neuron_num = 0;
	unsigned int i = 0;
	
	fann_type sum      = (fann_type)0.0;
	fann_type *outputs = NULL;

	if ((outputs = calloc(ann->num_output, sizeof(fann_type))) == NULL)
	{
		printf("Run Layer: 'outputs' allocation failed!\n");
		return;
	}


	/* Run each neuron in the layer a single iteration */
	for(neuron_it = layer->first_neuron; neuron_it != last_neuron; neuron_it++)
	{
		/* Find the sum of weights*inputs*/
		sum = (fann_type)0.0;
		for (i=0; i<neuron_it->num_weights; i++)
		{
			if (i < ann->num_input + 1)
			{
				neuron_it->sums[i] = neuron_it->weights[i] * layer->inputs[i];
			}
			else
			{
				neuron_it->sums[i] = neuron_it->weights[i] * layer->outputs[i - ann->num_input - 1];
			}

			sum += neuron_it->sums[i];
		}

		/* FIXME: Support alternate functions other than sigmoid!*/
		outputs[neuron_num] = (fann_type)(1.0 / (1.0 + exp(-sum)));
		neuron_num++;
	}

	/* Copy the new values over to the ANN*/
	for (i=0; i<ann->num_output; i++)
	{
		ann->output[i] = outputs[i];
	}
	
	fann_safe_free(outputs);
}
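In equation form, the loop above computes, for each neuron k,

\[
y_k(t) = \sigma\Big( \sum_{i=0}^{I} w_{k,i}\, x_i(t) + \sum_{j=1}^{N} w_{k,I+j}\, y_j(t-1) \Big),
\qquad \sigma(s) = \frac{1}{1 + e^{-s}},
\]

where x_0, ..., x_I are the I external inputs plus the bias line and y_1, ..., y_N are the layer's outputs from the previous iteration.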
Code example #6
File: fann_recurrent.c Project: EQ4/Visore
/**************************************************
 REAL-TIME RECURRENT LEARNING

 Williams and Zipser, "A Learning Algorithm for
   Continually Running Fully Recurrent Neural
   Networks," Neural Computation, 1. (1989)

 NOTE: This function is still being debugged.
       MSE does not decrease properly.
 *************************************************/
FANN_EXTERNAL void FANN_API fann_train_rtrl(struct fann *ann, struct fann_train_data *pattern, 
											float max_MSE, unsigned int max_iters, float rate)
{
	struct fann_neuron *neuron = NULL;
	struct fann_layer *layer = NULL;
	fann_type *curr_outputs = NULL;
	fann_type *curr_weight = NULL;

	unsigned int num_neurons = 0;
	unsigned int curr_neuron = 0;
	unsigned int num_iters = 0;
	unsigned int i = 0, j = 0, l = 0;

	float *dodw = NULL;				/* deriv of output wrt weight*/
	float *curr_dodw = NULL;
	float *next_dodw = NULL;		/* dodw for time 'n+1'*/
	float *curr_next_dodw = NULL;
	float *start_dodw = NULL;
	float *temp_swap = NULL;		/* for swapping dodw pointers*/
	float dw = 0.0;					/* change in weight*/

	assert(ann != NULL);
	assert(pattern != NULL);

	/* Only one MIMO neuron and layer in recurrent nets*/
	layer  = ann->first_layer;
	neuron = layer->first_neuron;
	num_neurons = layer->num_outputs;

	/* Clear the outputs before the first time step*/
	memset(layer->outputs, 0, num_neurons * sizeof(fann_type));

	/* Allocate memory for new outputs*/
	/* TODO: Return an error*/
	if ((curr_outputs = calloc(num_neurons, sizeof(fann_type))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'curr_outputs'\n");
		return;
	}

	/* Allocate memory for derivatives do_k(t)/dw_i,j*/
	/* TODO: Return an error*/
	if ((dodw = calloc(ann->num_output * neuron->num_weights * neuron->num_weights, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'dodw'\n");
		fann_safe_free(curr_outputs);
		return;
	}

	/* Allocate memory for derivatives do_k(t+1)/dw_i,j*/
	/* TODO: Return an error*/
	if ((next_dodw = calloc(neuron->num_weights * num_neurons, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'next_dodw'\n");
		fann_safe_free(curr_outputs);
		fann_safe_free(dodw);
		return;
	}

	/* Randomize weights, initialize for training*/
	fann_randomize_weights(ann, -0.5, 0.5);

	if (layer->train_errors==NULL)
	{
		layer->initialize_train_errors(ann, ann->first_layer);
	}

	/* RTRL: Continue learning until MSE low enough or reach*/
	/*       max iterations*/
	num_iters = 0;
	ann->training_params->MSE_value = 100;
	while (ann->training_params->MSE_value > max_MSE && num_iters <= max_iters
		&& num_iters < pattern->num_data)	/* don't read past the training data */
	{
		/* Set the input lines for this time step*/
		/*printf("%d inputs: ", ann->num_input);*/
		for (i=0; i<ann->num_input; i++)
		{
			ann->inputs[i] = pattern->input[num_iters][i];
			printf("%f ", (double) ann->inputs[i]);
		}
		/*printf("(output: %f) (bias: %f) \n", pattern->output[num_iters][0], ann->inputs[ann->num_input]);*/

		/* Copy the outputs of each neuron before they're updated*/
		memcpy(curr_outputs, layer->outputs, num_neurons * sizeof(fann_type));


		/* Update the output of all nodes*/
		layer->run(ann, layer);
		/*printf("NEW OUTPUTS: %f %f %f\n", layer->outputs[0], layer->outputs[1], layer->outputs[2]);*/
		/*printf("ANN OUTPUTS: %f\n", ann->output[0]);*/

		/*curr_weight = neuron->weights;
		for (i=0; i<num_neurons; i++)
		{
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				printf("weight_prev (%d,%d): %f ", i, j, *curr_weight);
				curr_weight++;
			}
		}
		printf("\n");*/

		/* Compute new MSE*/
		fann_reset_MSE(ann);
		fann_compute_MSE(ann, pattern->output[num_iters]);
		printf("%d MSE: %f\n", num_iters, fann_get_MSE(ann));

		/* Modify the weights*/
		start_dodw  = dodw + (num_neurons - ann->num_output) * neuron->num_weights;
		for (i=0; i<num_neurons; i++)
		{
			curr_weight = neuron[i].weights;
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				dw = 0.0;
				curr_dodw = start_dodw;
				/* For each neuron that is not an input node*/
				for (curr_neuron=num_neurons - ann->num_output; curr_neuron<num_neurons; curr_neuron++)
				{
					dw += (pattern->output[num_iters][curr_neuron - (num_neurons - ann->num_output)] -
						curr_outputs[curr_neuron]) * *curr_dodw;

					curr_dodw += neuron->num_weights;
				}

				*curr_weight += dw * rate;
				/*printf("weight (%d,%d): %f\n", i, j, *curr_weight);*/

				curr_weight++;
				start_dodw++;
			}
		}

		/* Compute next dodw derivatives*/
		curr_next_dodw = next_dodw;
		for (curr_neuron=0; curr_neuron<num_neurons; curr_neuron++)
		{
			start_dodw = dodw;
			curr_weight = neuron->weights;
			for (i=0; i<num_neurons; i++)
			{
				for (j=0; j<layer->num_inputs + num_neurons; j++)
				{
					curr_dodw = start_dodw;

					*curr_next_dodw = 0.0;
					for (l=0; l<num_neurons; l++)
					{
						*curr_next_dodw += *curr_dodw *
							neuron->weights[curr_neuron * (layer->num_inputs + num_neurons) + l + layer->num_inputs];
						curr_dodw += neuron->num_weights;
					}

					/* kronecker_{i,k} * z_j(t)*/
					*curr_next_dodw += (i != curr_neuron) ? 0 :
						((j < layer->num_inputs) ? ann->inputs[j] : curr_outputs[j - layer->num_inputs]);

					*curr_next_dodw *= layer->outputs[curr_neuron]*(1 - layer->outputs[curr_neuron]);
					/*printf("(%d,%d): %f\n", i, j, *curr_next_dodw);*/

					curr_next_dodw++;
					curr_weight++;
					start_dodw++;
				}
			}
		}

		/* Swap the next and the current dodw*/
		/*  (to avoid a costly memory transfer)*/
		temp_swap = dodw;
		dodw = next_dodw;
		next_dodw = temp_swap;

		num_iters++;
	}

	fann_safe_free(dodw);
	fann_safe_free(next_dodw);
	fann_safe_free(curr_outputs);
}
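For reference, this is the Williams-Zipser update the loops above implement: the weight change follows the instantaneous error gradient, and the sensitivities \( p^k_{ij}(t) = \partial y_k(t) / \partial w_{ij} \) (the dodw array) are propagated forward in time rather than backward:

\[
\Delta w_{ij}(t) = \eta \sum_{k} \big( d_k(t) - y_k(t) \big) \, p^k_{ij}(t)
\]
\[
p^k_{ij}(t+1) = y_k(t+1)\big(1 - y_k(t+1)\big) \Big[ \sum_{l} w_{k,l}\, p^l_{ij}(t) + \delta_{ik}\, z_j(t) \Big]
\]

where z_j(t) is the j-th input line or previous output, \delta_{ik} is the Kronecker delta (the kronecker comment in the code), and y(1-y) is the sigmoid derivative.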
Code example #7
File: fann_recurrent.c Project: EQ4/Visore
FANN_EXTERNAL fann_type *FANN_API fann_run_hopfield(struct fann *ann, fann_type *input)
{
	struct fann_neuron *neuron = NULL;
	unsigned int num_neurons = 0;
	unsigned int rand_neuron = 0;
	unsigned int i = 0;
	unsigned int neuron_array_size = 0;
	unsigned int iters = 0;
	int statediff = 0;
	
	fann_type sum = 0;
	fann_type *old_output = NULL;
	fann_type *weights = NULL;

	assert(ann != NULL);
	assert(input != NULL);

	neuron = ann->first_layer->first_neuron;
	num_neurons = ann->first_layer->num_outputs;

	/* Initialization*/
	for (i=0; i<num_neurons; i++)
	{
		ann->output[i] = input[i];
	}

	neuron_array_size = num_neurons * sizeof(fann_type);
	old_output = (fann_type *)malloc(neuron_array_size);
	if (old_output == NULL)
	{
		printf("Hopfield: Could not allocate 'old_output'\n");
		return NULL;
	}

	/* Iterate until states unchanged*/
	/* FIXME: The number of iterations is currently */
	/*   somewhat arbitrary (10*num_neurons once appears*/
	/*   stable). Having a better measure of whether*/
	/*   the network is stable would be nice.*/
	do
	{

		/* Asynchronously update the neurons*/
		rand_neuron = rand() % num_neurons;
		/*printf("Iters: %d (rand = %d)\n", iters, rand_neuron);*/
		weights = neuron[rand_neuron].weights;
		memcpy(old_output, ann->output, neuron_array_size);

		/* Compute the new output vector*/
		sum = 0;
		for (i=0; i<num_neurons; i++)
		{
			sum += weights[i] * ann->output[i];	
		}

		ann->output[rand_neuron] = (sum >= 0) ? 1 : -1;


		/* Compare the old output vector to the new output vector*/
		if ((statediff = memcmp(old_output, ann->output, neuron_array_size)) != 0)
		{
			iters = 0;
		}

		iters += (statediff == 0) ? 1 : 0;

	} while (iters < 10*num_neurons);

	fann_safe_free(old_output);

	/* FIXME: Is it OK to return an internal fp? */
	return ann->output;
}
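The loop above is the standard asynchronous Hopfield update. With symmetric weights (w_{ij} = w_{ji}) and zero diagonal, every update is non-increasing in the network energy

\[
s_i \leftarrow \operatorname{sgn}\Big( \sum_j w_{ij}\, s_j \Big), \qquad
E = -\tfrac{1}{2} \sum_{i \neq j} w_{ij}\, s_i s_j ,
\]

so the state must settle into a fixed point. Detecting one full sweep with no state change would be a principled replacement for the arbitrary 10*num_neurons counter flagged in the FIXME.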
Code example #8
File: fann_recurrent.c Project: EQ4/Visore
/* Creates a feedforward, layered net which is an "unrolled" recurrent network.
 For example, the recurrent net:
  A <-> B <-> C<- (where C autosynapses)
 Becomes (unrolled two time steps):
  A  B  C    input layer
   \/ \/|
  A  B  C    hidden layer I
   \/ \/|
  A  B  C    output layer
*/
FANN_EXTERNAL struct fann *FANN_API fann_create_unrolled_recurrent(
	unsigned int num_neurons, fann_type *weights, unsigned int time_steps)
{
	struct fann *ann        = NULL;
	unsigned int *layers    = NULL;
	unsigned int num_layers = time_steps + 1;
	unsigned int layern     = 0;

	struct fann_layer *curr_layer   = NULL;
	struct fann_neuron *curr_neuron = NULL;
	fann_type *curr_weights         = weights;

	
	/*************************************
	  CREATE THE FEEDFORWARD STRUCTURE 
	 *************************************/

	/* Allocate number of neurons per layer array */
	layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
	if (layers == NULL)
	{
		return NULL;
	}

	/* Populate each layer with the number of neurons */
	for (layern=0; layern < num_layers; layern++)
	{
		layers[layern] = num_neurons;
	}

	/* Create the feedforward network */
	ann = fann_create_standard_array(num_layers, layers);
	fann_safe_free(layers);
	if (ann == NULL)
	{
		return NULL;
	}

	/*printf("REQUESTED: LAYERS=%d, NEURONS/LAYER=%d\n", num_layers, num_neurons);
	printf("NUM LAYERS: %d\n", ann->last_layer - ann->first_layer);
	printf("IN: %d, NEURONS: %d, OUTPUT: %d\n",
		ann->num_input, ann->num_neurons, ann->num_output);*/


	/*************************************
	  SET THE FEEDFORWARD WEIGHTS
	 *************************************/

	/* Visit each layer */
	for (curr_layer = ann->first_layer;
		curr_layer != ann->last_layer; 
		curr_layer++)
	{
		/* The weights are the same for each feedforward layer! */
		curr_weights = weights;

		/* Copy the weight matrix into the neurons, 
		   one row per neuron */
		for (curr_neuron = curr_layer->first_neuron; 
			curr_neuron != curr_layer->last_neuron; 
			curr_neuron++)
		{
			memcpy(curr_neuron->weights, curr_weights, num_neurons * sizeof(fann_type));

			curr_weights += num_neurons;
		}
	}

	return ann;
}
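A usage sketch with hypothetical weights, matching the A <-> B <-> C network in the comment above (row k holds the incoming weights of neuron k):

	/* Hypothetical 3x3 recurrent weight matrix */
	fann_type weights[3 * 3] = {
		0.0, 0.5, 0.0,	/* A receives from B */
		0.5, 0.0, 0.5,	/* B receives from A and C */
		0.0, 0.5, 0.1	/* C receives from B and itself (autosynapse) */
	};
	struct fann *ann = fann_create_unrolled_recurrent(3, weights, 2);

	if (ann != NULL)
	{
		fann_type input[3] = {1.0, -1.0, 1.0};
		fann_type *output = fann_run(ann, input);	/* standard feedforward run */
		/* ... use output[0..2] ... */
		fann_destroy(ann);
	}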
Code example #9
File: fann.cpp Project: akoshterek/MultiGammonCpp
FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
{
	if(ann == NULL)
		return;
	fann_safe_free(ann->weights);
	fann_safe_free(ann->connections);
	fann_safe_free(ann->first_layer->first_neuron);
	//AK
	fann_safe_free(ann->first_layer->sum);
	fann_safe_free(ann->first_layer->value);
	//
	fann_safe_free(ann->first_layer);
	fann_safe_free(ann->output);
	fann_safe_free(ann->train_errors);
	fann_safe_free(ann->train_slopes);
	fann_safe_free(ann->prev_train_slopes);
	fann_safe_free(ann->prev_steps);
	fann_safe_free(ann->prev_weights_deltas);
	fann_safe_free(ann->errstr);
	
	fann_safe_free( ann->scale_mean_in );
	fann_safe_free( ann->scale_deviation_in );
	fann_safe_free( ann->scale_new_min_in );
	fann_safe_free( ann->scale_factor_in );

	fann_safe_free( ann->scale_mean_out );
	fann_safe_free( ann->scale_deviation_out );
	fann_safe_free( ann->scale_new_min_out );
	fann_safe_free( ann->scale_factor_out );
	
	fann_safe_free(ann);
}
Code example #10
FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
{
	if(ann == NULL)
		return;
	fann_safe_free(ann->weights);
	fann_safe_free(ann->connections);
	fann_safe_free(ann->first_layer->first_neuron);
	fann_safe_free(ann->first_layer);
	fann_safe_free(ann->output);
	fann_safe_free(ann->train_errors);
	fann_safe_free(ann->train_slopes);
	fann_safe_free(ann->prev_train_slopes);
	fann_safe_free(ann->prev_steps);
	fann_safe_free(ann->prev_weights_deltas);
	fann_safe_free(ann->errstr);
	fann_safe_free(ann->cascade_activation_functions);
	fann_safe_free(ann->cascade_activation_steepnesses);
	
#ifndef FIXEDFANN
	fann_safe_free( ann->scale_mean_in );
	fann_safe_free( ann->scale_deviation_in );
	fann_safe_free( ann->scale_new_min_in );
	fann_safe_free( ann->scale_factor_in );

	fann_safe_free( ann->scale_mean_out );
	fann_safe_free( ann->scale_deviation_out );
	fann_safe_free( ann->scale_new_min_out );
	fann_safe_free( ann->scale_factor_out );
#endif
	
	fann_safe_free(ann);
}
Code example #11
/* INTERNAL FUNCTION
   Allocates the main structure and sets some default values.
 */
struct fann *fann_allocate_structure(unsigned int num_layers)
{
	struct fann *ann;

	if(num_layers < 2)
	{
#ifdef DEBUG
		printf("less than 2 layers - ABORTING.\n");
#endif
		return NULL;
	}

	/* allocate and initialize the main network structure */
	ann = (struct fann *) malloc(sizeof(struct fann));
	if(ann == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		return NULL;
	}

	ann->errno_f = FANN_E_NO_ERROR;
	ann->error_log = fann_default_error_log;
	ann->errstr = NULL;
	ann->learning_rate = 0.7f;
	ann->learning_momentum = 0.0;
	ann->total_neurons = 0;
	ann->total_connections = 0;
	ann->num_input = 0;
	ann->num_output = 0;
	ann->train_errors = NULL;
	ann->train_slopes = NULL;
	ann->prev_steps = NULL;
	ann->prev_train_slopes = NULL;
	ann->prev_weights_deltas = NULL;
	ann->training_algorithm = FANN_TRAIN_RPROP;
	ann->num_MSE = 0;
	ann->MSE_value = 0;
	ann->num_bit_fail = 0;
	ann->bit_fail_limit = (fann_type)0.35;
	ann->network_type = FANN_NETTYPE_LAYER;
	ann->train_error_function = FANN_ERRORFUNC_TANH;
	ann->train_stop_function = FANN_STOPFUNC_MSE;
	ann->callback = NULL;
	ann->user_data = NULL; /* User is responsible for deallocation */
	ann->weights = NULL;
	ann->connections = NULL;
	ann->output = NULL;
#ifndef FIXEDFANN
	ann->scale_mean_in = NULL;
	ann->scale_deviation_in = NULL;
	ann->scale_new_min_in = NULL;
	ann->scale_factor_in = NULL;
	ann->scale_mean_out = NULL;
	ann->scale_deviation_out = NULL;
	ann->scale_new_min_out = NULL;
	ann->scale_factor_out = NULL;
#endif	
	
	/* variables used for cascade correlation (reasonable defaults) */
	ann->cascade_output_change_fraction = 0.01f;
	ann->cascade_candidate_change_fraction = 0.01f;
	ann->cascade_output_stagnation_epochs = 12;
	ann->cascade_candidate_stagnation_epochs = 12;
	ann->cascade_num_candidate_groups = 2;
	ann->cascade_weight_multiplier = (fann_type)0.4;
	ann->cascade_candidate_limit = (fann_type)1000.0;
	ann->cascade_max_out_epochs = 150;
	ann->cascade_max_cand_epochs = 150;
	ann->cascade_candidate_scores = NULL;
	ann->cascade_activation_functions_count = 10;
	ann->cascade_activation_functions = 
		(enum fann_activationfunc_enum *)calloc(ann->cascade_activation_functions_count, 
							   sizeof(enum fann_activationfunc_enum));
	if(ann->cascade_activation_functions == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		free(ann);
		return NULL;
	}
							   
	ann->cascade_activation_functions[0] = FANN_SIGMOID;
	ann->cascade_activation_functions[1] = FANN_SIGMOID_SYMMETRIC;
	ann->cascade_activation_functions[2] = FANN_GAUSSIAN;
	ann->cascade_activation_functions[3] = FANN_GAUSSIAN_SYMMETRIC;
	ann->cascade_activation_functions[4] = FANN_ELLIOT;
	ann->cascade_activation_functions[5] = FANN_ELLIOT_SYMMETRIC;
	ann->cascade_activation_functions[6] = FANN_SIN_SYMMETRIC;
	ann->cascade_activation_functions[7] = FANN_COS_SYMMETRIC;
	ann->cascade_activation_functions[8] = FANN_SIN;
	ann->cascade_activation_functions[9] = FANN_COS;

	ann->cascade_activation_steepnesses_count = 4;
	ann->cascade_activation_steepnesses = 
		(fann_type *)calloc(ann->cascade_activation_steepnesses_count, 
							   sizeof(fann_type));
	if(ann->cascade_activation_steepnesses == NULL)
	{
		fann_safe_free(ann->cascade_activation_functions);
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		free(ann);
		return NULL;
	}
	
	ann->cascade_activation_steepnesses[0] = (fann_type)0.25;
	ann->cascade_activation_steepnesses[1] = (fann_type)0.5;
	ann->cascade_activation_steepnesses[2] = (fann_type)0.75;
	ann->cascade_activation_steepnesses[3] = (fann_type)1.0;

	/* Variables for use with Quickprop training (reasonable defaults) */
	ann->quickprop_decay = (float) -0.0001;
	ann->quickprop_mu = 1.75;

	/* Variables for use with RPROP training (reasonable defaults) */
	ann->rprop_increase_factor = (float) 1.2;
	ann->rprop_decrease_factor = 0.5;
	ann->rprop_delta_min = 0.0;
	ann->rprop_delta_max = 50.0;
	ann->rprop_delta_zero = 0.1f;
	
	fann_init_error_data((struct fann_error *) ann);

#ifdef FIXEDFANN
	/* these values are only boring defaults, and should really
	 * never be used, since the real values are always loaded from a file. */
	ann->decimal_point = 8;
	ann->multiplier = 256;
#endif

	/* allocate room for the layers */
	ann->first_layer = (struct fann_layer *) calloc(num_layers, sizeof(struct fann_layer));
	if(ann->first_layer == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		free(ann);
		return NULL;
	}

	ann->last_layer = ann->first_layer + num_layers;

	return ann;
}
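A sketch of the caller side, as in the fann_create_* construction routines (the names follow the code above): the NULL return must be checked before the layers are populated.

	struct fann *ann = fann_allocate_structure(num_layers);
	if (ann == NULL)
		return NULL;	/* error already reported via fann_error() */
	/* ... allocate neurons and connections per layer, then return ann ... */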