Example #1
/* INTERNAL FUNCTION
   The iRprop- algorithm
*/
void fann_update_weights_irpropm(struct fann *ann, unsigned int first_weight, unsigned int past_end)
{
	fann_type *train_slopes = ann->train_slopes;
	fann_type *weights = ann->weights;
	fann_type *prev_steps = ann->prev_steps;
	fann_type *prev_train_slopes = ann->prev_train_slopes;

	fann_type prev_step, slope, prev_slope, next_step, same_sign;

	float increase_factor = ann->rprop_increase_factor;	/*1.2; */
	float decrease_factor = ann->rprop_decrease_factor;	/*0.5; */
	float delta_min = ann->rprop_delta_min;	/*0.0; */
	float delta_max = ann->rprop_delta_max;	/*50.0; */

	unsigned int i = first_weight;
	unsigned int *connections_to_weights = ann->connections_to_weights;

	for(; i != past_end; i++)
	{
		prev_step = fann_max(prev_steps[i], (fann_type) 0.0001);	/* prev_step may not be zero because then the training will stop */
		slope = train_slopes[i];
		prev_slope = prev_train_slopes[i];

		same_sign = prev_slope * slope;

		if(same_sign >= 0.0)
			next_step = fann_min(prev_step * increase_factor, delta_max);
		else
		{
			next_step = fann_max(prev_step * decrease_factor, delta_min);
			slope = 0;
		}

		if(slope < 0)
		{
			weights[connections_to_weights[i]] -= next_step;
			if(weights[connections_to_weights[i]] < -1500)
				weights[connections_to_weights[i]] = -1500;
		}
		else
		{
			weights[connections_to_weights[i]] += next_step;
			if(weights[connections_to_weights[i]] > 1500)
				weights[connections_to_weights[i]] = 1500;
		}

		/*if(i == 2){
		 * printf("weight=%f, slope=%f, next_step=%f, prev_step=%f\n", weights[i], slope, next_step, prev_step);
		 * } */

		/* update global data arrays */
		prev_steps[i] = next_step;
		prev_train_slopes[i] = slope;
		train_slopes[i] = 0.0;
	}
}
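For context, a minimal sketch (function name hypothetical) of how this internal routine is driven: one batch pass accumulates the slopes, then a single call updates every weight, mirroring the epoch structure of Examples #3 and #4 below.

/* Sketch: single-threaded batch epoch ending in an iRprop- update.
 * Assumes the same FANN internals used in the examples below. */
float train_epoch_irpropm_sketch(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;

	if(ann->prev_train_slopes == NULL)
		fann_clear_train_arrays(ann);

	fann_reset_MSE(ann);
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_backpropagate_MSE(ann);
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
	}
	fann_update_weights_irpropm(ann, 0, ann->total_connections);
	return fann_get_MSE(ann);
}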
Example #2
/* INTERNAL FUNCTION
   Adjust the stepwise functions (if used)
*/
void fann_update_stepwise(struct fann *ann)
{
	unsigned int i = 0;

	/* Calculate the parameters for the stepwise-linear
	 * sigmoid function in fixed point,
	 * using a rewritten (inverted) sigmoid function.
	 * Target results: 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
	 */
	ann->sigmoid_results[0] = fann_max((fann_type) (ann->multiplier / 200.0 + 0.5), 1);
	ann->sigmoid_results[1] = fann_max((fann_type) (ann->multiplier / 20.0 + 0.5), 1);
	ann->sigmoid_results[2] = fann_max((fann_type) (ann->multiplier / 4.0 + 0.5), 1);
	ann->sigmoid_results[3] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 4.0 + 0.5), ann->multiplier - 1);
	ann->sigmoid_results[4] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 20.0 + 0.5), ann->multiplier - 1);
	ann->sigmoid_results[5] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 200.0 + 0.5), ann->multiplier - 1);

	ann->sigmoid_symmetric_results[0] = fann_max((fann_type) ((ann->multiplier / 100.0) - ann->multiplier - 0.5),
	                                             (fann_type) (1 - (fann_type) ann->multiplier));
	ann->sigmoid_symmetric_results[1] = fann_max((fann_type) ((ann->multiplier / 10.0) - ann->multiplier - 0.5),
	                                             (fann_type) (1 - (fann_type) ann->multiplier));
	ann->sigmoid_symmetric_results[2] = fann_max((fann_type) ((ann->multiplier / 2.0) - ann->multiplier - 0.5),
	                                             (fann_type) (1 - (fann_type) ann->multiplier));
	ann->sigmoid_symmetric_results[3] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 2.0 + 0.5),
	                                             ann->multiplier - 1);
	ann->sigmoid_symmetric_results[4] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 10.0 + 0.5),
	                                             ann->multiplier - 1);
	ann->sigmoid_symmetric_results[5] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 100.0 + 1.0),
	                                             ann->multiplier - 1);

	for(i = 0; i < 6; i++)
	{
		ann->sigmoid_values[i] =
			(fann_type) (((log(ann->multiplier / (float) ann->sigmoid_results[i] - 1) *
						   (float) ann->multiplier) / -2.0) * (float) ann->multiplier);
		ann->sigmoid_symmetric_values[i] =
			(fann_type) (((log
						   ((ann->multiplier -
							 (float) ann->sigmoid_symmetric_results[i]) /
							((float) ann->sigmoid_symmetric_results[i] +
							 ann->multiplier)) * (float) ann->multiplier) / -2.0) *
						 (float) ann->multiplier);
	}
}
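For reference, the loop at the end inverts the sigmoid to recover the x-positions of the six breakpoints. Reading off the code, with multiplier m and target output y_i (approximately m times 0.005, 0.05, 0.25, 0.75, 0.95, 0.995):

x_i = -\frac{m^2}{2}\,\ln\!\left(\frac{m}{y_i} - 1\right),
\qquad
x_i^{\mathrm{sym}} = -\frac{m^2}{2}\,\ln\!\left(\frac{m - y_i}{m + y_i}\right)

where one factor of m undoes the fixed-point scaling of y_i and the other expresses x_i in the same fixed-point scale.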
Example #3
float train_epoch_sarprop_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb, vector< vector<fann_type> >& predicted_outputs)
{

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}

	fann_reset_MSE(ann);
	predicted_outputs.resize(data->num_data, vector<fann_type>(data->num_output));
	vector<struct fann *> ann_vect(threadnumb);
	int i = 0, j = 0;

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{
		#pragma omp for schedule(static)
		for(i = 0; i < (int)threadnumb; i++)
		{
			ann_vect[i] = fann_copy(ann);
		}

		//parallel computing of the updates
		#pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; i++)
		{
			j = omp_get_thread_num();

			fann_type* temp_predicted_output = fann_run(ann_vect[j], data->input[i]);
			for(unsigned int k = 0; k < data->num_output; ++k)
			{
				predicted_outputs[i][k] = temp_predicted_output[k];
			}
			fann_compute_MSE(ann_vect[j], data->output[i]);
			fann_backpropagate_MSE(ann_vect[j]);
			fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
		}
	}

	{
		fann_type *weights = ann->weights;
		fann_type *prev_steps = ann->prev_steps;
		fann_type *prev_train_slopes = ann->prev_train_slopes;
		const unsigned int first_weight = 0;
		const unsigned int past_end = ann->total_connections;
		const unsigned int epoch = ann->sarprop_epoch;

		fann_type next_step;

		/* These should be set from variables */
		const float increase_factor = ann->rprop_increase_factor;	/*1.2; */
		const float decrease_factor = ann->rprop_decrease_factor;	/*0.5; */
		/* TODO: why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */
		const float delta_min = 0.000001f;
		const float delta_max = ann->rprop_delta_max;	/*50.0; */
		const float weight_decay_shift = ann->sarprop_weight_decay_shift; /* ld 0.01 = -6.644 */
		const float step_error_threshold_factor = ann->sarprop_step_error_threshold_factor; /* 0.1 */
		const float step_error_shift = ann->sarprop_step_error_shift; /* ld 3 = 1.585 */
		const float T = ann->sarprop_temperature;

		//merge of MSEs
		for(i = 0; i < (int)threadnumb; ++i)
		{
			ann->MSE_value += ann_vect[i]->MSE_value;
			ann->num_MSE += ann_vect[i]->num_MSE;
		}

		const float MSE = fann_get_MSE(ann);
		const float RMSE = (float)sqrt(MSE);

		/* for all weights; TODO: are biases included? */
		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(next_step)
		{
			#pragma omp for schedule(static)
			for(i = first_weight; i < (int)past_end; i++)
			{
				/* TODO: confirm whether 1x10^-6 == delta_min is really better */
				const fann_type prev_step = fann_max(prev_steps[i], (fann_type) 0.000001);	/* prev_step may not be zero because then the training will stop */

				/* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper) */
				fann_type temp_slopes = 0.0;
				unsigned int k;
				fann_type *train_slopes;
				for(k = 0; k < threadnumb; ++k)
				{
					train_slopes = ann_vect[k]->train_slopes;
					temp_slopes += train_slopes[i];
					train_slopes[i] = 0.0;
				}
				temp_slopes = -temp_slopes - weights[i] * (fann_type)fann_exp2(-T * epoch + weight_decay_shift);

				next_step = 0.0;

				/* TODO: is prev_train_slopes[i] 0.0 in the beginning? */
				const fann_type prev_slope = prev_train_slopes[i];

				const fann_type same_sign = prev_slope * temp_slopes;

				if(same_sign > 0.0)
				{
					next_step = fann_min(prev_step * increase_factor, delta_max);
					/* TODO: are the signs inverted? see differences between SARPROP paper and iRprop */
					if(temp_slopes < 0.0)
						weights[i] += next_step;
					else
						weights[i] -= next_step;
				}
				else if(same_sign < 0.0)
				{
					#ifndef RAND_MAX
					#define RAND_MAX 0x7fffffff
					#endif
					if(prev_step < step_error_threshold_factor * MSE)
						next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (fann_type)fann_exp2(-T * epoch + step_error_shift);
					else
						next_step = fann_max(prev_step * decrease_factor, delta_min);

					temp_slopes = 0.0;
				}
				else
				{
					if(temp_slopes < 0.0)
						weights[i] += prev_step;
					else
						weights[i] -= prev_step;
				}

				/* update global data arrays */
				prev_steps[i] = next_step;
				prev_train_slopes[i] = temp_slopes;
			}
		}
	}

	++(ann->sarprop_epoch);

	//already computed before
	/*//merge of MSEs
	for(i=0;i<threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
	}*/
	//destroy the copies of the ann
	for(i=0; i<(int)threadnumb; i++)
	{
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
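A minimal driver sketch for this parallel trainer; ann, data, max_epochs and desired_error are assumed to be set up elsewhere, and the thread count of 4 is illustrative:

// Hypothetical training loop around train_epoch_sarprop_parallel.
vector< vector<fann_type> > predicted_outputs;
for(unsigned int epoch = 0; epoch < max_epochs; ++epoch)
{
	float mse = train_epoch_sarprop_parallel(ann, data, 4, predicted_outputs);
	if(mse <= desired_error)
		break;
}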
Example #4
float train_epoch_irpropm_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb)
{

	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann);
	}

	//#define THREADNUM 1
	fann_reset_MSE(ann);

	vector<struct fann *> ann_vect(threadnumb);
	int i = 0, j = 0;

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{
		#pragma omp for schedule(static)
		for(i = 0; i < (int)threadnumb; i++)
		{
			ann_vect[i] = fann_copy(ann);
		}

		//parallel computing of the updates
		#pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; i++)
		{
			j = omp_get_thread_num();
			fann_run(ann_vect[j], data->input[i]);
			fann_compute_MSE(ann_vect[j], data->output[i]);
			fann_backpropagate_MSE(ann_vect[j]);
			fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1);
		}
	}

	{
		fann_type *weights = ann->weights;
		fann_type *prev_steps = ann->prev_steps;
		fann_type *prev_train_slopes = ann->prev_train_slopes;

		fann_type next_step;

		const float increase_factor = ann->rprop_increase_factor;	//1.2;
		const float decrease_factor = ann->rprop_decrease_factor;	//0.5;
		const float delta_min = ann->rprop_delta_min;	//0.0;
		const float delta_max = ann->rprop_delta_max;	//50.0;
		const unsigned int first_weight = 0;
		const unsigned int past_end = ann->total_connections;

		omp_set_dynamic(0);
		omp_set_num_threads(threadnumb);
		#pragma omp parallel private(next_step)
		{
			#pragma omp for schedule(static)
			for(i = first_weight; i < (int)past_end; i++)
			{
				const fann_type prev_step = fann_max(prev_steps[i], (fann_type) 0.0001);	// prev_step may not be zero because then the training will stop

				fann_type temp_slopes = 0.0;
				unsigned int k;
				fann_type *train_slopes;
				for(k = 0; k < threadnumb; ++k)
				{
					train_slopes = ann_vect[k]->train_slopes;
					temp_slopes += train_slopes[i];
					train_slopes[i] = 0.0;
				}

				const fann_type prev_slope = prev_train_slopes[i];

				const fann_type same_sign = prev_slope * temp_slopes;

				if(same_sign >= 0.0)
					next_step = fann_min(prev_step * increase_factor, delta_max);
				else
				{
					next_step = fann_max(prev_step * decrease_factor, delta_min);
					temp_slopes = 0;
				}

				if(temp_slopes < 0)
				{
					weights[i] -= next_step;
					if(weights[i] < -1500)
						weights[i] = -1500;
				}
				else
				{
					weights[i] += next_step;
					if(weights[i] > 1500)
						weights[i] = 1500;
				}

				// update global data arrays
				prev_steps[i] = next_step;
				prev_train_slopes[i] = temp_slopes;
			}
		}
	}

	//merge of MSEs
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
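One detail shared by both parallel trainers: the slope used for weight i is the sum of the slopes accumulated by the N thread-local copies, which are zeroed as they are read,

s_i = \sum_{k=0}^{N-1} s_i^{(k)}

so, up to floating-point summation order, the update matches what a single-threaded batch pass over the same data would produce.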
Example #5
/* INTERNAL FUNCTION
   The SARPROP algorithm
*/
void fann_update_weights_sarprop(struct fann *ann, unsigned int epoch, unsigned int first_weight, unsigned int past_end)
{
	fann_type *train_slopes = ann->train_slopes;
	fann_type *weights = ann->weights;
	fann_type *prev_steps = ann->prev_steps;
	fann_type *prev_train_slopes = ann->prev_train_slopes;

	fann_type prev_step, slope, prev_slope, next_step = 0, same_sign;

	/* These should be set from variables */
	float increase_factor = ann->rprop_increase_factor;	/*1.2; */
	float decrease_factor = ann->rprop_decrease_factor;	/*0.5; */
	/* TODO: why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */
	float delta_min = 0.000001f;
	float delta_max = ann->rprop_delta_max;	/*50.0; */
	float weight_decay_shift = ann->sarprop_weight_decay_shift; /* ld 0.01 = -6.644 */
	float step_error_threshold_factor = ann->sarprop_step_error_threshold_factor; /* 0.1 */
	float step_error_shift = ann->sarprop_step_error_shift; /* ld 3 = 1.585 */
	float T = ann->sarprop_temperature;
	float MSE = fann_get_MSE(ann);
	float RMSE = (float)sqrt(MSE);

	unsigned int i = first_weight;


	/* for all weights; TODO: are biases included? */
	for(; i != past_end; i++)
	{
		/* TODO: confirm whether 1x10^-6 == delta_min is really better */
		prev_step = fann_max(prev_steps[i], (fann_type) 0.000001);	/* prev_step may not be zero because then the training will stop */
		/* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)*/
		slope = -train_slopes[i] - weights[i] * (fann_type)fann_exp2(-T * epoch + weight_decay_shift);

		/* TODO: is prev_train_slopes[i] 0.0 in the beginning? */
		prev_slope = prev_train_slopes[i];

		same_sign = prev_slope * slope;

		if(same_sign > 0.0)
		{
			next_step = fann_min(prev_step * increase_factor, delta_max);
			/* TODO: are the signs inverted? see differences between SARPROP paper and iRprop */
			if (slope < 0.0)
				weights[i] += next_step;
			else
				weights[i] -= next_step;
		}
		else if(same_sign < 0.0)
		{
			if(prev_step < step_error_threshold_factor * MSE)
				next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (fann_type)fann_exp2(-T * epoch + step_error_shift);
			else
				next_step = fann_max(prev_step * decrease_factor, delta_min);

			slope = 0.0;
		}
		else
		{
			if(slope < 0.0)
				weights[i] += prev_step;
			else
				weights[i] -= prev_step;
		}


		/*if(i == 2){
		 * printf("weight=%f, slope=%f, next_step=%f, prev_step=%f\n", weights[i], slope, next_step, prev_step);
		 * } */

		/* update global data arrays */
		prev_steps[i] = next_step;
		prev_train_slopes[i] = slope;
		train_slopes[i] = 0.0;
	}
}
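In the code's notation, with temperature T, epoch t, and uniform r in [0, 1), the SARPROP modification reads off as

slope_i = -\frac{\partial E}{\partial w_i} - w_i \cdot 2^{-Tt + k_1}

for the weight-decay term (k_1 = sarprop_weight_decay_shift), and, on a sign change with a small previous step,

\Delta_i = \Delta_i^{prev} \cdot \eta^- + r \cdot \mathrm{RMSE} \cdot 2^{-Tt + k_2}

with k_2 = sarprop_step_error_shift and \eta^- the decrease factor; both correspond to the fann_exp2 calls above.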
Example #6
/* INTERNAL FUNCTION
   Propagate the error backwards from the output layer.

   After this the train_errors in the hidden layers will be:
   neuron_value_derived * sum(outgoing_weights * connected_neuron)
*/
void fann_backpropagate_MSE(struct fann *ann)
{
	fann_type tmp_error, max;
	unsigned int i;
	struct fann_layer *layer_it;
	struct fann_neuron *neuron_it, *last_neuron;
	struct fann_neuron **connections;

	fann_type *error_begin = ann->train_errors;
	fann_type *error_prev_layer;
	fann_type *weights;
	unsigned int *connections_to_weights;
	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
	const struct fann_layer *second_layer = ann->first_layer + 1;
	struct fann_layer *last_layer = ann->last_layer;

	/* go through all the layers, from last to first.
	 * And propagate the error backwards */
	for(layer_it = last_layer - 1; layer_it > second_layer; --layer_it)
	{
		last_neuron = layer_it->last_neuron;

		/* for each connection in this layer, propagate the error backwards */
		if(ann->connection_rate >= 1)
		{
			if(ann->network_type == FANN_NETTYPE_LAYER)
			{
				error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
			}
			else
			{
				error_prev_layer = error_begin;
			}

			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{

				tmp_error = error_begin[neuron_it - first_neuron];
				weights = ann->weights;
				connections_to_weights = ann->connections_to_weights + neuron_it->first_con;
				for(i = neuron_it->last_con - neuron_it->first_con; i--;)
				{
					/*printf("i = %d\n", i);
					 * printf("error_prev_layer[%d] = %f\n", i, error_prev_layer[i]);
					 * printf("weights[%d] = %f\n", i, weights[i]); */
					error_prev_layer[i] += tmp_error * weights[connections_to_weights[i]];
				}
			}
		}
		else
		{
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_error = error_begin[neuron_it - first_neuron];
				weights = ann->weights;
				connections_to_weights = ann->connections_to_weights + neuron_it->first_con;
				connections = ann->connections + neuron_it->first_con;
				if(neuron_it->activation_function != FANN_MAXPOOLING)
				{
					for(i = neuron_it->last_con - neuron_it->first_con; i--;)
					{
						error_begin[connections[i] - first_neuron] += tmp_error * weights[connections_to_weights[i]];
					}
				}
				else
				{
					max = connections[neuron_it->last_con - neuron_it->first_con - 1]->value;
					//find the maximum value in the previous layer
					for(i = neuron_it->last_con - neuron_it->first_con; i--;)
					{
						max = fann_max(max, connections[i]->value);
					}
					for(i = neuron_it->last_con - neuron_it->first_con; i--;)
					{
						if (connections[i]->value == max){
							error_begin[connections[i] - first_neuron] += tmp_error;
						}
					}
				}
			}
		}

		/* then calculate the actual errors in the previous layer */
		error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
		last_neuron = (layer_it - 1)->last_neuron;

		for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
		{
			*error_prev_layer *= fann_activation_derived(neuron_it->activation_function,
				neuron_it->activation_steepness, neuron_it->value, neuron_it->sum);
			error_prev_layer++;
		}

	}
}
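In the notation of the header comment, for each hidden neuron j the two passes compute

e_j = f'(s_j) \sum_{k} w_{jk}\, e_k

where the sum runs over the neurons k in the following layer that j connects to: the inner loops accumulate the weighted errors into the previous layer, and the final loop multiplies in the activation derivative via fann_activation_derived.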
Example #7
/* Allocates room inside the neuron for the connections.
 * Creates a fully connected neuron
 */
FANN_EXTERNAL int FANN_API fann_sparse_neuron_constructor(struct fann *ann, struct fann_layer *layer, 
		struct fann_neuron *neuron, struct fann_neuron_descr * descr)
{
	unsigned int i, j;
	unsigned int min_connections, max_connections, num_connections;
	unsigned int connections_per_output;
	float connection_rate = * ((float* )descr->private_data);
	struct fann_sparse_neuron_private_data* private_data;
	struct fann_neuron_private_data_connected_any_any* generic_private_data;
	fann_type *mask, *weights;
	struct dice *dices;

#ifdef FIXEDFANN
	fann_type multiplier = ann->fixed_params->multiplier;
	neuron->activation_steepness = ann->fixed_params->multiplier / 2;
#else
	neuron->activation_steepness = 0.5;
#endif


	connection_rate = connection_rate > 1.0f ? 1.0f : connection_rate;
	
	neuron->activation_function = FANN_SIGMOID_STEPWISE;


	neuron->num_outputs=descr->num_outputs;
	neuron->inputs=layer->inputs;
	neuron->num_inputs=layer->num_inputs;

	/* set the error array to null (lazy allocation) */
	neuron->train_errors=NULL;
	
	/* this is the number of actually allocated weights (some are unused) */
	neuron->num_weights=neuron->num_outputs*neuron->num_inputs;
	
	/* allocate the weights even for unused connections */
	if ( (weights = neuron->weights = (fann_type*) calloc(neuron->num_weights, sizeof(fann_type))) == NULL)
		return 1;
	
	/* allocate space for the dot products results */
	if ( (neuron->sums = (fann_type*) malloc(neuron->num_outputs*sizeof(fann_type))) == NULL)
		return 1;

	/* allocate private data */
	if ( (private_data = neuron->private_data = (struct fann_sparse_neuron_private_data*) malloc(sizeof(struct fann_sparse_neuron_private_data))) == NULL)
		return 1;
	/* private data stores the connection mask, allocate it */
	if ( (mask = private_data->mask = (fann_type*) calloc(neuron->num_weights, sizeof(fann_type))) == NULL)
		return 1;
	if ( (generic_private_data = private_data->generic = (struct fann_neuron_private_data_connected_any_any*) malloc (sizeof(struct fann_neuron_private_data_connected_any_any))) == NULL)
		return 1;
	generic_private_data->prev_steps=NULL;
	generic_private_data->prev_weights_deltas=NULL;

	/* allocate a set of dices to select rows */
	if ( (dices = (struct dice*) malloc(neuron->num_inputs*sizeof(struct dice))) == NULL)
		return 1;
	
	for (i=0; i<neuron->num_inputs; i++)
	{
		dices[i].idx=i;
		dices[i].value=0;
	}
	
	min_connections = fann_max(neuron->num_inputs, neuron->num_outputs);
	max_connections = neuron->num_inputs * neuron->num_outputs;
	num_connections = fann_max(min_connections,
			(unsigned int) (0.5 + (connection_rate * max_connections)));

	connections_per_output = num_connections / neuron->num_outputs;

	/* Dice-throw simulation: a float value is assigned to each input.
	 * The decimal component of the value is chosen randomly between 0 and 0.4 ("dice throw").
	 * The integer component equals the number of output neurons already
	 * connected to this input.
	 * For each output neuron, each input gets a new "dice throw", and the inputs are
	 * sorted in ascending order by value.
	 * The first entries in the array have fewer output neurons attached
	 * and better luck in the "dice throw". These are selected and their value is incremented.
	 */
	for (i=0; i<neuron->num_outputs; i++)
	{
		/* throw one dice per input */
		for (j=0; j<neuron->num_inputs; j++)
			dices[j].value= ((int)dices[j].value) + fann_rand(0, 0.4);

		/* sort: smaller (dice value + num_connections) wins */
		qsort((void*) dices, neuron->num_inputs, sizeof(struct dice), dice_sorter);

		/* assign connections to the output to the winner inputs */
		for (j=0; j<connections_per_output; j++)
		{
			dices[j].value+=1;
			mask[dices[j].idx] = (fann_type) 1.0f;
			weights[dices[j].idx] = (fann_type) fann_random_weight();
		}
		weights += neuron->num_inputs;
	}
	free(dices);

	/* set the function pointers */
	neuron->destructor = fann_sparse_neuron_destructor;
	neuron->run = fann_sparse_neuron_run;
	neuron->backpropagate = fann_sparse_neuron_backprop;
	neuron->update_weights = fann_sparse_neuron_update;
	neuron->compute_error = fann_sparse_neuron_compute_MSE;
	
	return 0;
}
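A quick worked example of the connection budget above, with illustrative sizes: for num_inputs = 10, num_outputs = 5 and connection_rate = 0.5, min_connections = max(10, 5) = 10 and max_connections = 10 * 5 = 50, so num_connections = max(10, round(0.5 * 50)) = 25 and connections_per_output = 25 / 5 = 5; each output row of the mask then ends up with the 5 winning inputs set.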
Example #8
/* INTERNAL FUNCTION
	 The iRprop- algorithm
	 */
void fann_sparse_neuron_irpropm_update(struct fann *ann, struct fann_neuron *neuron)
{
	struct fann_neuron_private_data_connected_any_any *priv = (struct fann_neuron_private_data_connected_any_any *) neuron->private_data;

	fann_type *weights = neuron->weights;
	fann_type *weights_deltas = neuron->weights_deltas;
	fann_type *prev_weights_deltas = priv->prev_weights_deltas;
	fann_type *prev_steps = priv->prev_steps;
	fann_type *mask = ((struct fann_sparse_neuron_private_data*) neuron->private_data)->mask;

	const unsigned int num_outputs = neuron->num_outputs;
	const unsigned int num_inputs = neuron->num_inputs;
	float increase_factor = ann->rprop_params->rprop_increase_factor;	/*1.2; */
	float decrease_factor = ann->rprop_params->rprop_decrease_factor;	/*0.5; */
	float delta_min = ann->rprop_params->rprop_delta_min;	/*0.0; */
	float delta_max = ann->rprop_params->rprop_delta_max;	/*50.0; */

	unsigned int o, i;
	fann_type prev_step, delta, prev_delta, next_step, same_sign;
	
	if (neuron->num_backprop_done==0)
	{
		fann_error(NULL, FANN_E_CANT_USE_TRAIN_ALG);
		return;
	}

	for (o = 0; o < num_outputs; o++)
	{
		for (i = 0; i < num_inputs; i++)
		{
			/*don't update masked connections*/
			if (!mask[i])
				continue;
			prev_step = fann_max(prev_steps[i], (fann_type) 0.0001);	/* prev_step may not be zero because then the training will stop */
			/* does 0.0001 make sense????*/
			delta = weights_deltas[i];
			prev_delta = prev_weights_deltas[i];

			same_sign = prev_delta * delta;

			if(same_sign >= 0.0)
				next_step = fann_min(prev_step * increase_factor, delta_max);
			else
			{
				next_step = fann_max(prev_step * decrease_factor, delta_min);
				delta = 0;
			}

			if(delta < 0)
			{
				weights[i] -= next_step;
				if(weights[i] < -1500)
					weights[i] = -1500;
			}
			else
			{
				weights[i] += next_step;
				if(weights[i] > 1500)
					weights[i] = 1500;
			}

			/* update data arrays */
			prev_steps[i] = next_step;
			prev_weights_deltas[i] = delta;
			weights_deltas[i] = 0.0;
		}
		weights += num_inputs;
		weights_deltas += num_inputs;
		prev_weights_deltas += num_inputs;
		prev_steps += num_inputs;
		mask +=num_inputs;
	}
	neuron->num_backprop_done=0;
}
Example #9
FANN_EXTERNAL struct fann *FANN_API fann_create_standard_array(unsigned int num_layers, 
															   const unsigned int *layers)
{
	struct fann_layer *layer_it, *last_layer, *prev_layer;
	struct fann *ann;
	struct fann_neuron *neuron_it, *last_neuron;
#ifdef DEBUG
	unsigned int prev_layer_size;
#endif
	unsigned int num_neurons_in, num_neurons_out, i;
	unsigned int min_connections, max_connections, num_connections;
	unsigned int connections_per_neuron, allocated_connections;
	unsigned int tmp_con;

	/* seed random */
#ifndef FANN_NO_SEED
	fann_seed_rand();
#endif

	/* allocate the general structure */
	ann = fann_allocate_structure(num_layers);
	if(ann == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		return NULL;
	}

	/* determine how many neurons there should be in each layer */
	i = 0;
	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
	{
		/* we do not allocate room here, but we make sure that
		 * last_neuron - first_neuron is the number of neurons */
		layer_it->first_neuron = NULL;
		layer_it->last_neuron = layer_it->first_neuron + layers[i++] + 1;	/* +1 for bias */
		ann->total_neurons += (unsigned int)(layer_it->last_neuron - layer_it->first_neuron);
	}

	ann->num_output = (unsigned int)((ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron - 1);
	ann->num_input = (unsigned int)(ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);

	/* allocate room for the actual neurons */
	fann_allocate_neurons(ann);
	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
	{
		fann_destroy(ann);
		return NULL;
	}

#ifdef DEBUG
	printf("creating fully connected network\n");
	printf("input\n");
	printf("  layer       : %d neurons, 1 bias\n",
		   ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
#endif

	num_neurons_in = ann->num_input;
	for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
	{
		layer_it->activation_function = FANN_SIGMOID_SYMMETRIC;
		layer_it->activation_steepness = 0.5;
		
		num_neurons_out = (unsigned int)(layer_it->last_neuron - layer_it->first_neuron - 1);
		/* if all neurons in each layer should be connected to at least one neuron
		 * in the previous layer, and one neuron in the next layer,
		 * and the bias node should be connected to all neurons in the next layer,
		 * then this is the minimum number of connections */
		min_connections = fann_max(num_neurons_in, num_neurons_out) + num_neurons_out;
		max_connections = num_neurons_in * num_neurons_out;	/* not calculating bias */
		num_connections = fann_max(min_connections, max_connections + num_neurons_out);
		connections_per_neuron = num_connections / num_neurons_out;
		allocated_connections = 0;
		/* Now split out the connections on the different neurons */
		for(i = 0; i != num_neurons_out; i++)
		{
			layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
			allocated_connections += connections_per_neuron;
			layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;

			if(allocated_connections < (num_connections * (i + 1)) / num_neurons_out)
			{
				layer_it->first_neuron[i].last_con++;
				allocated_connections++;
			}
		}

		/* bias neuron also gets stuff */
		layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
		layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;

		ann->total_connections += num_connections;

		/* used in the next run of the loop */
		num_neurons_in = num_neurons_out;
	}

	fann_allocate_connections(ann);
	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
	{
		fann_destroy(ann);
		return NULL;
	}

#ifdef DEBUG
	prev_layer_size = ann->num_input + 1;
#endif
	prev_layer = ann->first_layer;
	last_layer = ann->last_layer;
	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
	{
		last_neuron = layer_it->last_neuron - 1;
		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
		{
			tmp_con = neuron_it->last_con - 1;
			for(i = neuron_it->first_con; i != tmp_con; i++)
			{
				ann->weights[i] = (fann_type) fann_random_weight();
				/* these connections are still initialized for fully connected networks, to allow
				 * operations to work, that are not optimized for fully connected networks.
				 */
				ann->connections[i] = prev_layer->first_neuron + (i - neuron_it->first_con);
			}

			/* bias weight */
			ann->weights[tmp_con] = (fann_type) fann_random_bias_weight();
			ann->connections[tmp_con] = prev_layer->first_neuron + (tmp_con - neuron_it->first_con);
		}
#ifdef DEBUG
		prev_layer_size = layer_it->last_neuron - layer_it->first_neuron;
#endif
		prev_layer = layer_it;
#ifdef DEBUG
		printf("  layer       : %d neurons, 1 bias\n", prev_layer_size - 1);
#endif
	}

#ifdef DEBUG
	printf("output\n");
#endif

	return ann; 
}
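Note that since num_neurons_in * num_neurons_out >= fann_max(num_neurons_in, num_neurons_out) for positive layer sizes, num_connections here always resolves to max_connections + num_neurons_out, i.e. (num_neurons_in + 1) * num_neurons_out: a full weight matrix plus one bias weight per output neuron. For example, a 3-neuron layer feeding a 4-neuron layer allocates (3 + 1) * 4 = 16 connections.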
Example #10
FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_rate,
															 unsigned int num_layers,
															 const unsigned int *layers)
{
	struct fann_layer *layer_it, *last_layer, *prev_layer;
	struct fann *ann;
	struct fann_neuron *neuron_it, *last_neuron, *random_neuron, *bias_neuron;
#ifdef DEBUG
	unsigned int prev_layer_size;
#endif
	unsigned int num_neurons_in, num_neurons_out, i, j;
	unsigned int min_connections, max_connections, num_connections;
	unsigned int connections_per_neuron, allocated_connections;
	unsigned int random_number, found_connection, tmp_con;

#ifdef FIXEDFANN
	unsigned int decimal_point;
	unsigned int multiplier;
#endif
	if(connection_rate > 1)
	{
		connection_rate = 1;
	}

	/* seed random */
#ifndef FANN_NO_SEED
	fann_seed_rand();
#endif

	/* allocate the general structure */
	ann = fann_allocate_structure(num_layers);
	if(ann == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		return NULL;
	}

	ann->connection_rate = connection_rate;
#ifdef FIXEDFANN
	decimal_point = ann->decimal_point;
	multiplier = ann->multiplier;
	fann_update_stepwise(ann);
#endif

	/* determine how many neurons there should be in each layer */
	i = 0;
	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
	{
		/* we do not allocate room here, but we make sure that
		 * last_neuron - first_neuron is the number of neurons */
		layer_it->first_neuron = NULL;
		layer_it->last_neuron = layer_it->first_neuron + layers[i++] + 1;	/* +1 for bias */
		ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
	}

	ann->num_output = (ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron - 1;
	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;

	/* allocate room for the actual neurons */
	fann_allocate_neurons(ann);
	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
	{
		fann_destroy(ann);
		return NULL;
	}

#ifdef DEBUG
	printf("creating network with connection rate %f\n", connection_rate);
	printf("input\n");
	printf("  layer       : %d neurons, 1 bias\n",
		   ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
#endif

	num_neurons_in = ann->num_input;
	for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
	{
		num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
		/* if all neurons in each layer should be connected to at least one neuron
		 * in the previous layer, and one neuron in the next layer,
		 * and the bias node should be connected to all neurons in the next layer,
		 * then this is the minimum number of connections */
		min_connections = fann_max(num_neurons_in, num_neurons_out) + num_neurons_out;
		max_connections = num_neurons_in * num_neurons_out;	/* not calculating bias */
		num_connections = fann_max(min_connections,
								   (unsigned int) (0.5 + (connection_rate * max_connections)) +
								   num_neurons_out);

		connections_per_neuron = num_connections / num_neurons_out;
		allocated_connections = 0;
		/* Now split out the connections on the different neurons */
		for(i = 0; i != num_neurons_out; i++)
		{
			layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
			allocated_connections += connections_per_neuron;
			layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;

			layer_it->first_neuron[i].activation_function = FANN_SIGMOID_STEPWISE;
#ifdef FIXEDFANN
			layer_it->first_neuron[i].activation_steepness = ann->multiplier / 2;
#else
			layer_it->first_neuron[i].activation_steepness = 0.5;
#endif

			if(allocated_connections < (num_connections * (i + 1)) / num_neurons_out)
			{
				layer_it->first_neuron[i].last_con++;
				allocated_connections++;
			}
		}

		/* bias neuron also gets stuff */
		layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
		layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;

		ann->total_connections += num_connections;

		/* used in the next run of the loop */
		num_neurons_in = num_neurons_out;
	}

	fann_allocate_connections(ann);
	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
	{
		fann_destroy(ann);
		return NULL;
	}

	if(connection_rate >= 1)
	{
#ifdef DEBUG
		prev_layer_size = ann->num_input + 1;
#endif
		prev_layer = ann->first_layer;
		last_layer = ann->last_layer;
		for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
		{
			last_neuron = layer_it->last_neuron - 1;
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_con = neuron_it->last_con - 1;
				for(i = neuron_it->first_con; i != tmp_con; i++)
				{
					ann->weights[i] = (fann_type) fann_random_weight();
					/* these connections are still initialized for fully connected networks, to allow
					 * operations to work, that are not optimized for fully connected networks.
					 */
					ann->connections[i] = prev_layer->first_neuron + (i - neuron_it->first_con);
				}

				/* bias weight */
				ann->weights[tmp_con] = (fann_type) fann_random_bias_weight();
				ann->connections[tmp_con] = prev_layer->first_neuron + (tmp_con - neuron_it->first_con);
			}
#ifdef DEBUG
			prev_layer_size = layer_it->last_neuron - layer_it->first_neuron;
#endif
			prev_layer = layer_it;
#ifdef DEBUG
			printf("  layer       : %d neurons, 1 bias\n", prev_layer_size - 1);
#endif
		}
	}
	else
	{
		/* make connections for a network that is not fully connected */

		/* generally, what we do is first connect each input
		 * neuron to an output neuron, respecting the number of
		 * available connections for each output neuron. Then
		 * we go through all the output neurons and connect the
		 * rest of the connections to input neurons that they are
		 * not already connected to.
		 */

		/* All the connections are cleared by calloc, because we want to
		 * be able to see which connections are already connected */

		for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
		{

			num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
			num_neurons_in = (layer_it - 1)->last_neuron - (layer_it - 1)->first_neuron - 1;

			/* first connect the bias neuron */
			bias_neuron = (layer_it - 1)->last_neuron - 1;
			last_neuron = layer_it->last_neuron - 1;
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{

				ann->connections[neuron_it->first_con] = bias_neuron;
				ann->weights[neuron_it->first_con] = (fann_type) fann_random_bias_weight();
			}

			/* then connect all neurons in the input layer */
			last_neuron = (layer_it - 1)->last_neuron - 1;
			for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
			{

				/* random neuron in the output layer that has space
				 * for more connections */
				do
				{
					random_number = (int) (0.5 + fann_rand(0, num_neurons_out - 1));
					random_neuron = layer_it->first_neuron + random_number;
					/* checks the last space in the connections array for room */
				}
				while(ann->connections[random_neuron->last_con - 1]);

				/* find an empty space in the connection array and connect */
				for(i = random_neuron->first_con; i < random_neuron->last_con; i++)
				{
					if(ann->connections[i] == NULL)
					{
						ann->connections[i] = neuron_it;
						ann->weights[i] = (fann_type) fann_random_weight();
						break;
					}
				}
			}

			/* then connect the rest of the unconnected neurons */
			last_neuron = layer_it->last_neuron - 1;
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				/* find empty space in the connection array and connect */
				for(i = neuron_it->first_con; i < neuron_it->last_con; i++)
				{
					/* continue if already connected */
					if(ann->connections[i] != NULL)
						continue;

					do
					{
						found_connection = 0;
						random_number = (int) (0.5 + fann_rand(0, num_neurons_in - 1));
						random_neuron = (layer_it - 1)->first_neuron + random_number;

						/* check to see if this connection is already there */
						for(j = neuron_it->first_con; j < i; j++)
						{
							if(random_neuron == ann->connections[j])
							{
								found_connection = 1;
								break;
							}
						}

					}
					while(found_connection);

					/* we have found a neuron that is not already
					 * connected to us, connect it */
					ann->connections[i] = random_neuron;
					ann->weights[i] = (fann_type) fann_random_weight();
				}
			}

#ifdef DEBUG
			printf("  layer       : %d neurons, 1 bias\n", num_neurons_out);
#endif
		}

		/* TODO it would be nice to have the randomly created
		 * connections sorted for smoother memory access.
		 */
	}

#ifdef DEBUG
	printf("output\n");
#endif

	return ann;
}
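Finally, a minimal construction sketch for the sparse variant; the layer sizes and rate are illustrative:

/* A 2-4-1 network with roughly half of the possible connections. */
unsigned int layers[3] = {2, 4, 1};
struct fann *ann = fann_create_sparse_array(0.5f, 3, layers);
if(ann != NULL)
{
	/* ... train and run the network ... */
	fann_destroy(ann);
}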