Code example #1
Score: 0
File: fann_train.c  Project: wvang/fann
/* INTERNAL FUNCTION
   Applies the given activation function, at the given steepness, to a single
   value and returns the activated result.
*/
fann_type fann_activation(struct fann * ann, unsigned int activation_function, fann_type steepness,
						  fann_type value)
{
	fann_type scaled = fann_mult(steepness, value);

	/* fann_activation_switch stores the activation result into its last argument */
	fann_activation_switch(activation_function, scaled, scaled);
	return scaled;
}
Code example #2
Score: 0
/* Runs one fully connected neuron layer (floating-point build).
 * For each output o: computes the weighted sum of the inputs against weight
 * row o (4-way unrolled dot product), scales the sum by the activation
 * steepness, clamps it so the activation function cannot overflow, and then
 * applies the activation via activation_macro(). */
static __inline  void  MAKE_NAME(base_neuron_run)(struct fann * ann, struct fann_neuron * neuron) 
{
	unsigned int i, o, num_connections, num_outputs;
	fann_type *neuron_sums, *inputs, *weights;
	fann_type steepness;
	fann_type max_sum;

	/* Algorithm for fully connected networks */
	steepness = neuron->activation_steepness;
	
	inputs = neuron->inputs;
	num_outputs = neuron->num_outputs;
	num_connections = neuron->num_inputs;
	weights = neuron->weights;
	neuron_sums = neuron->sums;

	/* The clamp bound depends only on steepness, which is fixed for this
	 * neuron, so compute it once instead of once per output. */
	max_sum = 150/steepness;
	
	for (o=0; o<num_outputs ; o++)
	{
		neuron_sums[o]=0;
		/* unrolled loop start: fold in the num_connections % 4 leftover first */
		i = num_connections & 3;	/* same as modulo 4 */
		switch (i)
		{
			case 3:
				neuron_sums[o] += fann_mult(weights[2], inputs[2]);
				/* fall through */
			case 2:
				neuron_sums[o] += fann_mult(weights[1], inputs[1]);
				/* fall through */
			case 1:
				neuron_sums[o] += fann_mult(weights[0], inputs[0]);
				/* fall through */
			case 0:
				break;
		}
		
		for(; i != num_connections; i += 4)
		{
			neuron_sums[o] +=
				fann_mult(weights[i]    , inputs[i]    ) +
				fann_mult(weights[i + 1], inputs[i + 1]) +
				fann_mult(weights[i + 2], inputs[i + 2]) +
				fann_mult(weights[i + 3], inputs[i + 3]);
		}
		weights += num_connections;	/* advance to the next output's weight row */
		/* unrolled loop end */

		neuron_sums[o] = fann_mult(steepness, neuron_sums[o]);

		/* Clamp the scaled sum to keep the activation function numerically safe. */
		if(neuron_sums[o] > max_sum)
			neuron_sums[o] = max_sum;
		else if(neuron_sums[o] < -max_sum)
			neuron_sums[o] = -max_sum;

    	activation_macro()(neuron, o);
	}
}
Code example #3
Score: 0
File: fann.cpp  Project: akoshterek/MultiGammonCpp
/* Runs the network on the given input and returns a pointer to the output
 * array (ann->output).
 * Values propagate layer by layer: every non-bias neuron computes a weighted
 * sum of the previous layer's values (4-way unrolled dot product), scales it
 * by the layer's activation steepness, clamps it, and applies the layer's
 * activation function. Bias neurons are forced to 1. */
FANN_EXTERNAL fann_type *FANN_API fann_run(struct fann * ann, const fann_type * input)
{
	struct fann_neuron *neuron_it, *last_neuron, *neurons;
	unsigned int i, num_connections, num_input, num_output;
	fann_type neuron_sum, *output;
	fann_type *weights;
	struct fann_layer *layer_it, *last_layer;
	unsigned int activation_function;
	fann_type steepness;

	/* store some variables locally for fast access */
	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
	fann_type max_sum;

	/* first set the input */
	num_input = ann->num_input;
	for(i = 0; i != num_input; i++)
	{
		*first_neuron[i].valuePtr = input[i];
	}
	/* Set the bias neuron in the input layer */
	*(ann->first_layer->last_neuron - 1)->valuePtr = 1;

	last_layer = ann->last_layer;
	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
	{
		activation_function = layer_it->activation_function;
		steepness = layer_it->activation_steepness;

		/* The clamp bound depends only on the per-layer steepness, so it is
		 * hoisted out of the per-neuron loop. */
		max_sum = 150/steepness;

		last_neuron = layer_it->last_neuron;
		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
		{
			if(neuron_it->first_con == neuron_it->last_con)
			{
				/* bias neurons always output 1 */
				*neuron_it->valuePtr = 1;
				continue;
			}

			neuron_sum = 0;
			num_connections = neuron_it->last_con - neuron_it->first_con;
			weights = ann->weights + neuron_it->first_con;

			neurons = (layer_it - 1)->first_neuron;

			/* unrolled loop start: fold in the num_connections % 4 leftover first */
			i = num_connections & 3;	/* same as modulo 4 */
			switch (i)
			{
				case 3:
					neuron_sum += fann_mult(weights[2], *neurons[2].valuePtr);
					/* fall through */
				case 2:
					neuron_sum += fann_mult(weights[1], *neurons[1].valuePtr);
					/* fall through */
				case 1:
					neuron_sum += fann_mult(weights[0], *neurons[0].valuePtr);
					/* fall through */
				case 0:
					break;
			}
			for(; i != num_connections; i += 4)
			{
				neuron_sum +=
					fann_mult(weights[i], *neurons[i].valuePtr) +
					fann_mult(weights[i + 1], *neurons[i + 1].valuePtr) +
					fann_mult(weights[i + 2], *neurons[i + 2].valuePtr) +
					fann_mult(weights[i + 3], *neurons[i + 3].valuePtr);
			}
			/* unrolled loop end */

			neuron_sum = fann_mult(steepness, neuron_sum);

			/* Clamp the scaled sum so the activation function cannot overflow. */
			if(neuron_sum > max_sum)
				neuron_sum = max_sum;
			else if(neuron_sum < -max_sum)
				neuron_sum = -max_sum;

			*neuron_it->sumPtr = neuron_sum;

			fann_activation_switch(activation_function, neuron_sum, *neuron_it->valuePtr);
		}
	}

	/* set the output */
	output = ann->output;
	num_output = ann->num_output;
	neurons = (ann->last_layer - 1)->first_neuron;
	for(i = 0; i != num_output; i++)
	{
		output[i] = *neurons[i].valuePtr;
	}
	return ann->output;
}
Code example #4
Score: 0
/* Runs one fully connected neuron layer (fixed-point build).
 * For each output o: computes the weighted input sum against weight row o
 * (4-way unrolled), scales it by the activation steepness, and evaluates the
 * activation with a stepwise-linear approximation. The stepwise breakpoints
 * (r1..r6 / v1..v6) are only (re)loaded when the activation function or
 * steepness differs from the previous output; since both are read once per
 * neuron, in practice they are loaded on the first iteration only. */
static __inline  void  MAKE_NAME(base_neuron_run)(struct fann * ann, struct fann_neuron * neuron)
{
	unsigned int i, o, num_connections, num_outputs;
	fann_type *neuron_sums, *outputs, *inputs, *weights;
	unsigned int activation_function;
	fann_type steepness;

	int multiplier = ann->fixed_params->multiplier;
	/* NOTE(review): decimal_point looks unused, but the fixed-point
	 * fann_mult macro presumably expands to a shift that references this
	 * name — keep it. TODO confirm against fann_internal.h. */
	unsigned int decimal_point = ann->fixed_params->decimal_point;

	/* values used for the stepwise linear sigmoid function */
	fann_type r1 = 0, r2 = 0, r3 = 0, r4 = 0, r5 = 0, r6 = 0;
	fann_type v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;

	fann_type last_steepness = 0;
	unsigned int last_activation_function = 0;

	/* Algorithm for fully connected networks */
	activation_function = neuron->activation_function;
	steepness = neuron->activation_steepness;

	inputs = neuron->inputs;
	num_outputs = neuron->num_outputs;
	outputs = neuron->outputs;
	num_connections = neuron->num_inputs;
	weights = neuron->weights;
	neuron_sums = neuron->sums;

	for (o=0; o<num_outputs ; o++)
	{
		neuron_sums[o]=0;
		/* unrolled loop start: fold in the num_connections % 4 leftover first */
		i = num_connections & 3;	/* same as modulo 4 */
		switch (i)
		{
			case 3:
				neuron_sums[o] += fann_mult(weights[2], inputs[2]);
				/* fall through */
			case 2:
				neuron_sums[o] += fann_mult(weights[1], inputs[1]);
				/* fall through */
			case 1:
				neuron_sums[o] += fann_mult(weights[0], inputs[0]);
				/* fall through */
			case 0:
				break;
		}

		for(; i != num_connections; i += 4)
		{
			neuron_sums[o] +=
				fann_mult(weights[i]    , inputs[i]    ) +
				fann_mult(weights[i + 1], inputs[i + 1]) +
				fann_mult(weights[i + 2], inputs[i + 2]) +
				fann_mult(weights[i + 3], inputs[i + 3]);
		}
		weights += num_connections;	/* advance to the next output's weight row */
		/* unrolled loop end */

		neuron_sums[o] = fann_mult(steepness, neuron_sums[o]);

		/* Reload the stepwise breakpoints if the activation parameters changed. */
		if(activation_function != last_activation_function || steepness != last_steepness)
		{
			switch (activation_function)
			{
				case FANN_SIGMOID:
				case FANN_SIGMOID_STEPWISE:
					r1 = ann->fixed_params->sigmoid_results[0];
					r2 = ann->fixed_params->sigmoid_results[1];
					r3 = ann->fixed_params->sigmoid_results[2];
					r4 = ann->fixed_params->sigmoid_results[3];
					r5 = ann->fixed_params->sigmoid_results[4];
					r6 = ann->fixed_params->sigmoid_results[5];
					v1 = ann->fixed_params->sigmoid_values[0] / steepness;
					v2 = ann->fixed_params->sigmoid_values[1] / steepness;
					v3 = ann->fixed_params->sigmoid_values[2] / steepness;
					v4 = ann->fixed_params->sigmoid_values[3] / steepness;
					v5 = ann->fixed_params->sigmoid_values[4] / steepness;
					v6 = ann->fixed_params->sigmoid_values[5] / steepness;
					break;
				case FANN_SIGMOID_SYMMETRIC:
				case FANN_SIGMOID_SYMMETRIC_STEPWISE:
					r1 = ann->fixed_params->sigmoid_symmetric_results[0];
					r2 = ann->fixed_params->sigmoid_symmetric_results[1];
					r3 = ann->fixed_params->sigmoid_symmetric_results[2];
					r4 = ann->fixed_params->sigmoid_symmetric_results[3];
					r5 = ann->fixed_params->sigmoid_symmetric_results[4];
					r6 = ann->fixed_params->sigmoid_symmetric_results[5];
					v1 = ann->fixed_params->sigmoid_symmetric_values[0] / steepness;
					v2 = ann->fixed_params->sigmoid_symmetric_values[1] / steepness;
					v3 = ann->fixed_params->sigmoid_symmetric_values[2] / steepness;
					v4 = ann->fixed_params->sigmoid_symmetric_values[3] / steepness;
					v5 = ann->fixed_params->sigmoid_symmetric_values[4] / steepness;
					v6 = ann->fixed_params->sigmoid_symmetric_values[5] / steepness;
					break;
				case FANN_THRESHOLD:
					break;
			}
		}

		switch (activation_function)
		{
			case FANN_SIGMOID:
			case FANN_SIGMOID_STEPWISE:
				outputs[o] =
					(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, 0,
							multiplier, neuron_sums[o]);
				break;
			case FANN_SIGMOID_SYMMETRIC:
			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
				outputs[o] =
					(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6,
							-multiplier, multiplier, neuron_sums[o]);
				break;
			case FANN_THRESHOLD:
				outputs[o] = (fann_type) ((neuron_sums[o] < 0) ? 0 : multiplier);
				break;
			case FANN_THRESHOLD_SYMMETRIC:
				outputs[o] = (fann_type) ((neuron_sums[o] < 0) ? -multiplier : multiplier);
				break;
			case FANN_LINEAR:
				outputs[o] = neuron_sums[o];
				break;
			case FANN_LINEAR_PIECE:
				outputs[o] = (fann_type)((neuron_sums[o] < 0) ? 0 : (neuron_sums[o] > multiplier) ? multiplier : neuron_sums[o]);
				break;
			case FANN_LINEAR_PIECE_SYMMETRIC:
				outputs[o] = (fann_type)((neuron_sums[o] < -multiplier) ? -multiplier : (neuron_sums[o] > multiplier) ? multiplier : neuron_sums[o]);
				break;
			case FANN_ELLIOT:
			case FANN_ELLIOT_SYMMETRIC:
			case FANN_GAUSSIAN:
			case FANN_GAUSSIAN_SYMMETRIC:
			case FANN_GAUSSIAN_STEPWISE:
			case FANN_SIN_SYMMETRIC:
			case FANN_COS_SYMMETRIC:
				/* no fixed-point stepwise approximation exists for these */
				fann_error((struct fann_error *) ann, FANN_E_CANT_USE_ACTIVATION);
				break;
		}
		last_steepness = steepness;
		last_activation_function = activation_function;
	}
}
Code example #5
Score: 0
/* Runs the network on the given input and returns a pointer to the output
 * array (ann->output).
 * Values propagate layer by layer. For fully/densely connected networks
 * (connection_rate >= 1) the previous layer (or the first layer, for
 * FANN_NETTYPE_SHORTCUT) is read contiguously; for sparse networks the
 * connected neurons are reached through ann->connections. The weighted sum
 * is computed with a 4-way unrolled dot product. In FIXEDFANN builds the
 * activation is evaluated via a stepwise-linear approximation whose
 * breakpoints are cached between neurons; otherwise the sum is clamped and
 * fann_activation_switch applies the activation. */
FANN_EXTERNAL fann_type *FANN_API fann_run(struct fann * ann, fann_type * input)
{
	struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
	unsigned int i, num_connections, num_input, num_output;
	fann_type neuron_sum, *output;
	fann_type *weights;
	struct fann_layer *layer_it, *last_layer;
	unsigned int activation_function;
	fann_type steepness;

	/* store some variables locally for fast access */
	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;

#if 0
	/* disabled SSE variant of the inner dot product */
	__m128 xmm_weight, xmm_neurons, xmm_sum;
#endif
				
#ifdef FIXEDFANN
	int multiplier = ann->multiplier;
	unsigned int decimal_point = ann->decimal_point;

	/* values used for the stepwise linear sigmoid function */
	fann_type r1 = 0, r2 = 0, r3 = 0, r4 = 0, r5 = 0, r6 = 0;
	fann_type v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;

	/* cache of the previous neuron's activation parameters, so the stepwise
	 * breakpoints are only reloaded when they change */
	fann_type last_steepness = 0;
	unsigned int last_activation_function = 0;
#else
	fann_type max_sum;	
#endif

	/* first set the input */
	num_input = ann->num_input;
	for(i = 0; i != num_input; i++)
	{
#ifdef FIXEDFANN
		/* inputs beyond +-multiplier can overflow the fixed-point math */
		if(fann_abs(input[i]) > multiplier)
		{
			printf
				("Warning input number %d is out of range -%d - %d with value %d, integer overflow may occur.\n",
				 i, multiplier, multiplier, input[i]);
		}
#endif
		first_neuron[i].value = input[i];
	}
	/* Set the bias neuron in the input layer */
#ifdef FIXEDFANN
	(ann->first_layer->last_neuron - 1)->value = multiplier;
#else
	(ann->first_layer->last_neuron - 1)->value = 1;
#endif

	last_layer = ann->last_layer;
	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
	{
		last_neuron = layer_it->last_neuron;
		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
		{
			if(neuron_it->first_con == neuron_it->last_con)
			{
				/* bias neurons: no incoming connections, output is constant */
#ifdef FIXEDFANN
				neuron_it->value = multiplier;
#else
				neuron_it->value = 1;
#endif
				continue;
			}

			activation_function = neuron_it->activation_function;
			steepness = neuron_it->activation_steepness;

			neuron_sum = 0;
			num_connections = neuron_it->last_con - neuron_it->first_con;
			weights = ann->weights + neuron_it->first_con;

			if(ann->connection_rate >= 1)
			{
				/* dense network: source neurons are contiguous in memory */
				if(ann->network_type == FANN_NETTYPE_SHORTCUT)
				{
					neurons = ann->first_layer->first_neuron;
				}
				else
				{
					neurons = (layer_it - 1)->first_neuron;
				}


				/* unrolled loop start: fold in the num_connections % 4 leftover first */
				i = num_connections & 3;	/* same as modulo 4 */
				switch (i)
				{
					case 3:
						neuron_sum += fann_mult(weights[2], neurons[2].value);
						/* fall through */
					case 2:
						neuron_sum += fann_mult(weights[1], neurons[1].value);
						/* fall through */
					case 1:
						neuron_sum += fann_mult(weights[0], neurons[0].value);
						/* fall through */
					case 0:
						break;
				}
#if 0
				/* disabled SSE variant of the main unrolled loop */
				xmm_sum = _mm_setzero_ps();
				for(; i != num_connections; i += 4)
				{
					xmm_weight = _mm_loadu_ps(weights + i);
					xmm_neurons = _mm_set_ps(neurons[i + 3].value,neurons[i + 2].value,
											 neurons[i + 1].value,neurons[i].value);
					xmm_weight = _mm_mul_ps(xmm_weight,xmm_neurons);
					xmm_sum =_mm_add_ps(xmm_sum,xmm_weight);
				}
				neuron_sum += xmm_sum.m128_f32[3] + xmm_sum.m128_f32[2] +
						      xmm_sum.m128_f32[1] + xmm_sum.m128_f32[0];
#else
				for(; i != num_connections; i += 4)
				{
					neuron_sum +=
						fann_mult(weights[i], neurons[i].value) +
						fann_mult(weights[i + 1], neurons[i + 1].value) +
						fann_mult(weights[i + 2], neurons[i + 2].value) +
						fann_mult(weights[i + 3], neurons[i + 3].value);
				}
#endif
				/* unrolled loop end */

				/*
				 * for(i = 0;i != num_connections; i++){
				 * printf("%f += %f*%f, ", neuron_sum, weights[i], neurons[i].value);
				 * neuron_sum += fann_mult(weights[i], neurons[i].value);
				 * }
				 */
			}
			else
			{
				/* sparse network: follow per-connection neuron pointers */
				neuron_pointers = ann->connections + neuron_it->first_con;

				i = num_connections & 3;	/* same as modulo 4 */
				switch (i)
				{
					case 3:
						neuron_sum += fann_mult(weights[2], neuron_pointers[2]->value);
						/* fall through */
					case 2:
						neuron_sum += fann_mult(weights[1], neuron_pointers[1]->value);
						/* fall through */
					case 1:
						neuron_sum += fann_mult(weights[0], neuron_pointers[0]->value);
						/* fall through */
					case 0:
						break;
				}
#if 0
				/* disabled SSE variant of the main unrolled loop */
				xmm_sum = _mm_setzero_ps();
				for(; i != num_connections; i += 4)
				{
					xmm_weight = _mm_loadu_ps(weights + i);
					xmm_neurons = _mm_set_ps(neuron_pointers[i + 3]->value,neuron_pointers[i + 2]->value,
											 neuron_pointers[i + 1]->value,neuron_pointers[i + 0]->value);
					xmm_weight = _mm_mul_ps(xmm_weight,xmm_neurons);
					xmm_sum =_mm_add_ps(xmm_sum,xmm_weight);
				}
				neuron_sum += xmm_sum.m128_f32[3] + xmm_sum.m128_f32[2] +
						      xmm_sum.m128_f32[1] + xmm_sum.m128_f32[0];
#else 
				for(; i != num_connections; i += 4)
				{
					neuron_sum +=
						fann_mult(weights[i], neuron_pointers[i]->value) +
						fann_mult(weights[i + 1], neuron_pointers[i + 1]->value) +
						fann_mult(weights[i + 2], neuron_pointers[i + 2]->value) +
						fann_mult(weights[i + 3], neuron_pointers[i + 3]->value);
				}
#endif
			}

#ifdef FIXEDFANN
			neuron_it->sum = fann_mult(steepness, neuron_sum);

			/* reload the stepwise breakpoints only when the activation
			 * parameters differ from the previous neuron's */
			if(activation_function != last_activation_function || steepness != last_steepness)
			{
				switch (activation_function)
				{
					case FANN_SIGMOID:
					case FANN_SIGMOID_STEPWISE:
						r1 = ann->sigmoid_results[0];
						r2 = ann->sigmoid_results[1];
						r3 = ann->sigmoid_results[2];
						r4 = ann->sigmoid_results[3];
						r5 = ann->sigmoid_results[4];
						r6 = ann->sigmoid_results[5];
						v1 = ann->sigmoid_values[0] / steepness;
						v2 = ann->sigmoid_values[1] / steepness;
						v3 = ann->sigmoid_values[2] / steepness;
						v4 = ann->sigmoid_values[3] / steepness;
						v5 = ann->sigmoid_values[4] / steepness;
						v6 = ann->sigmoid_values[5] / steepness;
						break;
					case FANN_SIGMOID_SYMMETRIC:
					case FANN_SIGMOID_SYMMETRIC_STEPWISE:
						r1 = ann->sigmoid_symmetric_results[0];
						r2 = ann->sigmoid_symmetric_results[1];
						r3 = ann->sigmoid_symmetric_results[2];
						r4 = ann->sigmoid_symmetric_results[3];
						r5 = ann->sigmoid_symmetric_results[4];
						r6 = ann->sigmoid_symmetric_results[5];
						v1 = ann->sigmoid_symmetric_values[0] / steepness;
						v2 = ann->sigmoid_symmetric_values[1] / steepness;
						v3 = ann->sigmoid_symmetric_values[2] / steepness;
						v4 = ann->sigmoid_symmetric_values[3] / steepness;
						v5 = ann->sigmoid_symmetric_values[4] / steepness;
						v6 = ann->sigmoid_symmetric_values[5] / steepness;
						break;
					case FANN_THRESHOLD:
						/* threshold needs no breakpoints */
						break;
				}
			}

			/* evaluate the activation with the stepwise-linear approximation */
			switch (activation_function)
			{
				case FANN_SIGMOID:
				case FANN_SIGMOID_STEPWISE:
					neuron_it->value =
						(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, 0,
												  multiplier, neuron_sum);
					break;
				case FANN_SIGMOID_SYMMETRIC:
				case FANN_SIGMOID_SYMMETRIC_STEPWISE:
					neuron_it->value =
						(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6,
												  -multiplier, multiplier, neuron_sum);
					break;
				case FANN_THRESHOLD:
					neuron_it->value = (fann_type) ((neuron_sum < 0) ? 0 : multiplier);
					break;
				case FANN_THRESHOLD_SYMMETRIC:
					neuron_it->value = (fann_type) ((neuron_sum < 0) ? -multiplier : multiplier);
					break;
				case FANN_LINEAR:
					neuron_it->value = neuron_sum;
					break;
				case FANN_LINEAR_PIECE:
					neuron_it->value = (fann_type)((neuron_sum < 0) ? 0 : (neuron_sum > multiplier) ? multiplier : neuron_sum);
					break;
				case FANN_LINEAR_PIECE_SYMMETRIC:
					neuron_it->value = (fann_type)((neuron_sum < -multiplier) ? -multiplier : (neuron_sum > multiplier) ? multiplier : neuron_sum);
					break;
				case FANN_ELLIOT:
				case FANN_ELLIOT_SYMMETRIC:
				case FANN_GAUSSIAN:
				case FANN_GAUSSIAN_SYMMETRIC:
				case FANN_GAUSSIAN_STEPWISE:
				case FANN_SIN_SYMMETRIC:
				case FANN_COS_SYMMETRIC:
					/* no fixed-point approximation exists for these */
					fann_error((struct fann_error *) ann, FANN_E_CANT_USE_ACTIVATION);
					break;
			}
			last_steepness = steepness;
			last_activation_function = activation_function;
#else
			neuron_sum = fann_mult(steepness, neuron_sum);
			
			/* clamp the scaled sum so the activation function cannot overflow */
			max_sum = 150/steepness;
			if(neuron_sum > max_sum)
				neuron_sum = max_sum;
			else if(neuron_sum < -max_sum)
				neuron_sum = -max_sum;
			
			neuron_it->sum = neuron_sum;

			fann_activation_switch(activation_function, neuron_sum, neuron_it->value);
#endif
		}
	}

	/* set the output */
	output = ann->output;
	num_output = ann->num_output;
	neurons = (ann->last_layer - 1)->first_neuron;
	for(i = 0; i != num_output; i++)
	{
		output[i] = neurons[i].value;
	}
	return ann->output;
}