Example #1
// fann_randomize_weights      Give each connection a random weight between min_weight and max_weight
int sci_fann_randomize_weights(char * fname)
{
  int * pi_minweight_addr = NULL;
  int * pi_maxweight_addr = NULL;
  int res;
  double minweight = 0.0, maxweight = 0.0;
  struct fann * result_ann = NULL;
  SciErr _sciErr;

  if ((Rhs!=3)||(Lhs!=1))
    {
      Scierror(999,"%s usage: ann_out = %s(ann_in, weight_min, weight_max)", fname, fname);
      return 0;
    }

  // Get the ann
  res = detect_fannlist(1);
  if (res==-1) return 0;
  
  result_ann = createCFannStructFromScilabFannStruct(1,&res);
  if (res==-1) return 0;

  if (result_ann==NULL)
    {
      Scierror(999,"%s: Problem while creating the fann scilab structure\n",fname);
      return 0;
    }

  _sciErr = getVarAddressFromPosition(pvApiCtx, 2, &pi_minweight_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  getScalarDouble(pvApiCtx, pi_minweight_addr, &minweight);

  _sciErr = getVarAddressFromPosition(pvApiCtx, 3, &pi_maxweight_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  getScalarDouble(pvApiCtx, pi_maxweight_addr, &maxweight);

  fann_randomize_weights(result_ann,(fann_type)minweight,(fann_type)maxweight);
  
  res = createScilabFannStructFromCFannStruct(result_ann, Rhs + 1);
  if (res==-1) return 0;

  LhsVar(1) = Rhs + 1;

  return 0;
}
Example #2
struct fann *random_network(){
	// create the network and set activation functions
	struct fann *ann = fann_create_standard(num_layers,num_input,num_hidden_nodes,num_output);
	fann_set_activation_function_hidden(ann,FANN_SIGMOID);
	fann_set_activation_function_output(ann,FANN_SIGMOID);
	
	// randomize weights
	fann_randomize_weights(ann,-1,1);
	
	// return
	return ann;
}
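random_network() relies on file-scope constants (num_layers, num_input, num_hidden_nodes, num_output) that must be defined elsewhere in the translation unit. A minimal caller sketch under that assumption; the training-file path is a placeholder:

int main(void)
{
	struct fann *ann = random_network();

	/* Train on a FANN-format data file (path is hypothetical). */
	struct fann_train_data *data = fann_read_train_from_file("train.data");
	fann_train_on_data(ann, data, 1000, 100, 0.001f);

	fann_destroy_train(data);
	fann_destroy(ann);
	return 0;
}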
Example #3
bool Trainer::Train(const AnnData& data, const float* random_weight_limit,
                    std::size_t max_epochs, std::size_t epochs_between_reports,
                    float desired_error,
                    float* mse, std::size_t* bit_fail) {
  if (random_weight_limit == NULL)
    fann_init_weights(ann_, data.data());
  else
    fann_randomize_weights(ann_, -*random_weight_limit, *random_weight_limit);
  fann_shuffle_train_data(data.data());
  fann_train_on_data(ann_, data.data(), max_epochs, epochs_between_reports,
                     desired_error);
  return GetMseAndBitFail(ann_, &mse, &bit_fail);
}
Example #4
/***
 * @function rspamd_fann.create(nlayers, [layer1, ... layern])
 * Creates new neural network with `nlayers` that contains `layer1`...`layern`
 * neurons in each layer
 * @param {number} nlayers number of layers
 * @param {number} layerI number of neurons in each layer
 * @return {fann} fann object
 */
static gint
lua_fann_create (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;

	nlayers = luaL_checknumber (L, 1);

	if (nlayers > 0) {
		layers = g_malloc0 (nlayers * sizeof (layers[0]));

		if (lua_type (L, 2) == LUA_TNUMBER) {
			for (i = 0; i < nlayers; i ++) {
				layers[i] = luaL_checknumber (L, i + 2);
			}
		}
		else if (lua_type (L, 2) == LUA_TTABLE) {
			for (i = 0; i < nlayers; i ++) {
				lua_rawgeti (L, 2, i + 1);
				layers[i] = luaL_checknumber (L, -1);
				lua_pop (L, 1);
			}
		}

		f = fann_create_standard_array (nlayers, layers);

		if (f != NULL) {
			/* Configure the network only after the NULL check */
			fann_set_activation_function_hidden (f, FANN_SIGMOID_SYMMETRIC);
			fann_set_activation_function_output (f, FANN_SIGMOID_SYMMETRIC);
			fann_set_training_algorithm (f, FANN_TRAIN_INCREMENTAL);
			fann_randomize_weights (f, 0, 1);

			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			lua_pushnil (L);
		}

		g_free (layers);
	}
	else {
		lua_pushnil (L);
	}

	return 1;
#endif
}
Example #5
bool ViFann::setWeights(const Weights &initialization, const qreal &minimum, const qreal &maximum)
{
	if(mNetwork == NULL) return false;
	mWeights = initialization;

	if(initialization == Random)
	{
		fann_randomize_weights(mNetwork, minimum, maximum);
		mWeightsMinimum = minimum;
		mWeightsMaximum = maximum;
	}
	else if(initialization == WidrowNguyen)
	{
		// Create fake training set so that FANN can determine the min and max values
		fann_train_data *data = fann_create_train(1, 2, 1);
		data->input[0][0] = 1;
		data->input[0][1] = -1;
		fann_init_weights(mNetwork, data);
		fann_destroy_train(data);
	}

	return true;
}
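The WidrowNguyen branch works because fann_init_weights() derives its Widrow-Nguyen scaling from the minimum and maximum values found in the training data it is handed, so a one-sample synthetic set spanning the expected input range is enough. The same trick in plain C, a minimal sketch assuming inputs normalized to [-1, 1] and hypothetical layer sizes:

struct fann *ann = fann_create_standard(3, 2, 3, 1);

/* One fake sample whose inputs span the expected range [-1, 1]. */
struct fann_train_data *dummy = fann_create_train(1, 2, 1);
dummy->input[0][0] = 1;		/* expected maximum */
dummy->input[0][1] = -1;	/* expected minimum */

fann_init_weights(ann, dummy);	/* Widrow-Nguyen, scaled to the fake range */
fann_destroy_train(dummy);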
Example #6
int main()
{
	struct fann *ann;
	struct fann_train_data *train_data, *test_data;
	const float desired_error = (const float)0.0;
	unsigned int max_neurons = 30;
	unsigned int neurons_between_reports = 1;
	unsigned int bit_fail_train, bit_fail_test;
	float mse_train, mse_test;
	unsigned int i = 0;
	fann_type *output;
	fann_type steepness;
	int multi = 0;
	enum fann_activationfunc_enum activation;
	enum fann_train_enum training_algorithm = FANN_TRAIN_RPROP;
	
	printf("Reading data.\n");
	 
	train_data = fann_read_train_from_file("../benchmarks/datasets/parity8.train");
	test_data = fann_read_train_from_file("../benchmarks/datasets/parity8.test");

	fann_scale_train_data(train_data, -1, 1);
	fann_scale_train_data(test_data, -1, 1);
	
	printf("Creating network.\n");
	
	ann = fann_create_shortcut(2, fann_num_input_train_data(train_data), fann_num_output_train_data(train_data));
		
	fann_set_training_algorithm(ann, training_algorithm);
	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(ann, FANN_LINEAR);
	fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
	
	if(!multi)
	{
		/*steepness = 0.5;*/
		steepness = 1;
		fann_set_cascade_activation_steepnesses(ann, &steepness, 1);
		/*activation = FANN_SIN_SYMMETRIC;*/
		activation = FANN_SIGMOID_SYMMETRIC;
		
		fann_set_cascade_activation_functions(ann, &activation, 1);		
		fann_set_cascade_num_candidate_groups(ann, 8);
	}	
		
	if(training_algorithm == FANN_TRAIN_QUICKPROP)
	{
		fann_set_learning_rate(ann, 0.35);
		fann_randomize_weights(ann, -2.0,2.0);
	}
	
	fann_set_bit_fail_limit(ann, 0.9);
	fann_set_train_stop_function(ann, FANN_STOPFUNC_BIT);
	fann_print_parameters(ann);
		
	fann_save(ann, "cascade_train2.net");
	
	printf("Training network.\n");

	fann_cascadetrain_on_data(ann, train_data, max_neurons, neurons_between_reports, desired_error);
	
	fann_print_connections(ann);
	
	mse_train = fann_test_data(ann, train_data);
	bit_fail_train = fann_get_bit_fail(ann);
	mse_test = fann_test_data(ann, test_data);
	bit_fail_test = fann_get_bit_fail(ann);
	
	printf("\nTrain error: %f, Train bit-fail: %d, Test error: %f, Test bit-fail: %d\n\n", 
		   mse_train, bit_fail_train, mse_test, bit_fail_test);
	
	for(i = 0; i < train_data->num_data; i++)
	{
		output = fann_run(ann, train_data->input[i]);
		if((train_data->output[i][0] >= 0 && output[0] <= 0) ||
		   (train_data->output[i][0] <= 0 && output[0] >= 0))
		{
			printf("ERROR: %f does not match %f\n", train_data->output[i][0], output[0]);
		}
	}
	
	printf("Saving network.\n");
	
	fann_save(ann, "cascade_train.net");
	
	printf("Cleaning up.\n");
	fann_destroy_train(train_data);
	fann_destroy_train(test_data);
	fann_destroy(ann);
	
	return 0;
}
Example #7
/**************************************************
 REAL-TIME RECURRENT LEARNING

 Williams and Zipser, "A Learning Algorithm for
   Continually Running Fully Recurrent Neural
   Networks," Neural Computation, 1. (1989)

 NOTE: This function is still being debugged.
       MSE does not decrease properly.
 *************************************************/
FANN_EXTERNAL void FANN_API fann_train_rtrl(struct fann *ann, struct fann_train_data *pattern, 
											float max_MSE, unsigned int max_iters, float rate)
{
	struct fann_neuron *neuron = NULL;
	struct fann_layer *layer = NULL;
	fann_type *curr_outputs = NULL;
	fann_type *curr_weight = NULL;

	unsigned int num_neurons = 0;
	unsigned int curr_neuron = 0;
	unsigned int num_iters = 0;
	unsigned int i = 0, j = 0, l = 0;

	float *dodw = NULL;				/* deriv of output wrt weight*/
	float *curr_dodw = NULL;
	float *next_dodw = NULL;		/* dodw for time 'n+1'*/
	float *curr_next_dodw = NULL;
	float *start_dodw = NULL;
	float *temp_swap = NULL;		/* for swapping dodw pointers*/
	float dw = 0.0;					/* change in weight*/

	assert(ann != NULL);
	assert(pattern != NULL);

	/* Only one MIMO neuron and layer in recurrent nets*/
	layer  = ann->first_layer;
	neuron = layer->first_neuron;

	num_neurons = layer->num_outputs;
	memset(layer->outputs, 0, num_neurons * sizeof(fann_type));

	/* Allocate memory for new outputs*/
	/* TODO: Return an error*/
	if ((curr_outputs = calloc(num_neurons, sizeof(fann_type))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'curr_outputs'\n");
		return;
	}

	/* Allocate memory for derivatives do_k(t)/dw_i,j*/
	/* TODO: Return an error*/
	if ((dodw = calloc(ann->num_output * neuron->num_weights * neuron->num_weights, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'dodw'\n");
		fann_safe_free(curr_outputs);
		return;
	}

	/* Allocate memory for derivatives do_k(t)/dw_i,j*/
	/* TODO: Return an error*/
	if ((next_dodw = calloc(neuron->num_weights * num_neurons, sizeof(float))) == NULL)
	{
		/*fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);*/
		printf("RTRL: Could not allocate 'next_dodw'\n");
		fann_safe_free(curr_outputs);
		fann_safe_free(dodw);
		return;
	}

	/* Randomize weights, initialize for training*/
	fann_randomize_weights(ann, -0.5, 0.5);

	if (layer->train_errors==NULL)
	{
		layer->initialize_train_errors(ann, ann->first_layer);
	}

	/* RTRL: Continue learning until MSE low enough or reach*/
	/*       max iterations*/
	num_iters = 0;
	ann->training_params->MSE_value = 100;
	while (ann->training_params->MSE_value > max_MSE && num_iters < max_iters
		   && num_iters < pattern->num_data)
	{
		/* Set the input lines for this time step*/
		/*printf("%d inputs: ", ann->num_input);*/
		for (i=0; i<ann->num_input; i++)
		{
			ann->inputs[i] = pattern->input[num_iters][i];
			/*printf("%f ", (double) ann->inputs[i]);*/
		}
		/*printf("(output: %f) (bias: %f) \n", pattern->output[num_iters][0], ann->inputs[ann->num_input]);*/

		/* Copy the outputs of each neuron before they're updated*/
		memcpy(curr_outputs, layer->outputs, num_neurons * sizeof(fann_type));


		/* Update the output of all nodes*/
		layer->run(ann, layer);
		/*printf("NEW OUTPUTS: %f %f %f\n", layer->outputs[0], layer->outputs[1], layer->outputs[2]);*/
		/*printf("ANN OUTPUTS: %f\n", ann->output[0]);*/

		/*curr_weight = neuron->weights;
		for (i=0; i<num_neurons; i++)
		{
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				printf("weight_prev (%d,%d): %f ", i, j, *curr_weight);
				curr_weight++;
			}
		}
		printf("\n");*/

		/* Compute new MSE*/
		fann_reset_MSE(ann);
		fann_compute_MSE(ann, pattern->output[num_iters]);
		printf("%d MSE: %f\n", num_iters, fann_get_MSE(ann));

		/* Modify the weights*/
		start_dodw  = dodw + (num_neurons - ann->num_output) * neuron->num_weights;
		for (i=0; i<num_neurons; i++)
		{
			curr_weight = neuron[i].weights;
			for (j=0; j<layer->num_inputs + num_neurons; j++)
			{
				dw = 0.0;
				curr_dodw = start_dodw;
				/* For each neuron that is not an input node*/
				for (curr_neuron=num_neurons - ann->num_output; curr_neuron<num_neurons; curr_neuron++)
				{
					dw += (pattern->output[num_iters][curr_neuron - (num_neurons - ann->num_output)] -
						curr_outputs[curr_neuron]) * *curr_dodw;

					curr_dodw += neuron->num_weights;
				}

				*curr_weight += dw * rate;
				/*printf("weight (%d,%d): %f\n", i, j, *curr_weight);*/

				curr_weight++;
				start_dodw++;
			}
		}

		/* Compute next dodw derivatives*/
		curr_next_dodw = next_dodw;
		for (curr_neuron=0; curr_neuron<num_neurons; curr_neuron++)
		{
			start_dodw = dodw;
			curr_weight = neuron->weights;
			for (i=0; i<num_neurons; i++)
			{
				for (j=0; j<layer->num_inputs + num_neurons; j++)
				{
					curr_dodw = start_dodw;

					*curr_next_dodw = 0.0;
					for (l=0; l<num_neurons; l++)
					{
						*curr_next_dodw += *curr_dodw *
							neuron->weights[curr_neuron * (layer->num_inputs + num_neurons) + l + layer->num_inputs];
						curr_dodw += neuron->num_weights;
					}

					/* kronecker_{i,k} * z_j(t)*/
					*curr_next_dodw += (i != curr_neuron) ? 0 :
						((j < layer->num_inputs) ? ann->inputs[j] : curr_outputs[j - layer->num_inputs]);

					*curr_next_dodw *= layer->outputs[curr_neuron]*(1 - layer->outputs[curr_neuron]);
					/*printf("(%d,%d): %f\n", i, j, *curr_next_dodw);*/

					curr_next_dodw++;
					curr_weight++;
					start_dodw++;
				}
			}
		}

		/* Swap the next and the current dodw*/
		/*  (to avoid a costly memory transfer)*/
		temp_swap = dodw;
		dodw = next_dodw;
		next_dodw = temp_swap;

		num_iters++;
	}

	fann_safe_free(dodw);
	fann_safe_free(next_dodw);
	fann_safe_free(curr_outputs);
}
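For reference, the quantities these loops maintain are the sensitivities and weight updates of RTRL as given in Williams and Zipser (1989). With target d_k(t), output y_k(t) and error e_k(t) = d_k(t) - y_k(t), the weight-modification loop computes

    \Delta w_{ij} = \eta \sum_k e_k(t)\, p^k_{ij}(t)

and the dodw-recomputation loop advances the sensitivities

    p^k_{ij}(t+1) = f'(s_k(t)) \Big[ \sum_l w_{kl}\, p^l_{ij}(t) + \delta_{ik}\, z_j(t) \Big], \qquad p^k_{ij}(0) = 0,

where z_j(t) is the j-th input line (external input or fed-back output) and \delta_{ik} is the Kronecker delta. The factor layer->outputs[curr_neuron] * (1 - layer->outputs[curr_neuron]) is f'(s_k) for the logistic sigmoid, so the code assumes that activation function.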
Example #8
void OcrNet::Train(cv::Mat &image, std::map< int, std::set<ImageRegion> > &image_lines, std::string &char_set)
{
	std::map<int, std::set<ImageRegion> >::iterator image_lines_it;
	std::set<ImageRegion> regions;
	std::set<ImageRegion>::iterator regions_it;
	unsigned int char_count = 0, char_length = 0, repeat = 2, index = 0;
	struct fann_train_data **data;
	fann_type **inputs, **outputs;

	char_length = char_set.length();
	data = new struct fann_train_data* [char_length];

	for(uint j=0; j < char_length; j++)
	{
		data[j] = new struct fann_train_data;
		data[j]->num_data = repeat * char_length;
		data[j]->num_input = Ann[j]->num_input;
		data[j]->num_output = Ann[j]->num_output;
		inputs = new fann_type*[data[j]->num_data];
		outputs = new fann_type*[data[j]->num_data];
		data[j]->input = inputs;
		data[j]->output = outputs;

		for(uint i = 0; i < repeat; i++, char_count = 0)
		{
			for(image_lines_it = image_lines.begin();
					char_count < char_length && image_lines_it != image_lines.end();
					image_lines_it++, char_count++)
			{
				regions = (std::set<ImageRegion>) image_lines_it->second;

				for(regions_it = regions.begin();
						char_count < char_length && regions_it != regions.end();
						regions_it++, char_count++)
				{
					index = i * char_length + char_count;
					inputs[index] = new fann_type[data[j]->num_input];
					outputs[index] = new fann_type[data[j]->num_output];
					CreateInput(inputs[index], data[j]->num_input, image, *regions_it);
					CreateOutput(outputs[index], data[j]->num_output, char_count);

					std::cout << "Repeat Index: " << i << ", index: " << index << std::endl;
				}
			}
		}

		fann_randomize_weights(Ann[j], -0.001, 0.0);
		//	fann_init_weights(Ann, data);
		fann_train_on_data(Ann[j], data[j], MaxEpochs, 10, Error);

		for(unsigned int i=0; i < data[j]->num_data; i++)
		{
			delete [] inputs[i];
			delete [] outputs[i];
		}
		delete [] inputs;
		delete [] outputs;
		delete data[j];
	}

	delete [] data;
}
Example #9
void BasicBrain::randomize() {
	fann_randomize_weights(nn_, -1, 1);
}
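A call like this is typically paired with random restarts: because gradient training is sensitive to the initial weights, one re-randomizes and retrains several times and keeps the best network. A minimal sketch with hypothetical layer sizes, assuming a loaded struct fann_train_data *data:

struct fann *best = NULL;
float best_mse = 1e9f;
int r;

for (r = 0; r < 5; r++) {
	struct fann *ann = fann_create_standard(3, 2, 3, 1);
	fann_randomize_weights(ann, -1, 1);
	fann_train_on_data(ann, data, 1000, 0, 0.001f);

	float mse = fann_test_data(ann, data);	/* MSE over the whole set */
	if (mse < best_mse) {
		best_mse = mse;
		if (best != NULL) fann_destroy(best);
		best = ann;
	} else {
		fann_destroy(ann);
	}
}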
Example #10
/***
 * @function rspamd_fann.create_full(params)
 * Creates new neural network with parameters:
 * - `layers` {table/numbers}: table of layers in form: {N1, N2, N3 ... Nn} where N is number of neurons in a layer
 * - `activation_hidden` {string}: activation function type for hidden layers (`tanh` by default)
 * - `activation_output` {string}: activation function type for output layer (`tanh` by default)
 * - `sparsed` {float}: create sparsed ANN, where number is a coefficient for sparsing
 * - `learn` {string}: learning algorithm (quickprop, rprop or incremental)
 * - `randomize` {boolean}: randomize weights (true by default)
 * @return {fann} fann object
 */
static gint
lua_fann_create_full (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;
	const gchar *activation_hidden = NULL, *activation_output = NULL, *learn_alg = NULL;
	gdouble sparsed = 0.0;
	gboolean randomize_ann = TRUE;
	GError *err = NULL;

	if (lua_type (L, 1) == LUA_TTABLE) {
		lua_pushstring (L, "layers");
		lua_gettable (L, 1);

		if (lua_type (L, -1) != LUA_TTABLE) {
			return luaL_error (L, "bad layers attribute");
		}

		nlayers = rspamd_lua_table_size (L, -1);
		if (nlayers < 2) {
			return luaL_error (L, "bad layers attribute");
		}

		layers = g_new0 (guint, nlayers);

		for (i = 0; i < nlayers; i ++) {
			lua_rawgeti (L, -1, i + 1);
			layers[i] = luaL_checknumber (L, -1);
			lua_pop (L, 1);
		}

		lua_pop (L, 1); /* Table */

		if (!rspamd_lua_parse_table_arguments (L, 1, &err,
				"sparsed=N;randomize=B;learn=S;activation_hidden=S;activation_output=S",
				&sparsed, &randomize_ann, &learn_alg, &activation_hidden, &activation_output)) {
			g_free (layers);

			if (err) {
				gint r;

				r = luaL_error (L, "invalid arguments: %s", err->message);
				g_error_free (err);
				return r;
			}
			else {
				return luaL_error (L, "invalid arguments");
			}
		}

		if (sparsed != 0.0) {
			f = fann_create_sparse_array (sparsed, nlayers, layers);
		}
		else {
			f = fann_create_standard_array (nlayers, layers);
		}

		if (f != NULL) {
			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			g_free (layers);
			return luaL_error (L, "cannot create fann");
		}

		fann_set_activation_function_hidden (f,
				string_to_activation_func (activation_hidden));
		fann_set_activation_function_output (f,
				string_to_activation_func (activation_output));
		fann_set_training_algorithm (f, string_to_learn_alg (learn_alg));

		if (randomize_ann) {
			fann_randomize_weights (f, 0, 1);
		}

		g_free (layers);
	}
	else {
		return luaL_error (L, "bad arguments");
	}

	return 1;
#endif
}
Example #11
struct fann * setup_net(struct fann_train_data * data)
{
	struct fann *ann;
#if MIMO_FANN
#if OPTIMIZE == 0
	ann = fann_create_standard( 3, data->num_input, H_DIM, data->num_output);
	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
#endif

#if OPTIMIZE == 1
	unsigned int i, j;
	struct fann_descr *descr=(struct fann_descr*) calloc(1, sizeof(struct fann_descr));
	fann_setup_descr(descr, 2, data->num_input);


	i=0;

	fann_setup_layer_descr(
					&(descr->layers_descr[i]),
					"connected_any_any",
					1,
					NULL
					);

	for (j=0; j< descr->layers_descr[i].num_neurons; j++)
	{
		fann_setup_neuron_descr(
				descr->layers_descr[i].neurons_descr+j,
				H_DIM,
				"scalar_rprop_sigmoid_symmetric",
				NULL
				);
	}

	i=1;

	fann_setup_layer_descr(
					&(descr->layers_descr[i]),
					"connected_any_any",
					1,
					NULL
					);

	for (j=0; j< descr->layers_descr[i].num_neurons; j++)
	{
		fann_setup_neuron_descr(
				descr->layers_descr[i].neurons_descr+j,
				data->num_output,
				"scalar_rprop_sigmoid_symmetric",
				NULL
				);
	}
	ann = fann_create_from_descr( descr );
#endif

#if OPTIMIZE >= 2
	{
		unsigned int layers[] = { data->num_input, H_DIM, data->num_output };
		/*char *type;
		asprintf(&type, "%s_%s_%s", vals(implementation), vals(algorithm), vals(activation));*/

		ann = fann_create_standard_array_typed(layer_type, neuron_type, 3,  layers);

	}
#endif
#else /*MIMO_FANN*/

#ifdef SPARSE
	ann = fann_create_sparse( SPARSE, 3, data->num_input, H_DIM, data->num_output);
#else
	ann = fann_create_standard( 3, data->num_input, H_DIM, data->num_output);
#endif
	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);

#endif /*MIMO_FANN*/ 

	fann_set_train_stop_function(ann, FANN_STOPFUNC_BIT);
	fann_set_bit_fail_limit(ann, 0.01f);
	fann_set_activation_steepness_hidden(ann, 1);
	fann_set_activation_steepness_output(ann, 1);

#if INIT_WEIGHTS == 1
	fann_randomize_weights(ann,0,1);
#endif
#if INIT_WEIGHTS == 2
	fann_init_weights(ann, data);
#endif

#ifdef USE_RPROP
	fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
#else
	fann_set_training_algorithm(ann, FANN_TRAIN_BATCH);
#endif

	return ann;
}
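A minimal driver sketch for setup_net(); the training-file path is a placeholder and the epoch counts are arbitrary:

int main(void)
{
	struct fann_train_data *data = fann_read_train_from_file("train.data");
	struct fann *ann = setup_net(data);

	fann_train_on_data(ann, data, 1000, 100, 0.0f);
	printf("MSE after training: %f\n", fann_test_data(ann, data));

	fann_destroy(ann);
	fann_destroy_train(data);
	return 0;
}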