Example #1
void BasicBrain::makeNN() {
	assert(layout_.numLayers_ >= 3);
	assert(layout_.numLayers_ < 20);
	unsigned int* layerArray = new unsigned int[layout_.numLayers_];
	layerArray[0] = layout_.numInputs_;

	for(size_t i = 1; i < (layout_.numLayers_ - 1); i++) {
		layerArray[i] = layout_.neuronsPerHidden_;
	}

	layerArray[layout_.numLayers_ - 1] = layout_.numOutputs_;

	nn_ = fann_create_standard_array(layout_.numLayers_, layerArray);
	fann_set_activation_function_hidden(nn_, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(nn_, FANN_SIGMOID_SYMMETRIC);

	inputs_ = new fann_type[layout_.numInputs_];
	reset();
	delete[] layerArray;

#ifdef _CHECK_BRAIN_ALLOC
	size_t id = ++nnAllocCnt_;
	nnAllocs_[nn_] = id;
	std::cerr << "alloc: " << id << std::endl;
#endif
}
Example #2
FANN_EXTERNAL struct fann *FANN_API fann_create_standard(unsigned int num_layers, ...)
{
	struct fann *ann;
	va_list layer_sizes;
	int i;
	unsigned int *layers = (unsigned int *) fann_calloc(num_layers, sizeof(unsigned int));

	if(layers == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		return NULL;
	}

	va_start(layer_sizes, num_layers);
	for(i = 0; i < (int) num_layers; i++)
	{
		layers[i] = va_arg(layer_sizes, unsigned int);
	}
	va_end(layer_sizes);

	ann = fann_create_standard_array(num_layers, layers);

	fann_free(layers);

	return ann;
}
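The variadic wrapper above simply forwards to fann_create_standard_array, so the two entry points are interchangeable. A minimal sketch (with hypothetical 2-3-1 layer sizes) showing the equivalent calls:

#include "fann.h"

int main(void)
{
	/* These two calls build identical fully connected 2-3-1 networks. */
	struct fann *a = fann_create_standard(3, 2, 3, 1);

	unsigned int layers[3] = {2, 3, 1};
	struct fann *b = fann_create_standard_array(3, layers);

	fann_destroy(a);
	fann_destroy(b);
	return 0;
}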
Example #3
int main(int argc, char *argv[])
{
	unsigned int layers[3] = {38, 17, 9};
	struct fann *ann = fann_create_standard_array(3, layers);
	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
	fann_set_activation_function_output(ann, FANN_LINEAR);
	fann_train_on_file(ann, "scotland.data", 100000, 100, 0.00001);
	fann_save(ann, "scotland_test.net");
	fann_destroy(ann);
	return 0;
} 
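A natural follow-up to the example above is reloading the saved network and running it. A minimal sketch, assuming the training run produced scotland_test.net (the zeroed input vector is a placeholder):

#include <stdio.h>
#include "fann.h"

int main(void)
{
	/* Reload the network saved by the training program above. */
	struct fann *ann = fann_create_from_file("scotland_test.net");
	fann_type input[38] = {0};	/* 38 inputs, matching the training topology */
	fann_type *output;

	if (ann == NULL)
		return 1;

	output = fann_run(ann, input);
	printf("first output: %f\n", (double) output[0]);

	fann_destroy(ann);
	return 0;
}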
Example #4
/***
 * @function rspamd_fann.create(nlayers, [layer1, ... layern])
 * Creates a new neural network with `nlayers` layers that contains
 * `layer1`...`layern` neurons in the corresponding layers
 * @param {number} nlayers number of layers
 * @param {number} layerI number of neurons in each layer
 * @return {fann} fann object
 */
static gint
lua_fann_create (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;

	nlayers = luaL_checknumber (L, 1);

	if (nlayers > 0) {
		/* zero-initialise so layer sizes are defined even if arg 2 has an unexpected type */
		layers = g_malloc0 (nlayers * sizeof (layers[0]));

		if (lua_type (L, 2) == LUA_TNUMBER) {
			for (i = 0; i < nlayers; i ++) {
				layers[i] = luaL_checknumber (L, i + 2);
			}
		}
		else if (lua_type (L, 2) == LUA_TTABLE) {
			for (i = 0; i < nlayers; i ++) {
				lua_rawgeti (L, 2, i + 1);
				layers[i] = luaL_checknumber (L, -1);
				lua_pop (L, 1);
			}
		}

		f = fann_create_standard_array (nlayers, layers);

		if (f != NULL) {
			/* configure only after checking the network was actually created */
			fann_set_activation_function_hidden (f, FANN_SIGMOID_SYMMETRIC);
			fann_set_activation_function_output (f, FANN_SIGMOID_SYMMETRIC);
			fann_set_training_algorithm (f, FANN_TRAIN_INCREMENTAL);
			fann_randomize_weights (f, 0, 1);

			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			lua_pushnil (L);
		}

		g_free (layers);
	}
	else {
		lua_pushnil (L);
	}

	return 1;
#endif
}
Example #5
bool ViFann::setStructure(const Type &type, const QList<int> &neurons, const qreal &connectionRate)
{
	#ifdef GPU
		if(type != Standard)
		{
			LOG("The GPU version of FANN currently doesn't support shortcut, sparse or cascade networks.", QtFatalMsg);
			exit(-1);
		}
	#endif
	clear();

	mType = type;
	mInputCount = neurons.first();
	mOutputCount = neurons.last();
	mNeurons.clear();
	for(mI = 0; mI < neurons.size(); ++mI)
	{
		if(neurons[mI] != 0) mNeurons.append(neurons[mI]);
	}

	if(mInput != NULL) delete [] mInput;
	if(mOutput != NULL) delete [] mOutput;
	mInput = new float[mInputCount];
	mOutput = new float[mOutputCount];

	unsigned int layers = mNeurons.size();
	unsigned int layerNeurons[layers];
	for(mI = 0; mI < layers; ++mI) layerNeurons[mI] = mNeurons[mI];

	if(type == Standard) mNetwork = fann_create_standard_array(layers, layerNeurons);
	#ifndef GPU
		else if(type == Sparse)
		{
			mNetwork = fann_create_sparse_array(connectionRate, layers, layerNeurons);
			mConnectionRate = connectionRate;
		}
		else if(type == Shortcut) mNetwork = fann_create_shortcut_array(layers, layerNeurons);
	#endif
	else return false;

	fann_set_train_stop_function(mNetwork, FANN_STOPFUNC_MSE);

	if(ENABLE_CALLBACK)
	{
		fann_set_callback(mNetwork, &ViFann::trainCallback);
		mMseTotal.clear();
		mMseCount = 0;
	}

	return true;
}
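For reference, the three network types this method dispatches on map directly to three FANN creation calls. A minimal sketch with hypothetical layer sizes:

#include "fann.h"

int main(void)
{
	unsigned int layers[3] = {4, 8, 2};

	/* Fully connected feedforward network. */
	struct fann *standard = fann_create_standard_array(3, layers);

	/* Sparse network: roughly half of the possible connections are created. */
	struct fann *sparse = fann_create_sparse_array(0.5f, 3, layers);

	/* Shortcut network: every layer also connects to all later layers. */
	struct fann *shortcut = fann_create_shortcut_array(3, layers);

	fann_destroy(standard);
	fann_destroy(sparse);
	fann_destroy(shortcut);
	return 0;
}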
Example #6
/*! fann.create_standard(num_layers, neurons_1, neurons_2, ..., neurons_n)
 *# Creates a neural network with {{num_layers}}.\n
 *# The i'th layer will have {{neurons_i}} neurons (the function must thus have
 *# {{num_layers+1}} parameters in total).
 *x ann = fann.create_standard(3, 2, 3, 1)
 *-
 */
static int ann_create_standard(lua_State *L)
{
	struct fann **ann;
	int num_layers, i;
	unsigned int *layers;

	luaL_argcheck(L, lua_isinteger(L,1), 1, "First argument to fann.create_standard() must be an integer");

	num_layers = lua_tointeger(L, 1);
#ifdef FANN_VERBOSE
	printf("Creating neural net, %d layers\n", num_layers);
#endif

	if(num_layers < 1)
		luaL_error(L, "Neural network must have at least one layer");

	if(lua_gettop(L) < num_layers + 1)
		luaL_error(L, "Neural net has %d layers, so fann.open() must have %d parameters", num_layers, num_layers + 1);

	layers = lua_newuserdata(L, num_layers*(sizeof *layers));

	for(i = 0; i < num_layers; i++)
	{
		int n = luaL_checkinteger(L, i + 2);
		if(n < 1)
		{
			luaL_error(L, "Layer %d must have at least 1 neuron", i);
		}

#ifdef FANN_VERBOSE
		printf("Layer %d to have %d neurons\n", i, n);
#endif
		layers[i] = n;
	}

	ann = lua_newuserdata(L, sizeof *ann);

	luaL_getmetatable(L, FANN_METATABLE);
	lua_setmetatable(L, -2);

	*ann = fann_create_standard_array(num_layers, layers);
	if(!*ann)
	{
		luaL_error(L, "Unable to create neural network");
	}

	return 1;
}
Example #7
int main()
{
	const float desired_error = (const float) 0.0001;
	const unsigned int max_epochs = 350;
	const unsigned int epochs_between_reports = 25;
	struct fann *ann;
	struct fann_train_data *train_data;

	unsigned int i = 0;

	printf("Creating network.\n");

	train_data = fann_read_train_from_file("ann_training_data");
	// Using incremental training -> shuffle training data
	fann_shuffle_train_data(train_data);

//	ann = fann_create_standard(num_layers,
//					  train_data->num_input, num_neurons_hidden, train_data->num_output);

	//ann = fann_create_standard(500, 2, 50, 50, 21);
	unsigned int layers[4] = {150, 70, 30, 22};
	ann = fann_create_standard_array(4, layers);
	printf("Training network.\n");

	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
	fann_set_activation_function_output(ann, FANN_LINEAR_PIECE); //FANN_SIGMOID_STEPWISE);

	fann_set_training_algorithm(ann, FANN_TRAIN_INCREMENTAL);

	fann_train_on_data(ann, train_data, max_epochs, epochs_between_reports, desired_error);

	printf("Saving network.\n");

	fann_save(ann, "mymoves_gestures.net");

	printf("Cleaning up.\n");
	fann_destroy_train(train_data);
	fann_destroy(ann);

	return 0;
}
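Because incremental (online) training is sensitive to pattern order, one variation on the program above is to reshuffle before every epoch rather than once up front. A minimal sketch reusing the same data file and topology:

#include "fann.h"

int main(void)
{
	struct fann_train_data *data = fann_read_train_from_file("ann_training_data");
	unsigned int layers[4] = {150, 70, 30, 22};
	struct fann *ann = fann_create_standard_array(4, layers);
	unsigned int epoch;

	fann_set_training_algorithm(ann, FANN_TRAIN_INCREMENTAL);

	/* Reshuffle before every epoch, since pattern order matters here. */
	for (epoch = 0; epoch < 350; epoch++) {
		fann_shuffle_train_data(data);
		if (fann_train_epoch(ann, data) <= 0.0001f)
			break;
	}

	fann_destroy_train(data);
	fann_destroy(ann);
	return 0;
}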
Example #8
int main()
{
    const float desired_error = (const float) 0.01;
    const unsigned int max_epochs = 50000;
    const unsigned int epochs_between_reports = 10;

    unsigned int layers[] = {6, 200, 200, 4};
    struct fann * ann = fann_create_standard_array(4, layers);

    fann_set_activation_function_layer(ann, FANN_SIGMOID_SYMMETRIC, 1);
    fann_set_activation_function_layer(ann, FANN_SIGMOID_SYMMETRIC, 2);
    fann_set_activation_function_layer(ann, FANN_SIGMOID_SYMMETRIC, 3);
    fann_train_on_file(ann, "../training_file", max_epochs,
                       epochs_between_reports, desired_error);

    fann_save(ann, "wii.net");

    fann_destroy(ann);

    return 0;
}
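The three per-layer calls above can be collapsed into a loop. A short sketch, assuming FANN 2.2's fann_get_num_layers; layer 0 is the input layer and has no activation function to set:

#include "fann.h"

int main(void)
{
	unsigned int layers[] = {6, 200, 200, 4};
	struct fann *ann = fann_create_standard_array(4, layers);
	unsigned int i;

	/* Set the activation function for every layer except the input layer. */
	for (i = 1; i < fann_get_num_layers(ann); i++)
		fann_set_activation_function_layer(ann, FANN_SIGMOID_SYMMETRIC, i);

	fann_destroy(ann);
	return 0;
}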
Example #9
/***
 * @function rspamd_fann.create(nlayers, [layer1, ... layern])
 * Creates a new neural network with `nlayers` layers that contains
 * `layer1`...`layern` neurons in the corresponding layers
 * @param {number} nlayers number of layers
 * @param {number} layerI number of neurons in each layer
 * @return {fann} fann object
 */
static gint
lua_fann_create (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;

	nlayers = luaL_checknumber (L, 1);

	if (nlayers > 0) {
		layers = g_malloc (nlayers * sizeof (layers[0]));

		for (i = 0; i < nlayers; i ++) {
			layers[i] = luaL_checknumber (L, i + 2);
		}

		f = fann_create_standard_array (nlayers, layers);
		/* FANN copies the layer sizes, so the array can be freed here */
		g_free (layers);

		if (f != NULL) {
			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			lua_pushnil (L);
		}
	}
	else {
		lua_pushnil (L);
	}

	return 1;
#endif
}
Example #10
/* Creates a feedforward, layered net which is an "unrolled" recurrent network.
 For example, the recurrent net:
  A <-> B <-> C<- (where C autosynapses)
 Becomes (unrolled two time steps):
  A  B  C    input layer
   \/ \/|
  A  B  C    hidden layer I
   \/ \/|
  A  B  C    output layer
*/
FANN_EXTERNAL struct fann *FANN_API fann_create_unrolled_recurrent(
	unsigned int num_neurons, fann_type *weights, unsigned int time_steps)
{
	struct fann *ann        = NULL;
	unsigned int *layers    = NULL;
	unsigned int num_layers = time_steps + 1;
	unsigned int layern     = 0;

	struct fann_layer *curr_layer   = NULL;
	struct fann_neuron *curr_neuron = NULL;
	fann_type *curr_weights         = weights;

	
	/*************************************
	  CREATE THE FEEDFORWARD STRUCTURE 
	 *************************************/

	/* Allocate number of neurons per layer array */
	layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
	if (layers == NULL)
	{
		return NULL;
	}

	/* Populate each layer with the number of neurons */
	for (layern=0; layern < num_layers; layern++)
	{
		layers[layern] = num_neurons;
	}

	/* Create the feedforward network */
	ann = fann_create_standard_array(num_layers, layers);
	fann_safe_free(layers);

	/*printf("REQUESTED: LAYERS=%d, NEURONS/LAYER=%d\n", num_layers, num_neurons);
	printf("NUM LAYERS: %d\n", ann->last_layer - ann->first_layer);
	printf("IN: %d, NEURONS: %d, OUTPUT: %d\n",
		ann->num_input, ann->num_neurons, ann->num_output);*/


	/*************************************
	  SET THE FEEDFORWARD WEIGHTS
	 *************************************/

	/* Visit each layer */
	for (curr_layer = ann->first_layer;
		curr_layer != ann->last_layer;
		curr_layer++)
	{
		/* The weights are the same for each feedforward layer! */
		curr_weights = weights;

		/* Copy the weight matrix into the neurons, 
		   one row per neuron */
		for (curr_neuron = curr_layer->first_neuron; 
			curr_neuron != curr_layer->last_neuron; 
			curr_neuron++)
		{
			/* copy one row (num_neurons weights) into this neuron */
			memcpy(curr_neuron->weights, curr_weights, num_neurons * sizeof(fann_type));

			curr_weights += num_neurons;
		}
	}

	return ann;
}
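A hypothetical call for the three-neuron network sketched in the comment block, assuming a floating-point build of FANN and a row-major weight layout in which weights[i * num_neurons + j] is the connection from neuron j into neuron i:

#include "fann.h"

/* Defined in the example above. */
FANN_EXTERNAL struct fann *FANN_API fann_create_unrolled_recurrent(
	unsigned int num_neurons, fann_type *weights, unsigned int time_steps);

int main(void)
{
	/* Unroll A <-> B <-> C (with C autosynapsing) over two time steps. */
	fann_type weights[9] = {
		0.0, 0.5, 0.0,	/* into A: from B       */
		0.5, 0.0, 0.5,	/* into B: from A and C */
		0.0, 0.5, 0.5	/* into C: from B and C */
	};
	struct fann *ann = fann_create_unrolled_recurrent(3, weights, 2);

	fann_destroy(ann);
	return 0;
}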
Example #11
/*
arguments (all required):
 - data filename
 - topology, as number of neurons per layer separated by dashes
 - epochs (integer)
 - learning rate (0.0-1.0 float)
 - output filename
*/
int main(int argc, char **argv)
{
    // Check that all five required arguments are present.
    if (argc < 6) {
        fprintf(stderr, "usage: %s data topology epochs learning_rate output\n", argv[0]);
        return 1;
    }

    // Argument 1: data filename.
    const char *datafn = argv[1];

    // Argument 2: topology.
    unsigned int layer_sizes[MAX_LAYERS];
    unsigned int num_layers = 0;
    char *token = strtok(argv[2], "-");
    while (token != NULL) {
        layer_sizes[num_layers] = atoi(token);
        ++num_layers;
        token = strtok(NULL, "-");
    }

    // Argument 3: epoch count.
    unsigned int max_epochs = atoi(argv[3]);

    // Argument 4: learning rate.
    float learning_rate = atof(argv[4]);

    // Argument 5: output filename.
    const char *outfn = argv[5];

    struct fann *ann;
    ann = fann_create_standard_array(num_layers, layer_sizes);

    // Misc parameters.
    fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
    fann_set_activation_steepness_hidden(ann, 0.5);
    fann_set_activation_steepness_output(ann, 0.5);
    fann_set_activation_function_hidden(ann, FANN_SIGMOID);
    fann_set_activation_function_output(ann, FANN_SIGMOID);
    //fann_set_train_stop_function(ann, FANN_STOPFUNC_BIT);
    //fann_set_bit_fail_limit(ann, 0.01f);

    struct fann_train_data *data;
    data = fann_read_train_from_file(datafn);
    fann_init_weights(ann, data);

    fann_set_learning_rate(ann, learning_rate);
    fann_train_on_data(
        ann,
        data,
        max_epochs,
        10,  // epochs between reports
        DESIRED_ERROR
    );

    printf("Testing network. %f\n", fann_test_data(ann, data));

    fann_type *calc_out;
    for(unsigned int i = 0; i < fann_length_train_data(data); ++i)
    {
        calc_out = fann_run(ann, data->input[i]);
    }

    printf("RMSE = %f\n", sqrt(fann_get_MSE(ann)));

    fann_save(ann, outfn);

    fann_destroy_train(data);
    fann_destroy(ann);

    return 0;
}
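The strtok/atoi parsing above accepts malformed topologies silently, since atoi returns 0 on garbage. A hedged sketch of a stricter parser; parse_topology is a hypothetical helper, not part of the program above:

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: parse a "38-17-9"-style topology with error checking. */
static int parse_topology(char *arg, unsigned int *sizes,
                          unsigned int max_layers, unsigned int *num_layers)
{
    char *token = strtok(arg, "-");
    *num_layers = 0;
    while (token != NULL) {
        char *end;
        unsigned long v = strtoul(token, &end, 10);
        /* Reject trailing junk, zero-neuron layers, and overlong topologies. */
        if (*end != '\0' || v == 0 || *num_layers >= max_layers)
            return -1;
        sizes[(*num_layers)++] = (unsigned int) v;
        token = strtok(NULL, "-");
    }
    return (*num_layers >= 2) ? 0 : -1;  /* at least input and output layers */
}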
Example #12
int main(int argc, char *argv[])
{
	struct fann_train_data *dadosTreino, *dadosTeste;
	struct fann *ANN;
	fann_type *ANN_Answers;

	unsigned int *layers;
	int i, j, aux;
	chromosome chromo;

	float erro = 0.0;

	checkArgs(argc, argv);
	buildChromosome(argv, &chromo);

	checkDatasetFiles();

	dadosTreino = fann_read_train_from_file(nomeArqTreino);

	layers = (unsigned int *) calloc(2+chromo.qntCamadasOcultas, sizeof(unsigned int));
	layers[0] = qntNeuroniosEntrada;
	layers[2+chromo.qntCamadasOcultas-1] = qntNeuroniosSaida;
	aux = chromo.neurOcultos;
	for (i=1; i < 2+chromo.qntCamadasOcultas-1 ; i++)
	{
		layers[i] = aux;
		aux = aux/2;
	}

	// CREATE THE ANN:
	ANN = fann_create_standard_array(2+chromo.qntCamadasOcultas, layers);

	// TRAINING
	fann_set_learning_rate(ANN, chromo.learnRate);
	fann_set_learning_momentum(ANN, chromo.moment);

	fann_set_activation_function_hidden( ANN, chromo.fcOculta );
	fann_set_activation_function_output( ANN, chromo.fcSaida  );
	fann_set_training_algorithm(ANN, chromo.algAprend );

	if (fann_get_training_algorithm(ANN) == FANN_TRAIN_QUICKPROP)
		fann_set_quickprop_decay(ANN, chromo.decay);

	// In the Python version, training was wrapped in a try block.
	// On error, it printed "Resultado: 999.0" and exited.
	fann_train_on_data(ANN, dadosTreino, chromo.epocasTreino, 50, desiredError);

	fann_destroy_train(dadosTreino);

	// TESTS:
	dadosTeste  = fann_read_train_from_file( nomeArqValidacao);

	// In the Python version, testing was also wrapped in a try block.
	// On error, it printed "Resultado: 999.0" and exited.
	for(i = 0; i < fann_length_train_data(dadosTeste); i++)
	{
		ANN_Answers = fann_run(ANN, dadosTeste->input[i]);
		if (ANN_Answers == NULL)
		{
			printf("Resultado: 999.0\n");
			exit(2);
		}

		for (j=0; j < qntNeuroniosSaida; j++)
			erro += (float) powf(fann_abs(ANN_Answers[j] - dadosTeste->output[i][j]), 2);
	}
	printf("Resultado: %f\n", erro/(fann_length_train_data(dadosTeste)-1));

	fann_destroy_train(dadosTeste);

	saveANN(argc, argv, ANN);

	fann_destroy(ANN);

	return 0;
}
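The manual squared-error loop above can be cross-checked against FANN's built-in helper: fann_test_data runs every pattern in a data set and returns the resulting mean squared error. A minimal sketch; the file names are hypothetical:

#include <stdio.h>
#include "fann.h"

int main(void)
{
	struct fann *ann = fann_create_from_file("rede.net");
	struct fann_train_data *teste = fann_read_train_from_file("validacao.data");

	/* fann_test_data runs every pattern and returns the MSE. */
	printf("MSE: %f\n", fann_test_data(ann, teste));

	fann_destroy_train(teste);
	fann_destroy(ann);
	return 0;
}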
Example #13
/***
 * @function rspamd_fann.create_full(params)
 * Creates a new neural network with the following parameters:
 * - `layers` {table/numbers}: table of layers in the form {N1, N2, N3 ... Nn}, where N is the number of neurons in a layer
 * - `activation_hidden` {string}: activation function type for hidden layers (`tanh` by default)
 * - `activation_output` {string}: activation function type for the output layer (`tanh` by default)
 * - `sparsed` {float}: create a sparse ANN, where the number is the sparseness coefficient
 * - `learn` {string}: learning algorithm (quickprop, rprop or incremental)
 * - `randomize` {boolean}: randomize weights (true by default)
 * @return {fann} fann object
 */
static gint
lua_fann_create_full (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;
	const gchar *activation_hidden = NULL, *activation_output = NULL, *learn_alg = NULL;
	gdouble sparsed = 0.0;
	gboolean randomize_ann = TRUE;
	GError *err = NULL;

	if (lua_type (L, 1) == LUA_TTABLE) {
		lua_pushstring (L, "layers");
		lua_gettable (L, 1);

		if (lua_type (L, -1) != LUA_TTABLE) {
			return luaL_error (L, "bad layers attribute");
		}

		nlayers = rspamd_lua_table_size (L, -1);
		if (nlayers < 2) {
			return luaL_error (L, "bad layers attribute");
		}

		layers = g_new0 (guint, nlayers);

		for (i = 0; i < nlayers; i ++) {
			lua_rawgeti (L, -1, i + 1);
			layers[i] = luaL_checknumber (L, -1);
			lua_pop (L, 1);
		}

		lua_pop (L, 1); /* Table */

		if (!rspamd_lua_parse_table_arguments (L, 1, &err,
				"sparsed=N;randomize=B;learn=S;activation_hidden=S;activation_output=S",
				&sparsed, &randomize_ann, &learn_alg, &activation_hidden, &activation_output)) {
			g_free (layers);

			if (err) {
				gint r;

				r = luaL_error (L, "invalid arguments: %s", err->message);
				g_error_free (err);
				return r;
			}
			else {
				return luaL_error (L, "invalid arguments");
			}
		}

		if (sparsed != 0.0) {
			f = fann_create_sparse_array (sparsed, nlayers, layers);
		}
		else {
			f = fann_create_standard_array (nlayers, layers);
		}

		if (f != NULL) {
			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			g_free (layers);
			return luaL_error (L, "cannot create fann");
		}

		fann_set_activation_function_hidden (f,
				string_to_activation_func (activation_hidden));
		fann_set_activation_function_output (f,
				string_to_activation_func (activation_output));
		fann_set_training_algorithm (f, string_to_learn_alg (learn_alg));

		if (randomize_ann) {
			fann_randomize_weights (f, 0, 1);
		}

		g_free (layers);
	}
	else {
		return luaL_error (L, "bad arguments");
	}

	return 1;
#endif
}
Example #14
int sci_fann_create(char * fname)
{
  int * pi_command_addr = NULL;
  int m_layers,  n_layers,  * pi_layers_addr = NULL;
  int * pi_conn_addr = NULL;
  char * Command = NULL;
  double * layers = NULL, conn = 0.0;
  unsigned int * ui_layers = NULL;
  int res, numLayers, i;
  struct fann * result_ann = NULL;
  SciErr _sciErr;

  if (Rhs<2)
    {
      Scierror(999,"%s usage: ann = %s(command,[layers ...])", fname, fname);
      return 0;
    }

  _sciErr = getVarAddressFromPosition(pvApiCtx, 1, &pi_command_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  getAllocatedSingleString(pvApiCtx,  pi_command_addr, &Command);

  _sciErr = getVarAddressFromPosition(pvApiCtx, 2, &pi_layers_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  _sciErr = getMatrixOfDouble(pvApiCtx, pi_layers_addr, &m_layers, &n_layers, &layers);

  if ((n_layers != 1) & (m_layers !=1))
    {
      Scierror(999,"%s: Layers must be a vector!",fname);
      return 0;
    }
  
  numLayers = m_layers * n_layers;
  ui_layers = (unsigned int *)MALLOC(numLayers*sizeof(unsigned int));
  for(i=0; i<numLayers; i++) ui_layers[i] = layers[i];

  if (strcmp(Command,"standard") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_standard_array  Just like fann_create_standard, but with an array of layer sizes instead of individual parameters.
      result_ann = fann_create_standard_array(numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create standard network\n",fname);
	  return 0;
	}
    }
  
  if (strcmp(Command,"sparse") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_sparse_array    Just like fann_create_sparse, but with an array of layer sizes instead of individual parameters.
      _sciErr = getVarAddressFromPosition(pvApiCtx, 3, &pi_conn_addr);
      if (_sciErr.iErr)
	{
	  printError(&_sciErr, 0);
	  return 0;
	}
      getScalarDouble(pvApiCtx, pi_conn_addr, &conn);

      result_ann = fann_create_sparse_array(conn,numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create sparse network\n",fname);
	  return 0;
	}
    }

  if (strcmp(Command,"shortcut") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_shortcut_array  Just like fann_create_shortcut, but with an array of layer sizes instead of individual parameters.
      result_ann = fann_create_shortcut_array(numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create shortcut network\n",fname);
	  return 0;
	}
    }
  else
    {
      Scierror(999,"%s: unknown command '%s'\n",fname,Command);
      freeAllocatedSingleString(Command);
      FREE(ui_layers);
      return 0;
    }

  //Create the struct representing this ann in scilab
  res = createScilabFannStructFromCFannStruct(result_ann, Rhs + 1);
  if (res==-1) return 0;

  LhsVar(1) = Rhs + 1;

  return 0;
}