FANN_EXTERNAL struct fann *FANN_API fann_create_sparse(float connection_rate, 
													   unsigned int num_layers, ...)
{
	struct fann *ann;
	va_list layer_sizes;
	int i;
	unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));

	if(layers == NULL)
	{
		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
		return NULL;
	}

	va_start(layer_sizes, num_layers);
	for(i = 0; i < (int) num_layers; i++)
	{
		layers[i] = va_arg(layer_sizes, unsigned int);
	}
	va_end(layer_sizes);

	ann = fann_create_sparse_array(connection_rate, num_layers, layers);

	free(layers);

	return ann;
}
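
For reference, here is a minimal usage sketch of the variadic call above and its array-based equivalent (assuming the standard FANN header <fann.h>; the 2-3-1 topology is just an illustration):

#include <fann.h>

int main(void)
{
	/* Variadic form: connection rate 0.5, three layers of 2, 3 and 1 neurons. */
	struct fann *ann = fann_create_sparse(0.5f, 3, 2, 3, 1);

	/* Equivalent array form, which the variadic wrapper delegates to. */
	unsigned int layers[3] = { 2, 3, 1 };
	struct fann *ann2 = fann_create_sparse_array(0.5f, 3, layers);

	if(ann != NULL) fann_destroy(ann);
	if(ann2 != NULL) fann_destroy(ann2);
	return 0;
}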
Example #2
File: vifann.cpp Project: EQ4/Visore
bool ViFann::setStructure(const Type &type, const QList<int> &neurons, const qreal &connectionRate)
{
	#ifdef GPU
		if(type != Standard)
		{
			LOG("The GPU version of FANN currently doesn't support shortcut, sparse or cascade networks.", QtFatalMsg);
			exit(-1);
		}
	#endif
	clear();

	mType = type;
	mInputCount = neurons.first();
	mOutputCount = neurons.last();
	mNeurons.clear();
	for(mI = 0; mI < neurons.size(); ++mI)
	{
		if(neurons[mI] != 0) mNeurons.append(neurons[mI]);
	}

	if(mInput != NULL) delete [] mInput;
	if(mOutput != NULL) delete [] mOutput;
	mInput = new float[mInputCount];
	mOutput = new float[mOutputCount];

	unsigned int layers = mNeurons.size();
	unsigned int layerNeurons[layers];
	for(mI = 0; mI < layers; ++mI) layerNeurons[mI] = mNeurons[mI];

	if(type == Standard) mNetwork = fann_create_standard_array(layers, layerNeurons);
	#ifndef GPU
		else if(type == Sparse)
		{
			mNetwork = fann_create_sparse_array(connectionRate, layers, layerNeurons);
			mConnectionRate = connectionRate;
		}
		else if(type == Shortcut) mNetwork = fann_create_shortcut_array(layers, layerNeurons);
	#endif
	else return false;

	fann_set_train_stop_function(mNetwork, FANN_STOPFUNC_MSE);

	if(ENABLE_CALLBACK)
	{
		fann_set_callback(mNetwork, &ViFann::trainCallback);
		mMseTotal.clear();
		mMseCount = 0;
	}

	return true;
}
Example #3
File: fann.c Project: inigosola/lua-fann
/*! fann.create_sparse(connection_rate, num_layers, neurons_1, neurons_2, ..., neurons_n)
 *# Creates a neural network with {{num_layers}} layers that is not fully connected.\n
 *# The i'th layer will have {{neurons_i}} neurons (the function must thus have
 *# {{num_layers+2}} parameters in total).
 *x ann = fann.create_sparse(0.5, 3, 2, 3, 1)
 *-
 */
static int ann_create_sparse(lua_State *L)
{
	struct fann **ann;
	int num_layers, i;
	unsigned int *layers;
	float connection_rate;

	connection_rate = luaL_checknumber(L, 1);

	luaL_argcheck(L, lua_isinteger(L,2), 2, "Second argument to fann.create_sparse() must be an integer");

	num_layers = lua_tointeger(L, 2);
#ifdef FANN_VERBOSE
	printf("Creating neural net, %d layers\n", num_layers);
#endif

	if(num_layers < 1)
		luaL_error(L, "Neural network must have at least one layer");

	if(lua_gettop(L) < num_layers + 2)
		luaL_error(L, "Neural net has %d layers, so fann.create_sparse() must have %d parameters", num_layers, num_layers + 2);

	layers = lua_newuserdata(L, num_layers*(sizeof *layers));

	for(i = 0; i < num_layers; i++)
	{
		int n = luaL_checkinteger(L, i + 3);
		if(n < 1)
		{
			luaL_error(L, "Layer %d must have at least 1 neuron", i);
		}

#ifdef FANN_VERBOSE
		printf("Layer %d to have %d neurons\n", i, n);
#endif
		layers[i] = n;
	}

	ann = lua_newuserdata(L, sizeof *ann);

	luaL_getmetatable(L, FANN_METATABLE);
	lua_setmetatable(L, -2);

	*ann = fann_create_sparse_array(connection_rate, num_layers, layers);
	if(!*ann)
	{
		luaL_error(L, "Unable to create neural network");
	}

	return 1;
}
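
Note that `layers` is allocated with lua_newuserdata() rather than malloc(), so the buffer is owned by the Lua garbage collector; even if one of the later luaL_error() calls longjmps out of the function, the array is not leaked.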
Example #4
/***
 * @function rspamd_fann.create_full(params)
 * Creates a new neural network with the following parameters:
 * - `layers` {table/numbers}: table of layer sizes in the form {N1, N2, N3 ... Nn}, where Ni is the number of neurons in layer i
 * - `activation_hidden` {string}: activation function type for the hidden layers (`tanh` by default)
 * - `activation_output` {string}: activation function type for the output layer (`tanh` by default)
 * - `sparsed` {float}: create a sparse ANN, where the number is the connection-rate coefficient
 * - `learn` {string}: learning algorithm (quickprop, rprop or incremental)
 * - `randomize` {boolean}: randomize weights (true by default)
 * @return {fann} fann object
 */
static gint
lua_fann_create_full (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f, **pfann;
	guint nlayers, *layers, i;
	const gchar *activation_hidden = NULL, *activation_output = NULL, *learn_alg = NULL;
	gdouble sparsed = 0.0;
	gboolean randomize_ann = TRUE;
	GError *err = NULL;

	if (lua_type (L, 1) == LUA_TTABLE) {
		lua_pushstring (L, "layers");
		lua_gettable (L, 1);

		if (lua_type (L, -1) != LUA_TTABLE) {
			return luaL_error (L, "bad layers attribute");
		}

		nlayers = rspamd_lua_table_size (L, -1);
		if (nlayers < 2) {
			return luaL_error (L, "bad layers attribute");
		}

		layers = g_new0 (guint, nlayers);

		for (i = 0; i < nlayers; i ++) {
			lua_rawgeti (L, -1, i + 1);
			layers[i] = luaL_checknumber (L, -1);
			lua_pop (L, 1);
		}

		lua_pop (L, 1); /* Table */

		if (!rspamd_lua_parse_table_arguments (L, 1, &err,
				"sparsed=N;randomize=B;learn=S;activation_hidden=S;activation_output=S",
				&sparsed, &randomize_ann, &learn_alg, &activation_hidden, &activation_output)) {
			g_free (layers);

			if (err) {
				gint r;

				r = luaL_error (L, "invalid arguments: %s", err->message);
				g_error_free (err);
				return r;
			}
			else {
				return luaL_error (L, "invalid arguments");
			}
		}

		if (sparsed != 0.0) {
			f = fann_create_sparse_array (sparsed, nlayers, layers);
		}
		else {
			f = fann_create_standard_array (nlayers, layers);
		}

		if (f != NULL) {
			pfann = lua_newuserdata (L, sizeof (gpointer));
			*pfann = f;
			rspamd_lua_setclass (L, "rspamd{fann}", -1);
		}
		else {
			g_free (layers);
			return luaL_error (L, "cannot create fann");
		}

		fann_set_activation_function_hidden (f,
				string_to_activation_func (activation_hidden));
		fann_set_activation_function_output (f,
				string_to_activation_func (activation_output));
		fann_set_training_algorithm (f, string_to_learn_alg (learn_alg));

		if (randomize_ann) {
			fann_randomize_weights (f, 0, 1);
		}

		g_free (layers);
	}
	else {
		return luaL_error (L, "bad arguments");
	}

	return 1;
#endif
}
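
From the Lua side, the parameter table maps directly onto the options documented above, e.g. rspamd_fann.create_full({layers = {10, 5, 1}, learn = 'rprop', sparsed = 0.5}) for a sparse three-layer network.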
Example #5
FANN_EXTERNAL struct fann *FANN_API fann_create_standard_array(unsigned int num_layers,
															   const unsigned int *layers)
{
	return fann_create_sparse_array(1, num_layers, layers);	
}
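
Since a connection rate of 1 means fully connected, the two constructors should be interchangeable in that case; a small sketch (again assuming <fann.h>) to confirm that the connection counts match:

#include <stdio.h>
#include <fann.h>

int main(void)
{
	unsigned int layers[3] = { 2, 3, 1 };
	struct fann *std_ann = fann_create_standard_array(3, layers);
	struct fann *sparse_ann = fann_create_sparse_array(1.0f, 3, layers);

	/* Both networks should report the same total number of connections. */
	printf("standard: %u, sparse(1.0): %u\n",
	       fann_get_total_connections(std_ann),
	       fann_get_total_connections(sparse_ann));

	fann_destroy(std_ann);
	fann_destroy(sparse_ann);
	return 0;
}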
Example #6
int sci_fann_create(char * fname)
{
  int * pi_command_addr = NULL;
  int m_layers,  n_layers,  * pi_layers_addr = NULL;
  int * pi_conn_addr = NULL;
  char * Command = NULL;
  double * layers = NULL, conn = 0.0;
  unsigned int * ui_layers = NULL;
  int res, numLayers, i;
  struct fann * result_ann = NULL;
  SciErr _sciErr;

  if (Rhs<2)
    {
      Scierror(999,"%s usage: ann = %s(command,[layers ...])", fname, fname);
      return 0;
    }

  _sciErr = getVarAddressFromPosition(pvApiCtx, 1, &pi_command_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  getAllocatedSingleString(pvApiCtx,  pi_command_addr, &Command);

  _sciErr = getVarAddressFromPosition(pvApiCtx, 2, &pi_layers_addr);
  if (_sciErr.iErr)
    {
      printError(&_sciErr, 0);
      return 0;
    }
  _sciErr = getMatrixOfDouble(pvApiCtx, pi_layers_addr, &m_layers, &n_layers, &layers);

  if ((n_layers != 1) && (m_layers != 1))
    {
      Scierror(999,"%s: Layers must be a vector!",fname);
      return 0;
    }
  
  numLayers = m_layers * n_layers;
  ui_layers = (unsigned int *)MALLOC(numLayers*sizeof(unsigned int));
  for(i=0; i<numLayers; i++) ui_layers[i] = layers[i];

  if (strcmp(Command,"standard") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_standard_array  Just like fann_create_standard, but with an array of layer sizes instead of individual parameters.
      result_ann = fann_create_standard_array(numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create standard network\n",fname);
	  return 0;
	}
    }
  
  if (strcmp(Command,"sparse") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_sparse_array    Just like fann_create_sparse, but with an array of layer sizes instead of individual parameters.
      _sciErr = getVarAddressFromPosition(pvApiCtx, 3, &pi_conn_addr);
      if (_sciErr.iErr)
	{
	  printError(&_sciErr, 0);
	  return 0;
	}
      getScalarDouble(pvApiCtx, pi_conn_addr, &conn);

      result_ann = fann_create_sparse_array(conn,numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create sparse network\n",fname);
	  return 0;
	}
    }

  if (strcmp(Command,"shortcut") == 0)
    {
      freeAllocatedSingleString(Command);

      // fann_create_shortcut_array  Just like fann_create_shortcut, but with an array of layer sizes instead of individual parameters.
      result_ann = fann_create_shortcut_array(numLayers,ui_layers);
      FREE(ui_layers);
      if (result_ann==NULL)
	{
	  Scierror(999,"%s: not able to create shortcut network\n",fname);
	  return 0;
	}
    }

  //Create the struct representing this ann in scilab
  res = createScilabFannStructFromCFannStruct(result_ann, Rhs + 1);
  if (res==-1) return 0;

  LhsVar(1) = Rhs + 1;

  return 0;
}
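
Per the usage string above, the Scilab gateway takes the command first, then the layer vector, and (for "sparse") a scalar connection rate as the third argument; e.g., assuming the gateway is registered as fann_create (a guess from the source file name), ann = fann_create("sparse", [2 3 1], 0.5).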