Example #1
NN* nnsetup(int nlayers, const int* archi, float learningRate, float scaling_learningRate, float momentum, int activationFunc, int outputFunc)
{
    int k;
    NN *nn = (NN*)malloc(sizeof(NN));
    nn->n = nlayers;
    nn->learningRate = learningRate;
    nn->scaling_learningRate = scaling_learningRate;
    nn->momentum = momentum;
    nn->inputUnits = archi[0];

    // The input layer carries no weights: only indices 0..nlayers-2 are used.
    nn->layer = (Layer*)malloc(nlayers * sizeof(Layer));
    for (k = 0; k < nlayers - 1; k++) {
        nn->layer[k].units = archi[k+1];
        // Hidden layers use activationFunc; the output layer uses outputFunc.
        nn->layer[k].activationFunc = (k < nlayers - 2) ? activationFunc : outputFunc;
        // archi[k] + 1 columns: one extra for the bias weight.
        nn->layer[k].w   = create2Darray(archi[k+1], archi[k] + 1);
        nn->layer[k].dw  = create2Darray(archi[k+1], archi[k] + 1);
        nn->layer[k].adw = create2Darray(archi[k+1], archi[k] + 1);
        nn->layer[k].mdw = create2Darray(archi[k+1], archi[k] + 1);
        nn->layer[k].a = (double*)malloc(archi[k+1] * sizeof(double));  /* activations */
        nn->layer[k].e = (double*)malloc(archi[k+1] * sizeof(double));  /* errors */
        memset(nn->layer[k].a, 0, archi[k+1] * sizeof(double));
        memset(nn->layer[k].e, 0, archi[k+1] * sizeof(double));
        randomWeight(nn->layer[k].w, archi[k+1], archi[k] + 1);  /* fill the weight matrix randomly */
        set2DarrayZero(nn->layer[k].adw, archi[k+1], archi[k] + 1);
        set2DarrayZero(nn->layer[k].mdw, archi[k+1], archi[k] + 1);
    }
    return nn;
}
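Most of the examples on this page call a randomWeight() helper that returns a single random value (the variant in Example #1 instead fills a whole weight matrix in place). None of the snippets include its definition, so here is a minimal sketch of the value-returning form; this is an assumption about a common pattern, not code from any of these projects:

#include <stdlib.h>

/* Sketch only: uniform value in [-1, 1].
   Assumes rand() has already been seeded (e.g. with srand) elsewhere. */
double randomWeight(void)
{
    return 2.0 * ((double)rand() / RAND_MAX) - 1.0;
}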
Example #2
NeuralConnection::NeuralConnection(
    Neuron & source, Neuron & target
):
    m_weight{randomWeight()},
    m_delta_weight{0.0},
    m_source{std::addressof(source)},
    m_target{std::addressof(target)}
{}
Example #3
Neuron::Neuron(unsigned numOutputs, unsigned myIndex)
{
    // One outgoing connection per neuron in the next layer,
    // each starting with a random weight.
    for (unsigned c = 0; c < numOutputs; ++c) {
        m_outputWeights.push_back(Connection());
        m_outputWeights.back().weight = randomWeight();
    }

    m_myIndex = myIndex;
}
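A constructor like this is normally invoked while assembling a layer, with numOutputs equal to the size of the next layer. A short usage sketch, under the assumption (not shown in the snippet) that a layer is simply a std::vector<Neuron>:

#include <vector>

// Sketch: build a layer of 4 neurons that each feed 3 downstream neurons,
// so each neuron owns 3 randomly weighted outgoing connections.
void buildLayer(std::vector<Neuron> &layer)
{
    for (unsigned i = 0; i < 4; ++i)
        layer.push_back(Neuron(3 /* numOutputs */, i /* myIndex */));
}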
Example #4
Neuron::Neuron(int num_inputs)
  : weights(num_inputs),
    output(0)
{
    // One random weight per input.
    for (int i = 0; i < num_inputs; i++) {
        weights[i] = randomWeight();
    }
}
Example #5
LConnection::LConnection()
{
    //fprintf( stderr, "Connecting the neuron\r" );

    LRandom     random;

    // Note: the return value of randomWeight() is discarded here; unless the
    // helper has side effects, a weight member was presumably meant to receive it.
    randomWeight();
    mConnEntry  = 0.0;
    mConnExit   = 0.0;
}
Example #6
Neuron::Neuron( unsigned numOutputs, unsigned weightIndex )
{
    for( unsigned c = 0; c < numOutputs; ++c )
    {
        _outputWeights.push_back( Connection() );
        _outputWeights.back().setWeight( randomWeight() );
    }

    _weightIndex = weightIndex;
}
Example #7
void BPTT_re_randomize(RNN *net, int numLayers, int *neuronsOfLayer)
{
    extern double randomWeight();

    srand(time(NULL));

    for (int l = 1; l < numLayers; ++l)                         // for each layer
        for (int n = 0; n < neuronsOfLayer[l]; ++n)             // for each neuron
            for (int i = 0; i <= neuronsOfLayer[l - 1]; ++i)    // for each weight; i == 0 is the bias
                net->layers[l].neurons[n].weights[i] = randomWeight();
}
Example #8
/**
 * Generate benchmark graph 1.
 *
 * Given argument n, generate a graph with n^2+2 vertices.
 *
 * Imagine an nxn square of vertices viewed as a set of n columns. All
 * vertices in column i are connected to all vertices in column i+1, for
 * 0 <= i < n-1. Vertex s is connected to all vertices in column 0,
 * while all vertices in column n-1 are connected to the target vertex t.
 *
 * All edges are given a random weight in the range 1..n^2.
 *
 * Total edges = n + n^2(n-1) + n = n^3 - n^2 + 2n.
 * For example, n = 2 gives 6 vertices and 2^3 - 2^2 + 4 = 8 edges.
 */
int main (int argc, char **argv) {
  int i, j, k;

  if (argc < 2) {
    printf ("Usage: ./generateBench n\n");
    printf ("       parameter n is used to generate graph with n^2+2 vertices.\n");
    return 0;
  }

  int n = atoi (argv[1]);
  printf ("Benchmark 1 [%d]\n", n);

  maxWeight = n*n;

  int e = n*n*n-n*n+2*n;
  printf ("%d %d directed\n", n*n+2, e);
  
  // output s --> vi and vi --> t
  for (i = 1; i <= n; i++) {
    printf ("%d,%d,%d\n", 0, i, randomWeight());
  }
  for (i = n*n-n+1; i <= n*n; i++) {
    printf ("%d,%d,%d\n", i, n*n+1, randomWeight());
  }

  // output inner edges for the square: column c (vertices i..i+n-1)
  // connects fully to column c+1 (vertices i+n..i+2n-1)
  i = 1;
  while (i < n*(n-1)) {

    for (j = 0; j < n; j++) {
      for (k = 0; k < n; k++) {
        printf ("%d,%d,%d\n", i+j, i+n+k, randomWeight());
      }
    }

    i += n;
  }

  return 0;
}
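A sample run, assuming the program was compiled to ./generateBench as the usage string suggests. For n = 2 it prints the 6-vertex, 8-edge graph; the third column is random, so the weights below are placeholders in 1..4:

$ ./generateBench 2
Benchmark 1 [2]
6 8 directed
0,1,<w>    s (vertex 0) to column 0
0,2,<w>
3,5,<w>    column 1 to t (vertex 5)
4,5,<w>
1,3,<w>    inner edges: column 0 to column 1
1,4,<w>
2,3,<w>
2,4,<w>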
Example #9
RNN *create_BPTT_NN(int numLayers, int *neuronsOfLayer)
{
    RNN *net = (RNN *) malloc(sizeof(RNN));
    extern double randomWeight();

    srand(time(NULL));
    net->numLayers = numLayers;

    assert(numLayers >= 3);

    net->layers = (rLAYER *) malloc(numLayers * sizeof (rLAYER));
    // construct input layer, no weights
    net->layers[0].numNeurons = neuronsOfLayer[0];
    net->layers[0].neurons = (rNEURON *) malloc(neuronsOfLayer[0] * sizeof (rNEURON));

    // construct hidden and output layers
    for (int l = 1; l < numLayers; l++)
    {
        // This takes care of n-folds of outputs and gradients:
        net->layers[l].neurons = (rNEURON *) malloc(neuronsOfLayer[l] * sizeof (rNEURON));
        net->layers[l].numNeurons = neuronsOfLayer[l];
        for (int n = 0; n < neuronsOfLayer[l]; n++) // construct each neuron in the layer
        {
            // Only 1 array of weights per neuron, because weights are shared across folds.
            net->layers[l].neurons[n].weights =
                    (double *) malloc((neuronsOfLayer[l - 1] + 1) * sizeof (double));
            for (int i = 0; i <= neuronsOfLayer[l - 1]; i++)
            {
                // construct weights of neuron from previous-layer neurons;
                // when i = 0, it's the bias weight
                net->layers[l].neurons[n].weights[i] = randomWeight();
            }
        }
    }
    return net;
}
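A minimal usage sketch tying this constructor to the BPTT_re_randomize routine from Example #7; the layer sizes are illustrative, and the RNN type comes from the same (unshown) header:

void demo(void)
{
    int sizes[3] = {4, 8, 2};              /* input, hidden, output */
    RNN *net = create_BPTT_NN(3, sizes);   /* allocates layers, draws random weights */
    BPTT_re_randomize(net, 3, sizes);      /* re-draws every weight, e.g. between training restarts */
}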
Example #10
Connection::Connection()
{
	weight = randomWeight();
	deltaWeight = 0.0;
}
Example #11
Connection::Connection(Neuron* a, Neuron* b) {
	weight = randomWeight();
	a->addOut(this);
	b->addIn(this);
}
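Because the constructor registers itself with both endpoints, wiring a pair of neurons is a one-liner. A sketch, assuming this codebase's Neuron exposes the addOut/addIn used above and that the neurons keep the connection pointer alive:

// Sketch: connect neuron a to neuron b with a randomly weighted edge.
void wire(Neuron &a, Neuron &b)
{
    Connection *link = new Connection(&a, &b);  // a records it via addOut, b via addIn
    (void)link;  // the neurons now hold the pointer
}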