Code example #1
File: nn.c Project: yflim/code-samples
// Creates a neural network; also calls makenetworkdat() to initialize the network data.
NeuralNetwork makeneuralnetwork(int inputs, int hiddens, unsigned short skip, int fn, double *fnpars, double learnr, int bias,
                           double ho_low, double ho_high, double ih_low, double ih_high, double skip_low, double skip_high)
{
  NeuralNetwork netwk;

  if (bias)
  {
    netwk.hbias = generateRand(BIAS_LOW, BIAS_HIGH);   // hidden-node bias; this system imposes the same bias on all hidden nodes
    netwk.obias = generateRand(BIAS_LOW, BIAS_HIGH);   // output-node bias
  }
  else
  {
    netwk.hbias = 0;
    netwk.obias = 0;
  }

  netwk.activationFunction = fn;

  // array of activation function parameters 
  netwk.fnpars = fnpars;

  // lr_ih, lr_ho, lr_skip: learning rate for the input->hidden, hidden->output, skip layer weights
  netwk.lr_ih = netwk.lr_ho = netwk.lr_skip = learnr;
  
  netwk.data = makenetworkdat(inputs, hiddens, skip, ho_low, ho_high, ih_low, ih_high, skip_low, skip_high);

  return netwk;
} 
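generateRand itself is not included in these samples. A minimal sketch consistent with the two-argument calls above (a uniform double in [low, high]) might look like the following; the use of rand() as the underlying source is an assumption:

#include <stdlib.h>

// Hypothetical sketch: uniform double in [low, high], assuming rand() as the source.
double generateRand(double low, double high)
{
  return low + (high - low) * ((double)rand() / (double)RAND_MAX);
}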
Code example #2
File: Shape.cpp Project: prodk/PingPong3D
// Returns a random number whose absolute value lies in the interval [start, end], with a random sign.
float generateRandomSignInterval(float start, float end)
{
    float sign = 1.0f;
    if( generateRand(-1., 1.) < 0)
        sign = -1.0f;

    return generateRand(sign*start, sign*end);
}
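For illustration, a hedged usage sketch (the magnitude range and the variable name are made up):

// Hypothetical usage: a velocity component whose magnitude is in [0.5, 2.0], sign chosen at random.
float vx = generateRandomSignInterval(0.5f, 2.0f);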
Code example #3
File: nn.c Project: yflim/code-samples
NetworkData makenetworkdat(int inputs, int hiddens, unsigned short skip, double ho_low, double ho_high, 
                           double ih_low, double ih_high, double skip_low, double skip_high)
{
  NetworkData dat;
  int i, j;

  // numbers of hidden, input, and output nodes; whether to use skip layer or not
  dat.hiddenNodes = hiddens; 
  dat.inputNodes = inputs; 
  dat.outputNodes = 1;
  dat.useSkipLayer = skip;

  // values in the input layer
  dat.inputLayer = calloc(inputs, sizeof(double));

  // values in the hidden layer (calloc already zero-initializes the array)
  dat.hiddenLayer = calloc(hiddens, sizeof(double));

  // values in the output layer - only one node in output layer here
  dat.outputLayer = 0.0;
  
  //input->hidden layer weights
  dat.weights_IH = calloc(hiddens, sizeof(double*));
  //hidden->output layer weights
  dat.weights_HO = calloc(hiddens, sizeof(double));
  //input->output layer weights (the skip layer)
  dat.weights_skip = calloc(inputs, sizeof(double));

  // each hidden node has a weighted connection to an (the) output node, and to each input node
  for (i=0; i<hiddens; ++i)
  {
    dat.weights_HO[i] = generateRand(ho_low, ho_high);
    dat.weights_IH[i] = calloc(inputs, sizeof(double));
    for(j=0; j < dat.inputNodes; j++)
      dat.weights_IH[i][j] = generateRand(ih_low, ih_high);
  }

  // each input node has a connection to an (the) output node
  if (skip)
    for(j=0; j < dat.inputNodes; j++)
      dat.weights_skip[j] = generateRand(skip_low, skip_high);
  else
    for(j=0; j < dat.inputNodes; j++)
      dat.weights_skip[j] = 0; 
  
  dat.patternerr = 0.0;

  return dat;
}
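makenetworkdat() allocates several buffers with calloc but no matching cleanup routine appears in the sample. A hypothetical sketch (the name freenetworkdat is invented here) might be:

#include <stdlib.h>

// Hypothetical cleanup for the buffers allocated in makenetworkdat().
void freenetworkdat(NetworkData *dat)
{
  int i;
  for (i = 0; i < dat->hiddenNodes; ++i)
    free(dat->weights_IH[i]);
  free(dat->weights_IH);
  free(dat->weights_HO);
  free(dat->weights_skip);
  free(dat->inputLayer);
  free(dat->hiddenLayer);
}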
Code example #4
//Returns bool: whether rLen is a multiple of maxNumThread.
//If yes: info[0] = number of blocks, info[1] = maxNumThread.
//If no: info[0] = number of blocks excluding the last block, info[1] = number of threads in the last block.
void testReduceImpl( int rLen, int OPERATOR, int numThreadPB , int numMaxBlock)
{
	int _CPU_GPU=0;
	int memSize = sizeof(Record)*rLen;

	void * h_Rin;
	HOST_MALLOC(h_Rin, memSize );
	generateRand((Record *)h_Rin, TEST_MAX - 11111, rLen, 0 );

	void* h_Rout;

	unsigned int numResult = 0;
	cl_mem d_Rin=NULL;
	cl_mem d_Rout;
	CL_MALLOC( &d_Rin, memSize );
	cl_writebuffer( d_Rin, h_Rin, memSize,0);
	numResult= CL_AggMaxOnly( d_Rin, rLen, &d_Rout, numThreadPB, numMaxBlock,_CPU_GPU);
	HOST_MALLOC(h_Rout, sizeof(Record)*numResult );
	cl_readbuffer( h_Rout, d_Rout, sizeof(Record)*numResult,_CPU_GPU);
	//validateReduce((Record *)h_Rin, rLen,((Record *)h_Rout)[0].y, OPERATOR );	
	HOST_FREE( h_Rin );
	HOST_FREE( h_Rout );
	CL_FREE( d_Rin );
	CL_FREE( d_Rout );
	printf("testReduceFinish\n");
}
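Examples #4, #5, and #7 call a four-argument generateRand that fills an array of Record values. Its definition is not included; a sketch consistent with those call sites (the Record fields x and y, and treating the last argument as a seed, are assumptions) could be:

#include <stdlib.h>

// Hypothetical: fill rLen records with random y-values in [0, max), using the given seed.
void generateRand(Record *records, int max, int rLen, int seed)
{
	int i;
	srand(seed);
	for (i = 0; i < rLen; ++i)
	{
		records[i].x = i;             // assumed: sequential record id
		records[i].y = rand() % max;  // assumed: random key/value
	}
}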
Code example #5
void testAggAfterGroupByImpl( int rLen, int OPERATOR, int numThread, int numBlock)
{
	int _CPU_GPU=0;
	int memSize = sizeof(Record)*rLen;
	void* h_Rin;
	void* h_Rout;
	void* h_Sin;
	int* h_startPos;
	HOST_MALLOC( h_Rin, memSize );
	HOST_MALLOC( h_Rout, memSize );
	HOST_MALLOC( h_Sin, memSize );
	generateRand((Record *) h_Rin, 50, rLen, 0 );
	generateRand((Record *) h_Sin, TEST_MAX, rLen, 0 );  
	int numGroup = 0;
	numGroup = CL_GroupBy((Record *)h_Rin, rLen,(Record *) h_Rout, &h_startPos, numThread, numBlock,_CPU_GPU);
	validateGroupBy((Record*) h_Rin, rLen, (Record*)h_Rout,h_startPos, numGroup );
	void* h_aggResults;
	HOST_MALLOC(h_aggResults, sizeof(int)*numGroup );
	switch(OPERATOR){
	case REDUCE_MAX:
		{
			CL_agg_max_afterGroupBy((Record *)h_Rout,rLen,h_startPos,numGroup,(Record *)h_Sin,(int *)h_aggResults,numThread,_CPU_GPU);
			break;
		}
	case REDUCE_MIN:
		{
			CL_agg_min_afterGroupBy((Record *)h_Rout,rLen,h_startPos,numGroup,(Record *)h_Sin,(int *)h_aggResults,numThread,_CPU_GPU);
			break;
		}
	case REDUCE_SUM:
		{
			CL_agg_sum_afterGroupBy((Record *)h_Rout,rLen,h_startPos,numGroup,(Record *)h_Sin,(int *)h_aggResults,numThread,_CPU_GPU);
			break;
		}
	case REDUCE_AVERAGE:
		{
			CL_agg_avg_afterGroupBy((Record *)h_Rout,rLen,h_startPos,numGroup,(Record *)h_Sin,(int *)h_aggResults,numThread,_CPU_GPU);
			break;
		}
	}
	validateAggAfterGroupBy((Record*) h_Rin, rLen, (int*)h_startPos, numGroup,(Record*) h_Sin, (int *)h_aggResults, OPERATOR);
	//release host buffers
	HOST_FREE( h_Rin );
	HOST_FREE( h_Rout );
	HOST_FREE( h_Sin );
	HOST_FREE( h_startPos );
	HOST_FREE( h_aggResults );
}
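As a sanity reference for the REDUCE_MAX path, a host-side per-group aggregation might look like this sketch; the startPos semantics (startPos[g] marks where group g begins in the grouped relation) and aggregating over the y field are assumptions:

// Hypothetical CPU reference: per-group maximum of the y-values in sin.
void cpuGroupMax(int rLen, int *startPos, int numGroup, Record *sin, int *results)
{
	int g, i;
	for (g = 0; g < numGroup; ++g)
	{
		int end = (g + 1 < numGroup) ? startPos[g + 1] : rLen;
		results[g] = sin[startPos[g]].y;
		for (i = startPos[g] + 1; i < end; ++i)
			if (sin[i].y > results[g])
				results[g] = sin[i].y;
	}
}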
Code example #6
/**
  * Initializes the Weights with small random values (generateRand()/NLayers[i]),
  * the Gradients with 0.0, and the Neurons with random output values.
  */
int NetInit( PtNet Net )
{
  int i, j, k;

  /* Verifying possible errors: */
  if (Net==NULL)
    return -2;
  /* Initializing Weights and Gradients: */
  for( i=Net->Layers-2 ; i>=0 ; i-- )
    for( j=0 ; j<=Net->NLayers[i+1] ; j++ )
      for( k=0 ; k<=Net->NLayers[i] ; k++ )
      {
        /* Inserting random values for the Weights: */
        Net->Weights[i][j][k] = generateRand()/Net->NLayers[i];
        /*printf("Net->Weights: %lf\n", Net->Weights[i][j][k]);*/
        /* Inserting 0.0 in Gradients: */
        Net->Gradients[i][j][k] = 0.0;
      }
  /* Initializing the neurons' output values with random numbers: */
  for( i=0 ; i<Net->Layers ; i++ )
    for ( j=0 ; j<Net->NLayers[i] ; j++ )
      Net->Neurons[i][j] = generateRand();
  return 0;
}
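NetInit() uses a zero-argument generateRand(). Given the doc comment about small weights after division by the layer size, a plausible sketch returning a uniform double in [0, 1) would be (rand() as the source is an assumption):

#include <stdlib.h>

/* Hypothetical: uniform double in [0, 1), matching the scaling in NetInit(). */
double generateRand( void )
{
  return (double)rand() / ((double)RAND_MAX + 1.0);
}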
Code example #7
void testGroupByImpl( int rLen, int numThread, int numBlock)
{
	int _CPU_GPU=0;
	int memSize = sizeof(Record)*rLen;

	void* h_Rin;
	HOST_MALLOC(h_Rin, memSize );
	void* h_Rout;
	HOST_MALLOC(h_Rout, memSize );
	generateRand((Record *)h_Rin, 64, rLen, 0 );
	int* h_startPos;

	int numGroup = 0;
	//group by
	numGroup=CL_GroupBy((Record *) h_Rin, rLen, (Record*) h_Rout, &h_startPos, numThread, numBlock,_CPU_GPU);
	//validate the grouping result
	validateGroupBy( (Record*)h_Rin, rLen, (Record*)h_Rout, h_startPos, numGroup );	
	HOST_FREE(h_startPos);
	HOST_FREE( h_Rin );
	HOST_FREE( h_Rout );
}
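A minimal driver for the test harness above might look like the following sketch; the relation length and launch parameters are arbitrary, and REDUCE_MAX is assumed to be one of the operator constants used in example #5:

// Hypothetical driver; all values are illustrative only.
int main( void )
{
	int rLen = 1024*1024;	// number of records (assumed)
	int numThread = 256;	// threads per block (assumed)
	int numBlock = 64;		// number of blocks (assumed)

	testGroupByImpl(rLen, numThread, numBlock);
	testAggAfterGroupByImpl(rLen, REDUCE_MAX, numThread, numBlock);
	testReduceImpl(rLen, REDUCE_MAX, numThread, numBlock);
	return 0;
}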