Example #1
double * Layer::computeOutput(double *input) const {
    double *inducedLocalFields = computeInducedLocalField(input);
    double *res = new double [mySize];
    for (int i = 0; i < mySize; ++i) {
        res[i] = activationFunction(inducedLocalFields[i]);
    }
    delete [] inducedLocalFields;
    return res;
}
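Example #1 returns a raw heap array that the caller must eventually delete[]. A minimal alternative sketch that keeps the same logic but returns a std::vector<double> instead (computeOutputVec is a hypothetical name; computeInducedLocalField and activationFunction are assumed to keep the signatures used above):

#include <vector>

// Hypothetical variant of Example #1 that avoids manual memory management
// of the result buffer.
std::vector<double> Layer::computeOutputVec(double *input) const {
    double *inducedLocalFields = computeInducedLocalField(input);
    std::vector<double> res(mySize);
    for (int i = 0; i < mySize; ++i) {
        res[i] = activationFunction(inducedLocalFields[i]);
    }
    delete [] inducedLocalFields;  // still owns the intermediate buffer
    return res;
}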
Example #2
void RBM::sampleVgivenH()
{
  const int N = h.rows();
  // Visible-unit probabilities: logistic(h * W + bv)
  pv = h * W;
  pv.rowwise() += bv.transpose();
  activationFunction(LOGISTIC, pv, pv);
  // Sample binary visible states: v(n, i) = 1 with probability pv(n, i)
  for(int n = 0; n < N; n++)
    for(int i = 0; i < D; i++)
      v(n, i) = (double)(pv(n, i) > rng->generate<double>(0.0, 1.0));
}
Example #3
void RBM::sampleHgivenV()
{
  const int N = v.rows();
  h.conservativeResize(N, Eigen::NoChange);
  // Hidden-unit probabilities: logistic(v * W^T + bh)
  ph = v * W.transpose();
  ph.rowwise() += bh.transpose();
  activationFunction(LOGISTIC, ph, ph);
  // Sample binary hidden states: h(n, j) = 1 with probability ph(n, j)
  for(int n = 0; n < N; n++)
    for(int j = 0; j < H; j++)
      h(n, j) = (double)(ph(n, j) > rng->generate<double>(0.0, 1.0));
}
Example #4
void FullyConnected::forwardPropagate(Eigen::MatrixXd* x, Eigen::MatrixXd*& y, bool dropout)
{
  const int N = x->rows();
  this->y.conservativeResize(N, Eigen::NoChange);
  this->x = x;
  // Activate neurons
  a = *x * W.transpose();
  if(bias)
    a.rowwise() += b.transpose();
  // Compute output
  activationFunction(act, a, this->y);
  y = &(this->y);
}
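Examples #2, #3 and #4 (and the Subsampling layer in Example #10 below) call an activationFunction(act, in, out) overload that applies the chosen nonlinearity element-wise to an Eigen matrix. A minimal sketch of what such a helper could look like, assuming only LOGISTIC and TANH cases; the real library's enum values and exact signature may differ:

#include <Eigen/Dense>

enum ActivationFunction { LOGISTIC, TANH };

// Hypothetical element-wise activation helper. in and out may refer to the
// same matrix; that is safe because every operation is coefficient-wise.
void activationFunction(ActivationFunction act,
                        const Eigen::MatrixXd& in, Eigen::MatrixXd& out)
{
  switch(act)
  {
  case LOGISTIC:
    out = ((-in.array()).exp() + 1.0).inverse().matrix();  // 1 / (1 + e^-x)
    break;
  case TANH:
    out = in.array().tanh().matrix();
    break;
  }
}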
Example #5
float nn::Neuron::genOutput()
{
	float output = 0.0f;

	for (unsigned int i = 0; i < _inputs.size(); i++)
		output += _inputs[i] * _weights[i];

	output = activationFunction(output);

	_output = output;

	return output;
}
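Example #5 is the usual weighted-sum-then-squash neuron. The accumulation loop could equally be written with std::inner_product, assuming _inputs and _weights are equally sized std::vector<float> members (a sketch, not the project's actual code):

#include <numeric>

float nn::Neuron::genOutput()
{
	// Dot product of inputs and weights, then squash through the activation
	_output = activationFunction(
		std::inner_product(_inputs.begin(), _inputs.end(),
		                   _weights.begin(), 0.0f));
	return _output;
}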
Example #6
ViElement ViNeuron::exportData()
{
	ViElement element("neuron");
	element.addChild("id", id());

	if(type() == ViNeuron::UnknownNeuron)
	{
		LOG("An unknown neuron (id: " + id() + ") was detected.", QtCriticalMsg);
	}
	element.addChild("type", typeToString(type()));

	if(activationFunction() != NULL)
	{
		element.addChild(activationFunction()->exportData());
	}

	if(type() == ViNeuron::BiasNeuron)
	{
		element.addChild("value", value());
	}

	return element;
}
Example #7
double calculateOutput(Neuron* neuron) {

	int i;
	neuron->output = 0;

	// Weighted sum of all inputs
	for(i = 0 ; i < neuron->inputCount ; i++) {
		neuron->output += neuron->inputs[i] * neuron->weights[i];
	}

	// Squash the sum through the activation function
	neuron->output = activationFunction(neuron->output);
	return neuron->output;
}
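Example #7 is plain C-style; the members it touches suggest a Neuron struct roughly like the following (a hypothetical reconstruction based only on the fields used above, not taken from the original project):

struct Neuron {
	int inputCount;    // number of incoming connections
	double *inputs;    // one value per incoming connection
	double *weights;   // one weight per incoming connection
	double output;     // filled in by calculateOutput()
};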
Example #8
// Compute hidden-layer activations from the input layer
void hiddenLayer::calculate(inputLayer iL, weights wil)
{
	for(int j=0; j < numHidden; j++)
	{
		//clear value
		setNeuron(j, 0);
		
		//get weighted sum of pattern and bias neuron
		for( int i=0; i <= iL.getInputs(); i++ )
		{
			setNeuron(j, (getNeuron(j) + (iL.getNeuron(i) * wil.getWeight(i,j))));
		}
		
		//set to result of sigmoid
		setNeuron(j, activationFunction(getNeuron(j)));
	}
}
Example #9
// Compute output-layer activations from the hidden layer
void outputLayer::calculate(hiddenLayer hL, weights who)
{
	for(int k=0; k < numOutput; k++)
	{
		//clear value
		setNeuron(k, 0);
		
		//get weighted sum of pattern and bias neuron
		for( int j=0; j <= hL.getNumHidden(); j++ ) 
		{
			setNeuron(k, (getNeuron(k) + (hL.getNeuron(j) * who.getWeight(j,k))));
		}
		
		//set to result of sigmoid
		setNeuron(k, activationFunction(getNeuron(k)));
	}
}
Example #10
void Subsampling::forwardPropagate(Eigen::MatrixXd* x, Eigen::MatrixXd*& y,
                                   bool dropout, double* error)
{
  const int N = x->rows();
  this->a.conservativeResize(N, Eigen::NoChange);
  this->y.conservativeResize(N, Eigen::NoChange);
  this->x = x;

  OPENANN_CHECK_EQUALS(x->cols(), fm * inRows * inCols);
  OPENANN_CHECK_EQUALS(this->y.cols(), fm * outRows * outCols);

  a.setZero();
  #pragma omp parallel for
  for(int n = 0; n < N; n++)
  {
    int outputIdx = 0;
    for(int fmo = 0; fmo < fm; fmo++)
    {
      for(int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++)
      {
        int rowBase = fmo * fmInSize + ri * inCols;
        for(int ci = 0, co = 0; ci < maxCol; ci += kernelCols, co++, outputIdx++)
        {
          for(int kr = 0; kr < kernelRows; kr++)
          {
            // Step down one input row per kernel row so the whole
            // kernelRows x kernelCols window is summed, not just its first row
            for(int kc = 0, inputIdx = rowBase + kr * inCols + ci; kc < kernelCols; kc++)
              a(n, outputIdx) += (*x)(n, inputIdx++) * W[fmo](ro, co);
          }
          if(bias)
            a(n, outputIdx) += Wb[fmo](ro, co);
        }
      }
    }
  }

  activationFunction(act, a, this->y);

  if(error && regularization.l1Penalty > 0.0)
    for(int fmo = 0; fmo < fm; fmo++)
      *error += regularization.l1Penalty * W[fmo].array().abs().sum();
  if(error && regularization.l2Penalty > 0.0)
    for(int fmo = 0; fmo < fm; fmo++)
      *error += regularization.l2Penalty * W[fmo].array().square().sum() / 2.0;

  y = &(this->y);
}
Example #11
double derivativeFunction(double input) {
	return activationFunction(input)*(1-activationFunction(input));
}
Example #12
double NetNeuron::computeOut(){
  output = activationFunction(net);
  return output;
}
Example #13
double TanhNeuron::activationDerivative(double x){
    double o = activationFunction(x);
    return slope * (1 - o*o) / 2;
}
Example #14
double LogisticNeuron::activationDerivative(double x){
    double o = activationFunction(x);
    return slope * o * (1 - o);
}
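The derivative formulas in Examples #11, #13 and #14 only hold for specific activation functions: f'(x) = f(x) * (1 - f(x)) is the derivative of the standard logistic sigmoid, slope * (1 - o*o) / 2 matches o = tanh(slope * x / 2), and slope * o * (1 - o) matches o = 1 / (1 + exp(-slope * x)). Sketches of activationFunction definitions consistent with those derivatives (inferred from the math, not taken from the projects above):

#include <cmath>

// Consistent with Example #11: f'(x) = f(x) * (1 - f(x))
double activationFunction(double input) {
	return 1.0 / (1.0 + std::exp(-input));
}

// Consistent with Example #13: f'(x) = slope * (1 - o*o) / 2
double TanhNeuron::activationFunction(double x) {
	return std::tanh(slope * x / 2.0);
}

// Consistent with Example #14: f'(x) = slope * o * (1 - o)
double LogisticNeuron::activationFunction(double x) {
	return 1.0 / (1.0 + std::exp(-slope * x));
}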