void nnff(NN* nn, int data_index, const double** train_x)
{
    int k, i, j, num_pre_unit;
    const double *pre_a = train_x[data_index];  // activations of the previous layer; starts at the input sample
    double *a = NULL;                           // activations to update for the current layer (every layer except the input)
    Layer *layer = nn->layer;

    for( k = 0; k < nn->n - 1; k++ ){

        // number of units feeding into layer k
        if( k == 0 )
            num_pre_unit = nn->inputUnits;
        else
            num_pre_unit = layer[k-1].units;

        a = layer[k].a;

        for( i = 0; i < layer[k].units; i++ ){

            // weighted sum: bias weight (implicit input of 1.0) plus the previous layer's activations
            a[i] = layer[k].w[i][0];
            for( j = 0; j < num_pre_unit; j++ ){
                a[i] += pre_a[j] * layer[k].w[i][j+1];
            }

            // apply the layer's activation function
            if( layer[k].activationFunc == SIGM )
                a[i] = sigm(a[i]);
            else if( layer[k].activationFunc == TANH_OPT )
                a[i] = tanh_opt(a[i]);
        }

        // this layer's activations feed the next layer
        pre_a = a;
    }
}
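Example #1 omits its supporting declarations. A minimal sketch of the NN and Layer structures and the activation helpers it appears to assume is shown below; the field names are inferred from how the code above uses them, and the tanh_opt constants follow the common scaled-tanh convention, so treat all of this as assumptions rather than the original headers.

#include <cmath>

// Hypothetical supporting declarations; field names mirror how Example #1 uses them.
enum ActivationFunc { SIGM, TANH_OPT };

struct Layer {
    int units;                      // number of units in this layer
    double *a;                      // activations, one per unit
    double **w;                     // weights: w[i][0] is the bias of unit i, w[i][1..] its input weights
    ActivationFunc activationFunc;  // nonlinearity applied by this layer
};

struct NN {
    int n;            // number of layers, counting the input layer
    int inputUnits;   // size of one input sample
    Layer *layer;     // the n - 1 layers after the input
};

// Logistic sigmoid.
inline double sigm(double x) { return 1.0 / (1.0 + std::exp(-x)); }

// Scaled tanh; the 1.7159 and 2/3 constants are the usual convention and an assumption here.
inline double tanh_opt(double x) { return 1.7159 * std::tanh((2.0 / 3.0) * x); }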
Example #2
//NNFF performs a feedforward pass and returns the loss on the batch (x, y)
double FBNN::nnff(const FMatrix& x, const FMatrix& y)
{
    double L = 0;
    if(m_oAs.empty())
    {
        for(int i = 0; i < m_iN; ++i)
            m_oAs.push_back(std::make_shared<FMatrix>(FMatrix()));
    }
    *m_oAs[0] = addPreColumn(x, 1);    // prepend the bias column to the input
    if(m_fDropoutFraction > 0 && !m_fTesting)
    {
        if(m_odOMs.empty())    // allocate the dropout masks on first use
        {
            for(int i = 0; i < m_iN - 1; ++i)
                m_odOMs.push_back(std::make_shared<FMatrix>(FMatrix()));
        }
    }

    // feedforward pass through the hidden layers
    for(int i = 1; i < m_iN - 1; ++i)
    {
        // activation function: calculate the unit's outputs (including the bias term)
        if(m_strActivationFunction == "sigm")
        {
            *m_oAs[i] = sigm((*m_oAs[i-1]) * blaze::trans(*m_oWs[i-1]));
        }
        else if(m_strActivationFunction == "tanh_opt")
        {
            *m_oAs[i] = tanh_opt((*m_oAs[i-1]) * blaze::trans(*m_oWs[i-1]));
        }

        // dropout: scale activations at test time, mask them at training time
        if(m_fDropoutFraction > 0)
        {
            if(m_fTesting)
                *m_oAs[i] = (*m_oAs[i]) * (1 - m_fDropoutFraction);
            else
            {
                *m_odOMs[i] = rand(m_oAs[i]->rows(), m_oAs[i]->columns()) > m_fDropoutFraction;
                *m_oAs[i] = bitWiseMul(*m_oAs[i], *m_odOMs[i]);
            }
        }

        // calculate running exponential activations for use with sparsity
        if(m_fNonSparsityPenalty > 0)
            *m_oPs[i] = (*m_oPs[i]) * 0.99 + columnMean(*m_oAs[i]);

        // add the bias term for the next layer
        *m_oAs[i] = addPreColumn(*m_oAs[i], 1);
    }

    // output layer
    if(m_strOutput == "sigm")
    {
        *m_oAs[m_iN-1] = sigm((*m_oAs[m_iN-2]) * blaze::trans(*m_oWs[m_iN-2]));
    }
    else if(m_strOutput == "linear")
    {
        *m_oAs[m_iN-1] = (*m_oAs[m_iN-2]) * blaze::trans(*m_oWs[m_iN-2]);
    }
    else if(m_strOutput == "softmax")
    {
        *m_oAs[m_iN-1] = softmax((*m_oAs[m_iN-2]) * blaze::trans(*m_oWs[m_iN-2]));
    }

    // error and loss
    m_oEp = std::make_shared<FMatrix>(y - (*m_oAs[m_iN-1]));

    if(m_strOutput == "sigm" || m_strOutput == "linear")
    {
        L = 0.5 * matrixSum(bitWiseSquare(*m_oEp)) / x.rows();
    }
    else if(m_strOutput == "softmax")
    {
        L = -matrixSum(bitWiseMul(y, bitWiseLog(*m_oAs[m_iN-1]))) / x.rows();
    }
    return L;
}
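The loss returned at the end of Example #2 is the batch-averaged squared error for the "sigm" and "linear" outputs and the batch-averaged cross-entropy for "softmax". A minimal scalar sketch of the same two formulas, using plain std::vector rows instead of blaze matrices and with hypothetical function names, is:

#include <cmath>
#include <vector>

// Batch-averaged losses matching the expressions at the end of FBNN::nnff.
// 'outputs' holds the network outputs a, 'targets' the labels y, one row per sample.

// Used for the "sigm" and "linear" outputs: L = 0.5 * sum((y - a)^2) / rows.
double squaredErrorLoss(const std::vector<std::vector<double>>& outputs,
                        const std::vector<std::vector<double>>& targets)
{
    double sum = 0;
    for (size_t r = 0; r < outputs.size(); ++r)
        for (size_t c = 0; c < outputs[r].size(); ++c) {
            double e = targets[r][c] - outputs[r][c];
            sum += e * e;
        }
    return 0.5 * sum / outputs.size();
}

// Used for the "softmax" output: L = -sum(y .* log(a)) / rows.
double crossEntropyLoss(const std::vector<std::vector<double>>& outputs,
                        const std::vector<std::vector<double>>& targets)
{
    double sum = 0;
    for (size_t r = 0; r < outputs.size(); ++r)
        for (size_t c = 0; c < outputs[r].size(); ++c)
            sum += targets[r][c] * std::log(outputs[r][c]);
    return -sum / outputs.size();
}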