Example No. 1
int BIDSSensorLayer::updateState(double timef, double dt){
   pvdata_t * output = getCLayer()->V;
   pvdata_t * input = blayer->getCLayer()->activity->data;
   int index;
   //Iterate through post layer
   for (int i = 0; i < nx * ny; i++){
      assert(nf == 1);
      //Iterate through features
      for (int k = 0; k < nf; k++){
         int x = i % nx;
         int y = i / nx;
         index = kIndex(x, y, k, nx, ny, nf);
         data[i][buf_index] = input[index] - (neutral_val / 256);
         float out = matchFilter(i, (int)(timef * dt));
         output[index] = out * weight;
      }
   }
   //Advance buf_index, wrapping back to 0 at the end of the buffer
   buf_index = (buf_index + 1) % buf_size;
   HyPerLayer::setActivity();
   return PV_SUCCESS;
}
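A side note on the indexing above: the x/y decomposition only round-trips through kIndex if the layout puts features fastest, then x, then y (the usual PetaVision convention). The standalone sketch below illustrates that assumed arithmetic; kIndexSketch is a hypothetical stand-in, not the library function.

#include <cassert>

//Hypothetical stand-in for kIndex, assuming features vary fastest,
//then x, then y
static int kIndexSketch(int x, int y, int f, int nx, int ny, int nf) {
   (void) ny; //ny does not enter the arithmetic for an interior index
   return f + nf * (x + nx * y);
}

int main() {
   const int nx = 4, ny = 3, nf = 1;
   //With nf == 1, the loop index i of Example No. 1 and kIndex agree
   for (int i = 0; i < nx * ny; i++) {
      int x = i % nx;
      int y = i / nx;
      assert(kIndexSketch(x, y, 0, nx, ny, nf) == i);
   }
   return 0;
}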
Example No. 2
int CIFARGTLayer::updateState(double timef, double dt) {
   inputString = std::string(imageLayer->getFilename());
   size_t found = inputString.find_last_of("/\\");
   //CIFAR is 0 indexed; the class digit is the last character of the directory name preceding the file
   char cVal = inputString.at(found-1);
   iVal = cVal - '0';

   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc(); 

   assert(iVal >= 0 && iVal < 10);
   //NF must be 10, one for each class
   assert(loc->nf == 10);
   for(int ni = 0; ni < getNumNeurons(); ni++){
      int nExt = kIndexExtended(ni, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, loc->nx+loc->halo.rt+loc->halo.lt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
      if(fi == iVal){
         A[nExt] = 1;
      }
      else{
         if(negativeGt){
            A[nExt] = -1;
         }
         else{
            A[nExt] = 0;
         }
      }
   }
   return PV_SUCCESS;
}
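The loop above implements a one-hot labeling over the feature planes (one-vs-rest with -1 when negativeGt is set). A minimal standalone sketch of that encoding rule, with encodeLabel as a hypothetical helper:

#include <vector>

//Hypothetical helper: one plane per class, 1 for the true class,
//0 (or -1 when negativeGt is set) everywhere else
static std::vector<float> encodeLabel(int label, int numClasses, bool negativeGt) {
   std::vector<float> out(numClasses, negativeGt ? -1.0f : 0.0f);
   out[label] = 1.0f;
   return out;
}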
Example No. 3
int MLPOutputLayer::updateState(double timef, double dt) {
   int status = MLPSigmoidLayer::updateState(timef, dt);
   //Collect stats if needed
   if(statProgressPeriod > 0){
      //TODO add more if statements for different cases
      if(!localTarget){
         if(getLayerLoc()->nf == 1){
            binaryNonlocalStats();
         }
         else{
            multiclassNonlocalStats();
         }
      }
      else{
         if(getLayerLoc()->nf == 1){
            binaryLocalStats();
         }
         else{
            std::cout << "Not implemented\n";
            exit(EXIT_FAILURE);
            //TODO
            //multiclassNonlocalStats();
         }
      }
   }
   return PV_SUCCESS;
}
Example No. 4
int MaskError::updateState(double time, double dt)
{
   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t * A = getCLayer()->activity->data;

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //Output expected - actual; an expected (excitatory) value of 0 is treated as don't-care
      if(GSynExt[ni] == 0){
         A[next] = 0;
      }
      else{
         A[next] = GSynExt[ni] - GSynInh[ni];
      }
   }
   return PV_SUCCESS;
}
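Examples No. 4 and No. 5 read the synaptic input with a restricted index (ni) but write activity with an extended one (next). Below is a minimal sketch of the assumed mapping, where the restricted frame sits inside a halo-padded extended frame; kIndexExtendedSketch illustrates the expected semantics, it is not the library implementation.

#include <cassert>

//Illustration of the assumed restricted->extended mapping: the
//restricted nx-by-ny frame is embedded in a frame padded by the halo
//(lt, rt, dn, up), shifting x by lt and y by up
static int kIndexExtendedSketch(int k, int nx, int ny, int nf,
                                int lt, int rt, int dn, int up) {
   (void) ny; (void) dn; //unused for an interior index
   int f = k % nf;
   int x = (k / nf) % nx;
   int y = k / (nf * nx);
   int nxExt = nx + lt + rt;
   return f + nf * ((x + lt) + nxExt * (y + up));
}

int main() {
   //With no halo the mapping is the identity
   for (int k = 0; k < 4 * 3 * 2; k++) {
      assert(kIndexExtendedSketch(k, 4, 3, 2, 0, 0, 0, 0) == k);
   }
   return 0;
}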
Example No. 5
int BinaryThresh::updateState(double time, double dt)
{
   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t * A = getCLayer()->activity->data;

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //Activity is 1 if the excitatory input is nonzero, 0 otherwise
      A[next] = GSynExt[ni] == 0 ? 0 : 1;
   }
   return PV_SUCCESS;
}
Example No. 6
int InputLayer::updateState(double timef, double dt) {
   if(!constantValue || firstRun){
      char cVal = inputString.at(int(parent->simulationTime()-1)%numExamples);
      iVal = cVal - '0';
   }
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc(); 
   assert(loc->nf == 2);
   //Set the binary values of the XOR inputs
   std::cout << timef << ": input val:" << iVal << "\n";
   int negVal = -1;
   for(int ni = 0; ni < getNumNeurons(); ni++){
      int nExt = kIndexExtended(ni, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, loc->nx+loc->halo.lt+loc->halo.rt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
      switch(iVal){
         case 0: A[nExt] = negVal;                 break;
         case 1: A[nExt] = (fi == 0) ? negVal : 1; break;
         case 2: A[nExt] = (fi == 0) ? 1 : negVal; break;
         case 3: A[nExt] = 1;                      break;
      }
   }
   firstRun = false;
   return PV_SUCCESS;
}
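The switch above is equivalent to reading iVal as a two-bit code, with feature 0 carrying the high bit and feature 1 the low bit. A standalone sketch of that reading, for illustration only:

#include <cstdio>

int main() {
   const int negVal = -1;
   //A 0 bit is written as negVal and a 1 bit as +1
   for (int iVal = 0; iVal < 4; iVal++) {
      int f0 = ((iVal >> 1) & 1) ? 1 : negVal;
      int f1 = (iVal & 1) ? 1 : negVal;
      printf("iVal=%d -> f0=%d f1=%d\n", iVal, f0, f1);
   }
   return 0;
}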
Example No. 7
int AccumulateLayer::setActivity() {
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   int status = PV_SUCCESS;
   memset(clayer->activity->data, 0, sizeof(pvdata_t)*getNumExtended());
   if( status == PV_SUCCESS ) status = applyVThresh_ANNLayer(num_neurons, getV(), AMin, VThresh, AShift, VWidth, getCLayer()->activity->data, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
   if( status == PV_SUCCESS ) status = applyVMax_ANNLayer(num_neurons, getV(), AMax, getCLayer()->activity->data, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
   return status;
}
Example No. 8
int FilenameParsingGroundTruthLayer::updateState(double time, double dt)
{
   update_timer->start();
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc();
   int num_neurons = getNumNeurons();
   if (num_neurons != numClasses)
   {
      pvError() << "The number of neurons in " << getName() << " is not equal to the number of classes specified in " << parent->getOutputPath() << "/classes.txt\n";
   }   

   for(int b = 0; b < loc->nbatch; b++){
      char * currentFilename = NULL;
      int filenameLen = 0;
      //TODO depending on speed of this layer, more efficient way would be to preallocate currentFilename buffer
      if(parent->icCommunicator()->commRank()==0){
         currentFilename = strdup(movieLayer->getFilename(b));
         //Get length of currentFilename (including the null terminator) and broadcast it
         filenameLen = (int) strlen(currentFilename) + 1;
         //Using the local communicator, as each batch MPI will handle its own run
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         //Broadcast the filename to all other local processes
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }
      }
      else{
         //Receive broadcast about length of filename
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         currentFilename = (char*)calloc(sizeof(char), filenameLen);
         //Receive filename
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }

      std::string fil = currentFilename;
      pvdata_t * ABatch = A + b * getNumExtended();
      for(int i = 0; i < num_neurons; i++){
         int nExt = kIndexExtended(i, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         int fi = featureIndex(nExt, loc->nx+loc->halo.rt+loc->halo.lt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
         bool match = fil.find(classes[i]) != std::string::npos;
         if(match){
            ABatch[nExt] = gtClassTrueValue;
         }
         else{
            ABatch[nExt] = gtClassFalseValue;
         }
      }
      //Free buffer, TODO, preallocate buffer to avoid this
      free(currentFilename);
   }
   update_timer->stop();
   return PV_SUCCESS;
}
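The length-then-bytes pattern above is the standard way to broadcast a variable-length string over MPI: the root sends the size first so the other ranks can allocate before receiving. A self-contained sketch of the same pattern, with bcastString as a hypothetical helper (assumes MPI is already initialized):

#include <mpi.h>
#include <cstdlib>
#include <cstring>

//Rank 0 broadcasts the string length (including the null terminator),
//the other ranks size their buffers, then everyone receives the bytes;
//the caller frees the result
static char * bcastString(const char * src, int rank, MPI_Comm comm) {
   int len = 0;
   char * buf = NULL;
   if (rank == 0) {
      len = (int) strlen(src) + 1; //+1 for the null terminator
      buf = strdup(src);
   }
   MPI_Bcast(&len, 1, MPI_INT, 0, comm);
   if (rank != 0) {
      buf = (char *) calloc(len, sizeof(char));
   }
   MPI_Bcast(buf, len, MPI_CHAR, 0, comm);
   return buf;
}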
Example No. 9
int KmeansLayer::setActivity()
{
    const PVLayerLoc * loc = getLayerLoc();
    int nx = loc->nx;
    int ny = loc->ny;
    int nf = loc->nf;
    PVHalo const * halo = &loc->halo;
    int num_neurons = nx*ny*nf;
    int nbatch = loc->nbatch;
    int status = setActivity_HyPerLayer(nbatch, num_neurons, getCLayer()->activity->data, getV(), nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);

    return status;
}
Example No. 10
int TrainingLayer::updateState(double timed, double dt) {
   int status = updateState(timed, dt, getLayerLoc(), getCLayer()->activity->data, getV(), numTrainingLabels, trainingLabels, curTrainingLabelIndex, strength);
   return status;
}
Example No. 11
int SigmoidLayer::updateState(double timef, double dt) {
   int status = updateState(timef, dt, getLayerLoc(), getCLayer()->activity->data, getV(), 0, NULL, Vth, V0, SigmoidAlpha, SigmoidFlag, InverseFlag);
   return status;
}
Example No. 12
void MLPOutputLayer::multiclassNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   //Winner take all in the output layer
   int currNumRight = 0;
   int currNumWrong = 0;
   assert(classBuffer);
   //Clear classBuffer
   for(int i = 0; i < nf; i++){
      classBuffer[i] = 0;
   }
   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      classBuffer[fi] += A[nExt];
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Normalize classBuffer to find mean
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= nx*ny;
   }
   //Reduce all classBuffers through a mean
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, classBuffer, nf, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   //Normalize classBuffer across processors
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= parent->icCommunicator()->commSize();
   }
#endif // PV_USE_MPI
   //Find max
   float estMaxF = -1000;
   int estMaxFi = -1;
   float actualMaxF = -1000;
   int actualMaxFi = -1;
   for(int i = 0; i < nf; i++){
      if(classBuffer[i] >= estMaxF){
         estMaxF = classBuffer[i];
         estMaxFi = i;
      }
      //Extended index of the first restricted neuron in feature plane i
      int nExt = kIndex(loc->halo.lt, loc->halo.up, i, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      if(gtA[nExt] >= actualMaxF){
         actualMaxF = gtA[nExt];
         actualMaxFi = i;
      }
   }
   //Calculate stats
   //Found winning feature, compare to ground truth
   if(estMaxFi == actualMaxFi){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
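The MPI section above averages per-rank means by summing with MPI_Allreduce and then dividing by the communicator size, which equals the true global mean only because every rank holds an equally sized patch. A minimal sketch of that reduction, with allreduceMean as a hypothetical helper:

#include <mpi.h>

//Sum each rank's local means in place, then divide by the rank count;
//this equals the global mean only when every rank holds the same
//number of samples
static void allreduceMean(float * vals, int n, MPI_Comm comm) {
   int size = 0;
   MPI_Comm_size(comm, &size);
   MPI_Allreduce(MPI_IN_PLACE, vals, n, MPI_FLOAT, MPI_SUM, comm);
   for (int i = 0; i < n; i++) {
      vals[i] /= size;
   }
}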
Example No. 13
void MLPOutputLayer::binaryNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   assert(nf == 1);
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   float sum = 0;
   float gtSum = 0;
   int currNumRight = 0;
   int currNumWrong = 0;
   int totNum = 0;

   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      sumsq += pow(A[nExt] - gtA[nExt], 2);
      //Sum over activity to find mean
      sum += A[nExt];
      gtSum += gtA[nExt];
   }

#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &gtSum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   //Normalize sum to find mean
   sum /= loc->nxGlobal * loc->nyGlobal;
   gtSum /= loc->nxGlobal * loc->nyGlobal;
   //The ground truth is uniform across the layer, so its mean should equal any single value
   assert(gtSum == gtA[0]);

   //Calculate stats
   if(sum < 0 && gtSum < 0){
      currNumRight++;
   }
   else if(sum > 0 && gtSum > 0){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
Example No. 14
int MLPErrorLayer::updateState(double time, double dt)
{

   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t Vth, sig_scale;
   if(!symSigmoid){
      //Calculate constants for the derivative of the sigmoid layer
      Vth = (VthRest+Vrest)/2.0;
      sig_scale = -logf(1.0f/sigmoid_alpha - 1.0f)/(Vth - Vrest);
   }
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * V = getV();

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //Update activity
      //f'(V)*(error)
      //error = gt - finalLayer iff error is last error
      //Default both to 0 so the product below is defined in every branch
      float errProp = 0, gradient = 0;
      //exc is expected, inh is actual
      if(lastError){
         //0 is DCR
         if(GSynExt[ni] == 0){
            errProp = 0;
         }
      }
      else{
         if(strcmp(lossFunction, "squared") == 0){
            //expected - actual
            errProp = GSynExt[ni] - GSynInh[ni];
         }
         else if(strcmp(lossFunction, "entropy") == 0){
            //expected/actual
            errProp = GSynExt[ni]/GSynInh[ni];
         }
         else if(strcmp(lossFunction, "hidden") == 0){
            errProp = GSynExt[ni];
         }
         if(symSigmoid){
            //1.14393 = 1.7159 * (2/3), the scale of the derivative of 1.7159*tanh(2V/3)
            gradient = 1.14393 * (1/(pow(cosh(((float)2/3) * V[ni]), 2))) + linAlpha;
         }
         else{
            gradient = -.5 * sig_scale * (1/(pow(cosh(sig_scale*(Vth - V[ni])), 2)));
         }
      }
      A[next] = dropout[ni] ? 0 : errProp * gradient;
   }
   return PV_SUCCESS;
}
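On the 1.14393 constant in the symSigmoid branch: it is consistent with the derivative of LeCun's scaled tanh f(V) = 1.7159 * tanh(2V/3), whose slope is 1.7159 * (2/3) / cosh^2(2V/3), with 1.7159 * 2/3 = 1.14393..., plus the linAlpha leak term. The sketch below spells out that reading; it is an inference from the constant, not documented library behavior.

#include <cmath>
#include <cassert>

//If the activation is f(V) = 1.7159 * tanh((2/3) * V) + linAlpha * V,
//then f'(V) = 1.7159 * (2/3) / cosh^2((2/3) * V) + linAlpha
static float symSigmoidGradient(float V, float linAlpha) {
   const float scale = 1.7159f * (2.0f / 3.0f); //= 1.14393...
   float c = coshf((2.0f / 3.0f) * V);
   return scale / (c * c) + linAlpha;
}

int main() {
   //At V = 0 the slope is the full scale factor plus the leak
   assert(fabsf(symSigmoidGradient(0.0f, 0.0f) - 1.14393f) < 1e-4f);
   return 0;
}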
Example No. 15
int IncrementLayer::updateState(double timef, double dt) {
   assert(numChannels>=2);  // updateState uses gSynExc and gSynInh
   int status = doUpdateState(timef, dt, &VInited, &nextUpdateTime, firstUpdateTime, displayPeriod, getLayerLoc(), getCLayer()->activity->data, getV(), getVprev(), getNumChannels(), GSyn[0]);
   return status;
}
Example No. 16
int PtwiseProductLayer::updateState(double timef, double dt) {
   int status = doUpdateState(timef, dt, getLayerLoc(), getCLayer()->activity->data, getV(), getNumChannels(), GSyn[0]);
   return status;
}
Example No. 17
void MLPOutputLayer::binaryLocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;

   assert(nf == 1);
   int currNumTotPos = 0;
   int currNumTotNeg = 0;
   int currTruePos = 0;
   int currTrueNeg = 0;
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //DCR: a ground truth of 0 is skipped entirely, so it contributes
      //neither to the counts nor to the sum-squared error
      if(gtA[nExt] == 0){
         continue;
      }
      //Negative
      else if(gtA[nExt] == -1){
         currNumTotNeg++;
         if(A[nExt] < 0){
            currTrueNeg++;
         }
      }
      //Positive
      else if(gtA[nExt] == 1){
         currNumTotPos++;
         if(A[nExt] > 0){
            currTruePos++;
         }
      }
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Do MPI
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotPos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTruePos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTrueNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif
   numTotPos += currNumTotPos;
   numTotNeg += currNumTotNeg;
   truePos += currTruePos;
   trueNeg += currTrueNeg;
   progressNumTotPos += currNumTotPos;
   progressNumTotNeg += currNumTotNeg;
   progressTruePos += currTruePos;
   progressTrueNeg += currTrueNeg;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 50*(float(truePos)/float(numTotPos) + float(trueNeg)/float(numTotNeg));
         float progressScore = 50*(float(progressTruePos)/float(progressNumTotPos) + float(progressTrueNeg)/float(progressNumTotNeg));
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumTotPos = 0;
      progressNumTotNeg = 0;
      progressTruePos = 0;
      progressTrueNeg = 0;
   }
}
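The score printed above, 50*(truePos/numTotPos + trueNeg/numTotNeg), is balanced accuracy expressed as a percentage: the mean of the true-positive and true-negative rates. A minimal sketch of the formula, with balancedAccuracyPct as a hypothetical helper:

//Mean of the true-positive and true-negative rates, scaled to a percentage
static float balancedAccuracyPct(int truePos, int numTotPos,
                                 int trueNeg, int numTotNeg) {
   float tpr = (float) truePos / (float) numTotPos;
   float tnr = (float) trueNeg / (float) numTotNeg;
   return 50.0f * (tpr + tnr);
}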