Example #1
int IncrementLayer::allocateDataStructures() {
   int status = ANNLayer::allocateDataStructures();

   if (status == PV_SUCCESS) status = allocateBuffer(&Vprev, getNumNeurons(), "V at previous time");
   if (status == PV_SUCCESS) {
      assert(GSyn && GSyn[0] && GSyn[1]);
      for( int k=0; k<getNumNeurons(); k++ ) {
         assert(GSyn[0][k]==0 && GSyn[1][k]==0);
      }
   }
   if (status != PV_SUCCESS) exit(EXIT_FAILURE);

   return status;
}
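A note on the pattern above: allocateBuffer is assumed to zero-initialize a named per-layer buffer and report failure through the returned status code. Below is a minimal sketch of that assumed contract, inferred from the call sites rather than taken from the PetaVision source:

// Sketch only: signature and behavior inferred from the calls above.
#include <cstdio>
#include <cstdlib>
enum { PV_SUCCESS = 0, PV_FAILURE = 1 }; // stand-ins for PetaVision's codes
template <typename T>
int allocateBufferSketch(T ** buffer, int numItems, const char * description) {
   *buffer = (T *) calloc((size_t) numItems, sizeof(T));
   if (*buffer == NULL) {
      fprintf(stderr, "unable to allocate \"%s\"\n", description);
      return PV_FAILURE;
   }
   return PV_SUCCESS;
}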
Example #2
int CPTestInputLayer::initializeV() {
   assert(parent->parameters()->value(name, "restart", 0.0f, false)==0.0f); // initializeV should only be called if restart is false
   const PVLayerLoc * loc = getLayerLoc();
   for (int b = 0; b < parent->getNBatch(); b++){
      pvdata_t * VBatch = getV() + b * getNumNeurons();
      for (int k = 0; k < getNumNeurons(); k++){
         int kx = kxPos(k,loc->nx,loc->ny,loc->nf);
         int ky = kyPos(k,loc->nx,loc->ny,loc->nf);
         int kf = featureIndex(k,loc->nx,loc->ny,loc->nf);
         int kGlobal = kIndex(loc->kx0+kx,loc->ky0+ky,kf,loc->nxGlobal,loc->nyGlobal,loc->nf);
         VBatch[k] = (pvdata_t) kGlobal;
      }
   }
   return PV_SUCCESS;
}
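The index helpers used here (kxPos, kyPos, featureIndex, kIndex) assume PetaVision's feature-major layout: the feature index varies fastest, then x, then y. A sketch of the assumed formulas, for orientation only (not the library source):

// Assumed layout: kf fastest, then kx, then ky; ny is unused but kept for symmetry.
static inline int kxPosSketch(int k, int nx, int ny, int nf) { return (k / nf) % nx; }
static inline int kyPosSketch(int k, int nx, int ny, int nf) { return k / (nx * nf); }
static inline int featureIndexSketch(int k, int nx, int ny, int nf) { return k % nf; }
static inline int kIndexSketch(int kx, int ky, int kf, int nx, int ny, int nf) {
   return kf + nf * (kx + nx * ky); // inverse of the three accessors above
}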
Example #3
int MaskFromMemoryBuffer::updateState(double time, double dt)
{
    if (imageLayer->getDataLeft() == dataLeft &&
            imageLayer->getDataTop() == dataTop &&
            imageLayer->getDataWidth() == dataRight-dataLeft &&
            imageLayer->getDataHeight() == dataBottom-dataTop) {
        return PV_SUCCESS; // mask only needs to change if the imageLayer changes its active region
    }

    dataLeft = imageLayer->getDataLeft();
    dataRight = dataLeft+imageLayer->getDataWidth();
    dataTop = imageLayer->getDataTop();
    dataBottom = dataTop + imageLayer->getDataHeight();

    PVLayerLoc const * loc = getLayerLoc();
    for(int b = 0; b < loc->nbatch; b++) {
        pvdata_t * ABatch = getActivity() + b * getNumExtended();
        int const num_neurons = getNumNeurons();
#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for
#endif
        for(int ni = 0; ni < num_neurons; ni++) {
            PVHalo const * halo = &loc->halo;
            int const nx = loc->nx;
            int const ny = loc->ny;
            int const nf = loc->nf;
            int x = kxPos(ni, nx, ny, nf);
            int y = kyPos(ni, nx, ny, nf);
            pvadata_t a = (pvadata_t) (x>=dataLeft && x < dataRight && y >= dataTop && y < dataBottom);
            int nExt = kIndexExtended(ni, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);
            ABatch[nExt] = a;
        }
    }
    return PV_SUCCESS;
}
Example #4
int ComparisonLayer::updateState(double timef, double dt){

   //Grab layer size
   const PVLayerLoc* loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int kx0 = loc->kx0;
   int ky0 = loc->ky0;

   pvdata_t * GSynExt = getChannel(CHANNEL_EXC); //gated
   pvdata_t * GSynInh = getChannel(CHANNEL_INH); //gt

   bool isCorrect = true;
   //Grab the activity layer of current layer
   //We only care about restricted space
   for (int k = 0; k < getNumNeurons(); k++){
      if(GSynExt[k] != GSynInh[k]){
          std::cout << "Connection " << name << " Mismatch at " << k << ": actual value: " << GSynExt[k] << " Expected value: " << GSynInh[k] << ".\n";
          isCorrect = false;
      }
   }
   
   if(!isCorrect){
      exit(-1);
   }
   return PV_SUCCESS;
}
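Note that the comparison above uses exact floating-point equality; for a test layer this is presumably deliberate, since the gated and ground-truth channels are expected to carry bit-identical values.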
Example #5
int CPTestInputLayer::updateState(double timed, double dt) {
   update_timer->start();
//#ifdef PV_USE_OPENCL
//   if(gpuAccelerateFlag) {
//      updateStateOpenCL(timed, dt);
//      //HyPerLayer::updateState(time, dt);
//   }
//   else {
//#endif
      const int nx = clayer->loc.nx;
      const int ny = clayer->loc.ny;
      const int nf = clayer->loc.nf;
      const PVHalo * halo = &clayer->loc.halo;
      const int numNeurons = getNumNeurons();
      const int nbatch = clayer->loc.nbatch;

      //pvdata_t * GSynExc   = getChannel(CHANNEL_EXC);
      //pvdata_t * GSynInh   = getChannel(CHANNEL_INH);
      pvdata_t * GSynHead   = GSyn[0];
      pvdata_t * V = getV();
      pvdata_t * activity = clayer->activity->data;

      CPTestInputLayer_update_state(nbatch, numNeurons, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up, V, VThresh, AMax, AMin, GSynHead, activity);
//#ifdef PV_USE_OPENCL
//   }
//#endif

   update_timer->stop();
   return PV_SUCCESS;
}
Example #6
 GIDSet getGIDs() const
 {
     brain::GIDSet gids;
     for( size_t i = 0; i < getNumNeurons(); ++i )
         gids.insert( i + 1 );
     return gids;
 }
Example #7
int CIFARGTLayer::updateState(double timef, double dt) {
   //getline (inputfile,inputString);
   inputString = std::string(imageLayer->getFilename());
   unsigned found = inputString.find_last_of("/\\");
   //CIFAR is 0 indexed
   char cVal = inputString.at(found-1);
   iVal = cVal - '0';

   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc(); 

   //std::cout << "time: " << parent->simulationTime() << " inputString:" << inputString << "  iVal:" << iVal << "\n";
   assert(iVal >= 0 && iVal < 10);
   //NF must be 10, one for each class
   assert(loc->nf == 10);
   for(int ni = 0; ni < getNumNeurons(); ni++){
      int nExt = kIndexExtended(ni, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, loc->nx+loc->halo.rt+loc->halo.lt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
      if(fi == iVal){
         A[nExt] = 1;
      }
      else{
         if(negativeGt){
            A[nExt] = -1;
         }
         else{
            A[nExt] = 0;
         }
      }
   }
   return PV_SUCCESS;
}
Example #8
int ImageTestLayer::updateStateWrapper(double time, double dt)
{
   Image::updateStateWrapper(time, dt);
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int nbatch = loc->nbatch;
   for(int b = 0; b < nbatch; b++){
      pvdata_t * dataBatch = data + b * getNumExtended();
      for(int nkRes = 0; nkRes < getNumNeurons(); nkRes++){
         //Calculate extended index
         int nkExt = kIndexExtended(nkRes, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);  
         //dataBatch holds normalized values, so rescale by 255 before comparing
         pvdata_t checkVal = dataBatch[nkExt] * 255;

         int kxGlobal = kxPos(nkRes, nx, ny, nf) + loc->kx0;
         int kyGlobal = kyPos(nkRes, nx, ny, nf) + loc->ky0; 
         int kf = featureIndex(nkRes, nx, ny, nf);

         pvdata_t expectedVal = kIndex(kxGlobal, kyGlobal, kf, loc->nxGlobal, loc->nyGlobal, nf);
         if(fabs(checkVal - expectedVal) >= 1e-5){
            std::cout << "ImageFileIO test Expected: " << expectedVal << " Actual: " << checkVal << "\n";
            exit(-1);
         }
      }
   }
   return PV_SUCCESS;
}
Example #9
// set V to global x/y/f position
int PlasticConnTestLayer::copyAtoV(){
   const PVLayerLoc * loc = getLayerLoc();
   pvdata_t * V = getV();
   pvdata_t * A = clayer->activity->data;
   for (int kLocal = 0; kLocal < getNumNeurons(); kLocal++){
      int kExtended = kIndexExtended(kLocal, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      V[kLocal] = A[kExtended];
   }
   return PV_SUCCESS;
}
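kIndexExtended, used throughout these examples, maps a restricted (interior) index to its position in the extended buffer that includes the halo border. A self-contained sketch of the assumed mapping:

// Sketch only: shifts the restricted coordinates into the halo frame and
// re-linearizes against the extended line width nx + lt + rt.
static inline int kIndexExtendedSketch(int k, int nx, int ny, int nf,
      int lt, int rt, int dn, int up) {
   int kx = (k / nf) % nx + lt;  // restricted x, shifted right by the left halo
   int ky = k / (nx * nf) + up;  // restricted y, shifted down by the top halo
   int kf = k % nf;              // feature index is unchanged
   return kf + nf * (kx + (nx + lt + rt) * ky);
}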
Example #10
int InputLayer::updateState(double timef, double dt) {
   if(!constantValue || firstRun){
      char cVal = inputString.at(int(parent->simulationTime()-1)%numExamples);
      iVal = cVal - '0';
   }
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc(); 
   assert(loc->nf == 2);
   //Set binary activity for each of the four XOR input cases (iVal encodes the two input bits)
   
   std::cout << timef << ": input val:" << iVal << "\n";
   
   int negVal = -1;
   for(int ni = 0; ni < getNumNeurons(); ni++){
      int nExt = kIndexExtended(ni, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, loc->nx+loc->halo.lt+loc->halo.rt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
      switch(iVal){
         case 0:
            if(fi == 0){
               A[nExt] = negVal;
            }
            if(fi == 1){
               A[nExt] = negVal;
            }
            break;
         case 1:
            if(fi == 0){
               A[nExt] = negVal;
            }
            if(fi == 1){
               A[nExt] = 1;
            }
            break;
         case 2:
            if(fi == 0){
               A[nExt] = 1;
            }
            if(fi == 1){
               A[nExt] = negVal;
            }
            break;
         case 3:
            if(fi == 0){
               A[nExt] = 1;
            }
            if(fi == 1){
               A[nExt] = 1;
            }
            break;
      }
   }
   firstRun = false;
   return PV_SUCCESS;
}
Example #11
int LCALIFLayer::updateState(double timed, double dt)
{
   //Calculate_state kernel
   for (int k=0; k<getNumNeuronsAllBatches(); k++) {
      G_Norm[k] = GSyn[CHANNEL_NORM][k]; // Copy GSyn buffer on normalizing channel for checkpointing, since LCALIF_update_state will blank the GSyn's
   }
   LCALIF_update_state(clayer->loc.nbatch, getNumNeurons(), timed, dt, clayer->loc.nx, clayer->loc.ny, clayer->loc.nf,
         clayer->loc.halo.lt, clayer->loc.halo.rt, clayer->loc.halo.dn, clayer->loc.halo.up, Vscale, Vadpt, tauTHR, targetRateHz, integratedSpikeCount, &lParams,
         randState->getRNG(0), clayer->V, Vth, G_E, G_I, G_IB, GSyn[0], clayer->activity->data, getGapStrength(), Vattained, Vmeminf, (int) normalizeInputFlag,
         GSynExcEffective, GSynInhEffective, excitatoryNoise, inhibitoryNoise, inhibNoiseB);
   return PV_SUCCESS;
}
Example #12
int PursuitLayer::allocateDataStructures() {
    int status = ANNLayer::allocateDataStructures();

    if (status == PV_SUCCESS) {
        status = allocateBuffer(&wnormsq, getLayerLoc()->nf, "wnormsq");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&minimumLocations, getNumNeurons(), "minimumLocations");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&energyDrops, getNumNeurons(), "energyDrops");
    }

    int xy = getLayerLoc()->nx * getLayerLoc()->ny;
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&minFeatures, xy, "minFeatures");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&energyDropsBestFeature, xy, "energyDropsBestFeature");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&foundFeatures, xy, "foundFeatures");
        for (int k=0; k<xy; k++) {
            foundFeatures[k]=-1;
        }
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&minLocationsBestFeature, xy, "minLocationsBestFeature");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&gSynSparse, xy, "gSynSparse");
    }
    if (status==PV_SUCCESS) {
        status = allocateBuffer(&minEnergyFiltered, xy, "minEnergyFiltered");
    }
    if (status != PV_SUCCESS) abort();

    return status;
}
Example #13
int FilenameParsingGroundTruthLayer::updateState(double time, double dt)
{
   update_timer->start();
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc();
   int num_neurons = getNumNeurons();
   if (num_neurons != numClasses)
   {
      pvError() << "The number of neurons in " << getName() << " is not equal to the number of classes specified in " << parent->getOutputPath() << "/classes.txt\n";
   }   

   for(int b = 0; b < loc->nbatch; b++){
      char * currentFilename = NULL;
      int filenameLen = 0;
      //TODO depending on speed of this layer, more efficient way would be to preallocate currentFilename buffer
      if(parent->icCommunicator()->commRank()==0){
         currentFilename = strdup(movieLayer->getFilename(b));
         //Get length of currentFilename and broadcast
         filenameLen = (int) strlen(currentFilename) + 1; //+1 for the null terminator
         //Using local communicator, as each batch MPI process handles its own run
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         //Broadcast filename to all other local processes
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }
      else{
         //Receive broadcast about length of filename
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         currentFilename = (char*)calloc(sizeof(char), filenameLen);
         //Receive filename
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }

      std::string fil = currentFilename;
      pvdata_t * ABatch = A + b * getNumExtended();
      for(int i = 0; i < num_neurons; i++){
         int nExt = kIndexExtended(i, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         int fi = featureIndex(nExt, loc->nx+loc->halo.rt+loc->halo.lt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
         bool match = fil.find(classes[i]) != std::string::npos;
         if(match){
            ABatch[nExt] = gtClassTrueValue;
         }
         else{
            ABatch[nExt] = gtClassFalseValue;
         }
      }
      //Free buffer, TODO, preallocate buffer to avoid this
      free(currentFilename);
   }
   update_timer->stop();
   return PV_SUCCESS;
}
Example #14
BOOST_AUTO_TEST_CASE_TEMPLATE(factory, T, float_types)
{
	auto input = createLayer<T>(10, "input");
	auto output = createLayer<T>(10, "output");
	auto fc = createLayer<T>(7, 10, "fc");
	auto nl = createLayer<T>(9, "nl tanh");
	
	// check results
	BOOST_CHECK_EQUAL( input->getNumNeurons(), 10 );
	BOOST_CHECK_EQUAL( input->getNumInputs(), 0 );
	BOOST_CHECK_EQUAL( input->getLayerType(), "input" );
	
	BOOST_CHECK_EQUAL( output->getNumNeurons(), 10 );
	BOOST_CHECK_EQUAL( output->getNumInputs(), 10 );
	BOOST_CHECK_EQUAL( output->getLayerType(), "output" );
	
	BOOST_CHECK_EQUAL( fc->getNumNeurons(), 10 );
	BOOST_CHECK_EQUAL( fc->getNumInputs(), 7 );
	BOOST_CHECK_EQUAL( fc->getLayerType(), "fc" );
	
	BOOST_CHECK_EQUAL( nl->getNumNeurons(), 9 );
	BOOST_CHECK_EQUAL( nl->getNumInputs(), 9 );
	BOOST_CHECK_EQUAL( nl->getLayerType(), "nl" );
}
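A note on what the checks above imply: the factory apparently parses only the leading token of the descriptor ("nl tanh" yields layer type "nl"), an fc layer created as (7, 10, ...) reports 7 inputs and 10 neurons, and input layers report zero inputs.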
Example #15
int MatchingPursuitResidual::updateState(double timed, double dt) {
   pvdata_t * V = getV();
   if (inputInV) {
      for (int k=0; k<getNumNeuronsAllBatches(); k++) {
         V[k] -= GSyn[1][k];
      }
   }
   else {
      for (int k=0; k<getNumNeuronsAllBatches(); k++) {
         V[k] = GSyn[0][k];
      }
      inputInV = true;
   }
   PVLayerLoc const * loc = getLayerLoc();
   setActivity_HyPerLayer(loc->nbatch, getNumNeurons(), getActivity(), V, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
   return PV_SUCCESS;
}
Example #16
int LeakyIntegrator::updateState(double timed, double dt) {
   pvdata_t * V = getV();
   pvdata_t * gSyn = GSyn[0];
   pvdata_t decayfactor = (pvdata_t) exp(-dt/integrationTime);
   for (int k=0; k<getNumNeuronsAllBatches(); k++) {
      V[k] *= decayfactor;
      V[k] += gSyn[k];
   }
   int nx = getLayerLoc()->nx;
   int ny = getLayerLoc()->ny;
   int nf = getLayerLoc()->nf;
   int nbatch = getLayerLoc()->nbatch;

   PVHalo const * halo = &getLayerLoc()->halo;
   pvdata_t * A = getActivity();
   int status = setActivity_PtwiseLinearTransferLayer(nbatch, getNumNeurons(), A, V, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up, numVertices, verticesV, verticesA, slopes);
   return status;
}
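The decay factor implements V <- V*exp(-dt/integrationTime) + gSyn, a discrete leaky integrator. A standalone sketch with hypothetical values, showing that a constant input g drives V to the fixed point g / (1 - exp(-dt/tau)):

#include <cmath>
#include <cstdio>
int main() {
   const double dt = 1.0, tau = 10.0, g = 1.0; // hypothetical values
   const double decay = std::exp(-dt / tau);
   double V = 0.0;
   for (int step = 0; step < 200; ++step) {
      V = V * decay + g; // same update as the loop above
   }
   std::printf("V = %f, fixed point = %f\n", V, g / (1.0 - decay));
   return 0;
}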
Example #17
int ANNSquaredLayer::updateState(double time, double dt)
{
   const int nx = clayer->loc.nx;
   const int ny = clayer->loc.ny;
   const int nf = clayer->loc.nf;
   const int nbatch = clayer->loc.nbatch;

   pvdata_t * GSynHead = GSyn[0];
   pvdata_t * V = getV();
   pvdata_t * activity = clayer->activity->data;

   ANNSquaredLayer_update_state(nbatch, getNumNeurons(), nx, ny, nf, clayer->loc.halo.lt, clayer->loc.halo.rt, clayer->loc.halo.dn, clayer->loc.halo.up, V, GSynHead, activity);

   return PV_SUCCESS;
}
Example #18
int MoviePvpTestLayer::updateStateWrapper(double time, double dt)
{
   MoviePvp::updateStateWrapper(time, dt);
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int nbatch = loc->nbatch;

   for(int b = 0; b < nbatch; b++){
      pvdata_t * dataBatch = data + b * getNumExtended();
      int frameIdx = 0; //default if batchMethod is unrecognized
      if(strcmp(getBatchMethod(), "byImage") == 0){
         frameIdx = (time-1) * nbatch + b;
      }
      else if(strcmp(getBatchMethod(), "byMovie") == 0){
         frameIdx = b * 2 + (time-1);
      }
      for(int nkRes = 0; nkRes < getNumNeurons(); nkRes++){
         //Calculate extended index
         int nkExt = kIndexExtended(nkRes, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);  
         //checkVal is the value read back for this batch element
         pvdata_t checkVal = dataBatch[nkExt];

         int kxGlobal = kxPos(nkRes, nx, ny, nf) + loc->kx0;
         int kyGlobal = kyPos(nkRes, nx, ny, nf) + loc->ky0; 
         int kf = featureIndex(nkRes, nx, ny, nf);

         pvdata_t expectedVal = kIndex(kxGlobal, kyGlobal, kf, loc->nxGlobal, loc->nyGlobal, nf) + frameIdx*192;
         if(fabs(checkVal - expectedVal) >= 1e-5){
            std::cout << "ImageFileIO " << name << " test Expected: " << expectedVal << " Actual: " << checkVal << "\n";
            //exit(-1);
         }
      }
   }
   return PV_SUCCESS;
}
Example #19
int IncrementLayer::setActivity() {
   const PVLayerLoc * loc = getLayerLoc();
   return setActivity_IncrementLayer(getNumNeurons(), clayer->activity->data, getV(), getVprev(), loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
}
Example #20
void MLPOutputLayer::multiclassNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   //Winner take all in the output layer
   int currNumRight = 0;
   int currNumWrong = 0;
   assert(classBuffer);
   //Clear classBuffer
   for(int i = 0; i < nf; i++){
      classBuffer[i] = 0;
   }
   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      classBuffer[fi] += A[nExt];
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Normalize classBuffer to find mean
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= nx*ny;
   }
   //Reduce all classBuffers through a mean
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, classBuffer, nf, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   //Normalize classBuffer across processors
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= parent->icCommunicator()->commSize();
   }
#endif // PV_USE_MPI
   //Find max
   float estMaxF = -1000;
   int estMaxFi = -1;
   float actualMaxF = -1000;
   int actualMaxFi = -1;
   for(int i = 0; i < nf; i++){
      if(classBuffer[i] >= estMaxF){
         estMaxF = classBuffer[i];
         estMaxFi = i;
      }
      int nExt = kIndex(loc->halo.lt, loc->halo.up, i, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      if(gtA[nExt] >= actualMaxF){
         actualMaxF = gtA[nExt];
         actualMaxFi = i;
      }
   }
   //Calculate stats
   //Found winning feature, compare to ground truth
   if(estMaxFi == actualMaxFi){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
Example #21
virtual pvconductance_t * getConductance(ChannelType ch) {
   return ch < this->numChannels ? G_E + ch*getNumNeurons() : NULL;
}
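This accessor assumes the conductance channels are packed contiguously starting at G_E, one block of getNumNeurons() values per channel. A self-contained toy version of the same pattern (all names hypothetical):

#include <cstddef>
typedef float pvconductance_t;
struct ToyConductanceStore {
   int numChannels;
   int numNeurons;
   pvconductance_t * G_E; // channel 0; channel ch starts at G_E + ch*numNeurons
   pvconductance_t * getConductance(int ch) {
      return ch < numChannels ? G_E + ch * numNeurons : NULL;
   }
};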
Example #22
int inverseCochlearLayer::updateState(double time, double dt){
   update_timer->start();

   // if (time >= nextDisplayTime) {
   //    nextDisplayTime += cochlearLayer->getDisplayPeriod();

   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;

   //This layer must be 1x1x(INVERSECOCHLEARLAYER_NF)
   assert(nx == 1 && ny == 1 && nf == INVERSECOCHLEARLAYER_NF);
   int num_input_neurons = inputLayer->getNumNeurons();
   int num_output_neurons = getNumNeurons();
   //num_output_neurons should be exactly INVERSECOCHLEARLAYER_NF
   assert(num_output_neurons == INVERSECOCHLEARLAYER_NF);

   //Record the current input frame in the ring buffer, undoing the cochlear scaling
   timehistory[ringBufferLevel] = time;
   for (int k=0; k<cochlearLayer->getLayerLoc()->nx; k++) {
      xhistory[ringBufferLevel][k] = (inputLayer->getLayerData()[k]) / cochlearLayer->getCochlearScales()[k];
   } // memcpy?

   //Accumulate the inverse transform over the buffered history
   double sumreal = 0.0;
   double sumimag = 0.0;
   for (int j=0; j<bufferLength; j++) {
      for (int k=0; k<numFrequencies; k++) {
         //sumreal += Mreal[j][k]*xhistory[ringBuffer(j)][k];
         sumimag += Mimag[j][k]*xhistory[ringBuffer(j)][k];
      }
   }
   //sumreal /= (2*PI);
   sumimag /= (2*PI);

   //*outV is where the output data should go
   pvdata_t * outV = getV();
   outV[0] = sumimag / 10;
   //outV[1] = sumreal;

   //Copy V to the activity buffer
   PV::HyPerLayer::setActivity();
   // With bufferLength 1, sound is reproduced well but at a higher amplitude;
   // scaling activity by 0.25 corrects the amplitude to approximately its original
   // value, but the correction factor probably depends on frequency. --pfs Jun 23, 2014

   //Advance the ring buffer write position
   ringBufferLevel++;
   if (ringBufferLevel == bufferLength) { ringBufferLevel = 0; }

   //} // end nextDisplayTime

   update_timer->stop();
   return PV_SUCCESS;
}
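The ringBuffer(j) helper is not shown in this example. One plausible (hypothetical) definition, walking j steps back from the most recently written slot and wrapping modulo bufferLength:

// Hypothetical helper only; the real member function is not part of this example.
static int ringBufferSlot(int ringBufferLevel, int bufferLength, int j) {
   return (ringBufferLevel + bufferLength - j) % bufferLength;
}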
Example #23
void MLPOutputLayer::binaryNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   assert(nf == 1);
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   float sum = 0;
   float gtSum = 0;
   int currNumRight = 0;
   int currNumWrong = 0;
   int totNum = 0;

   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      sumsq += pow(A[nExt] - gtA[nExt], 2);
      //Sum over activity to find mean
      sum += A[nExt];
      gtSum += gtA[nExt];
   }

#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &gtSum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   //Normalize sum to find mean
   sum /= loc->nxGlobal * loc->nyGlobal;
   gtSum /= loc->nxGlobal * loc->nyGlobal;
   //gtA is assumed uniform, so its mean should equal any single value
   assert(gtSum == gtA[0]);

   //Calculate stats
   if(sum < 0 && gtSum < 0){
      currNumRight++;
   }
   else if(sum > 0 && gtSum > 0){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
Example #24
void MLPOutputLayer::binaryLocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;

   assert(nf == 1);
   int currNumTotPos = 0;
   int currNumTotNeg = 0;
   int currTruePos = 0;
   int currTrueNeg = 0;
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //DCR
      if(gtA[nExt] == 0){
         continue;
         //Note that sumsq doesn't get updated in this case, so a dcr doesn't contribute to the score at all
      }
      //Negative
      else if(gtA[nExt] == -1){
         currNumTotNeg++;
         if(A[nExt] < 0){
            currTrueNeg++;
         }
      }
      //Positive
      else if(gtA[nExt] == 1){
         currNumTotPos++;
         if(A[nExt] > 0){
            currTruePos++;
         }
      }
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Do MPI
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotPos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTruePos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTrueNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif
   numTotPos += currNumTotPos;
   numTotNeg += currNumTotNeg;
   truePos += currTruePos;
   trueNeg += currTrueNeg;
   progressNumTotPos += currNumTotPos;
   progressNumTotNeg += currNumTotNeg;
   progressTruePos += currTruePos;
   progressTrueNeg += currTrueNeg;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 50*(float(truePos)/float(numTotPos) + float(trueNeg)/float(numTotNeg));
         float progressScore = 50*(float(progressTruePos)/float(progressNumTotPos) + float(progressTrueNeg)/float(progressNumTotNeg));
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumTotPos = 0;
      progressNumTotNeg = 0;
      progressTruePos = 0;
      progressTrueNeg = 0;
   }
}
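The score printed above is balanced accuracy as a percentage: 50*(truePos/numTotPos + trueNeg/numTotNeg) averages the true-positive and true-negative rates, so positive and negative classes of different sizes weigh equally.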
Example #25
int TrainingLayer::initializeV() {
   memset(getV(), 0, ((size_t) getNumNeurons())*sizeof(pvdata_t));
   // above line not necessary if V was allocated with calloc
   getV()[trainingLabels[curTrainingLabelIndex]] = strength;
   return PV_SUCCESS;
}
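Note: clearing V with memset is valid for floating-point data because the IEEE-754 all-zero bit pattern represents 0.0f, which is also why calloc-allocated storage needs no further initialization.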