Example #1
int MaskError::updateState(double time, double dt)
{
   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Get pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * V = getV();

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //expected - actual only if expected isn't 0
      if(GSynExt[ni] == 0){
         A[next] = 0;
      }
      else{
         A[next] = GSynExt[ni] - GSynInh[ni];
      }
   }
   return PV_SUCCESS;
}
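Every example on this page centers on kIndexExtended, which maps a restricted (halo-free) neuron index into the extended activity buffer that includes the margin. A minimal sketch of the indexing math, inferred from the call sites above rather than from the library itself:

// Sketch only: maps restricted index k in an nx*ny*nf layer to the index in
// the extended buffer padded by halo widths lt (left), rt (right), dn (down),
// up (up).
static inline int kIndexExtendedSketch(int k, int nx, int ny, int nf,
                                       int lt, int rt, int dn, int up) {
   // Feature-major layout: feature varies fastest, then x, then y.
   const int kf = k % nf;
   const int kx = (k / nf) % nx;
   const int ky = k / (nf * nx);
   // Shift by the left/top halo and relinearize over the padded row width.
   return kf + ((ky + up) * (nx + lt + rt) + (kx + lt)) * nf;
}

Note that only lt and up shift the index; rt matters because it widens the extended row stride, while dn only pads below and never enters the formula.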
Example #2
int MaskFromMemoryBuffer::updateState(double time, double dt)
{
    if (imageLayer->getDataLeft() == dataLeft &&
            imageLayer->getDataTop() == dataTop &&
            imageLayer->getDataWidth() == dataRight-dataLeft &&
            imageLayer->getDataHeight() == dataBottom-dataTop) {
        return PV_SUCCESS; // mask only needs to change if the imageLayer changes its active region
    }

    dataLeft = imageLayer->getDataLeft();
    dataRight = dataLeft+imageLayer->getDataWidth();
    dataTop = imageLayer->getDataTop();
    dataBottom = dataTop + imageLayer->getDataHeight();

    PVLayerLoc const * loc = getLayerLoc();
    for(int b = 0; b < loc->nbatch; b++) {
        pvdata_t * ABatch = getActivity() + b * getNumExtended();
        int const num_neurons = getNumNeurons();
#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for
#endif
        for(int ni = 0; ni < num_neurons; ni++) {
            PVHalo const * halo = &loc->halo;
            int const nx = loc->nx;
            int const ny = loc->ny;
            int const nf = loc->nf;
            int x = kxPos(ni, nx, ny, nf);
            int y = kyPos(ni, nx, ny, nf);
            pvadata_t a = (pvadata_t) (x>=dataLeft && x < dataRight && y >= dataTop && y < dataBottom);
            int nExt = kIndexExtended(ni, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);
            ABatch[nExt] = a;
        }
    }
    return PV_SUCCESS;
}
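This example also leans on the companion position helpers kxPos, kyPos, featureIndex, and kIndex. Hedged sketches consistent with the feature-major layout assumed throughout (signatures taken from the call sites, not from the library headers):

static inline int kxPosSketch(int k, int nx, int ny, int nf)        { return (k / nf) % nx; }
static inline int kyPosSketch(int k, int nx, int ny, int nf)        { return k / (nf * nx); }
static inline int featureIndexSketch(int k, int nx, int ny, int nf) { return k % nf; }
static inline int kIndexSketch(int kx, int ky, int kf, int nx, int ny, int nf) {
   return kf + (kx + ky * nx) * nf;   // inverse of the three accessors above
}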
Example #3
int ImageTestLayer::updateStateWrapper(double time, double dt)
{
   Image::updateStateWrapper(time, dt);
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int nbatch = loc->nbatch;
   for(int b = 0; b < nbatch; b++){
      pvdata_t * dataBatch = data + b * getNumExtended();
      for(int nkRes = 0; nkRes < getNumNeurons(); nkRes++){
         //Calculate extended index
         int nkExt = kIndexExtended(nkRes, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);  
         //checkVal is this batch's value at the extended index, rescaled to 0-255
         pvdata_t checkVal = dataBatch[nkExt] * 255;

         int kxGlobal = kxPos(nkRes, nx, ny, nf) + loc->kx0;
         int kyGlobal = kyPos(nkRes, nx, ny, nf) + loc->ky0; 
         int kf = featureIndex(nkRes, nx, ny, nf);

         pvdata_t expectedVal = kIndex(kxGlobal, kyGlobal, kf, loc->nxGlobal, loc->nyGlobal, nf);
         if(fabs(checkVal - expectedVal) >= 1e-5){
            pvError() << "ImageFileIO test Expected: " << expectedVal << " Actual: " << checkVal << "\n";
         }
      }
   }
   return PV_SUCCESS;
}
Example #4
int BinaryThresh::updateState(double time, double dt)
{
   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Get pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * V = getV();

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //Activity is 1 if the excitatory input is nonzero, 0 otherwise
      A[next] = GSynExt[ni] == 0 ? 0 : 1;
   }
   return PV_SUCCESS;
}
Example #5
float GradientCheckConn::getLogErrCost(){
   float* estA = estLayer->getActivity();
   float* gtA = gtLayer->getActivity();
   const PVLayerLoc * gtLoc = gtLayer->getLayerLoc();
   const PVLayerLoc * estLoc = estLayer->getLayerLoc();
   float sumcost = 0;

   for(int kRes = 0; kRes < estLayer->getNumNeurons(); kRes++){
      int estExt = kIndexExtended(kRes, estLoc->nx, estLoc->ny, estLoc->nf, estLoc->halo.lt, estLoc->halo.rt, estLoc->halo.dn, estLoc->halo.up);
      int gtExt = kIndexExtended(kRes, gtLoc->nx, gtLoc->ny, gtLoc->nf, gtLoc->halo.lt, gtLoc->halo.rt, gtLoc->halo.dn, gtLoc->halo.up);

      if(gtA[gtExt] == 1){
         sumcost += log(estA[estExt]);
      }
   }
   return -sumcost;
}
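In other words, this probe accumulates the negative log-likelihood of the estimate at exactly the positions the ground truth marks as 1: sumcost = sum over {k : gtA[k] == 1} of log(estA[k]), returned negated so that a perfect estimate (probability 1 at every true position) gives a cost of 0.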
Example #6
int BIDSCloneLayer::mapCoords(){

   const PVLayerLoc origLoc = originalLayer->getCLayer()->loc;

   //Copy restricted clone data to current clayer data
   for(int i = 0; i < numNodes; i++){
      int index = kIndex(coords[i].xCoord, coords[i].yCoord, 0, clayer->loc.nx, clayer->loc.ny, clayer->loc.nf);
      int destIndexEx = kIndexExtended(index, clayer->loc.nx, clayer->loc.ny, clayer->loc.nf, clayer->loc.halo.lt, clayer->loc.halo.rt, clayer->loc.halo.dn, clayer->loc.halo.up);
      int srcIndexEx = kIndexExtended(index, origLoc.nx, origLoc.ny, origLoc.nf, origLoc.halo.lt, origLoc.halo.rt, origLoc.halo.dn, origLoc.halo.up);

      this->clayer->activity->data[destIndexEx] = originalLayer->getCLayer()->activity->data[srcIndexEx] == 0 ? 0:1;
   }
   return PV_SUCCESS;
}
Example #7
// set V to global x/y/f position
int PlasticConnTestLayer::copyAtoV(){
   const PVLayerLoc * loc = getLayerLoc();
   pvdata_t * V = getV();
   pvdata_t * A = clayer->activity->data;
   for (int kLocal = 0; kLocal < getNumNeurons(); kLocal++){
      int kExtended = kIndexExtended(kLocal, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      V[kLocal] = A[kExtended];
   }
   return PV_SUCCESS;
}
Example #8
int InputLayer::updateState(double timef, double dt) {
   if(!constantValue || firstRun){
      char cVal = inputString.at(int(parent->simulationTime()-1)%numExamples);
      iVal = cVal - '0';
   }
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc(); 
   assert(loc->nf == 2);
   //Set binary values of xor values
   
   std::cout << timef << ": input val:" << iVal << "\n";
   
   int negVal = -1;
   for(int ni = 0; ni < getNumNeurons(); ni++){
      int nExt = kIndexExtended(ni, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, loc->nx+loc->halo.lt+loc->halo.rt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
      switch(iVal){
         case 0:
            if(fi == 0){
               A[nExt] = negVal;
            }
            if(fi == 1){
               A[nExt] = negVal;
            }
            break;
         case 1:
            if(fi == 0){
               A[nExt] = negVal;
            }
            if(fi == 1){
               A[nExt] = 1;
            }
            break;
         case 2:
            if(fi == 0){
               A[nExt] = 1;
            }
            if(fi == 1){
               A[nExt] = negVal;
            }
            break;
         case 3:
            if(fi == 0){
               A[nExt] = 1;
            }
            if(fi == 1){
               A[nExt] = 1;
            }
            break;
      }
   }
   firstRun = false;
   return PV_SUCCESS;
}
Example #9
int customexit(HyPerCol * hc, int argc, char ** argv) {
   pvadata_t correctvalue = 0.5f;
   pvadata_t tolerance = 1.0e-7f;

   if (hc->columnId()==0) {
      pvInfo().printf("Checking whether input layer has all values equal to %f ...\n", correctvalue);
   }
   HyPerLayer * inputlayer = hc->getLayerFromName("input");
   assert(inputlayer);
   PVLayerLoc const * loc = inputlayer->getLayerLoc();
   assert(loc->nf==1);
   const int numNeurons = inputlayer->getNumNeurons();
   assert(numNeurons>0);
   int status = PV_SUCCESS;

   int numExtended = inputlayer->getNumExtended();
   InterColComm * icComm = hc->icCommunicator();
   pvadata_t * layerData = (pvadata_t *) icComm->publisherStore(inputlayer->getLayerId())->buffer(LOCAL);
   int rootproc = 0;
   if (icComm->commRank()==rootproc) {
      pvadata_t * databuffer = (pvadata_t *) malloc(numExtended*sizeof(pvadata_t));
      assert(databuffer);
      for (int proc=0; proc<icComm->commSize(); proc++) {
         if (proc==rootproc) {
            memcpy(databuffer, layerData, numExtended*sizeof(pvadata_t));
         }
         else {
            MPI_Recv(databuffer, numExtended*sizeof(pvadata_t),MPI_BYTE,proc,15,icComm->communicator(), MPI_STATUS_IGNORE);
         }
         // At this point, databuffer on rank 0 should contain the extended input layer on rank proc
         for (int k=0; k<numNeurons; k++) {
            int kExt = kIndexExtended(k,loc->nx,loc->ny,loc->nf,loc->halo.lt,loc->halo.rt,loc->halo.dn,loc->halo.up);
            pvadata_t value = databuffer[kExt];
            if (fabs(value-correctvalue)>=tolerance) {
               pvErrorNoExit().printf("Rank %d, restricted index %d, extended index %d, value is %f instead of %f\n",
                     proc, k, kExt, value, correctvalue);
               status = PV_FAILURE;
            }
         }
      }
      free(databuffer);
      if (status == PV_SUCCESS) {
         pvInfo().printf("%s succeeded.\n", argv[0]);
      }
      else {
         pvError().printf("%s failed.\n", argv[0]);
      }
   }
   else {
      MPI_Send(layerData,numExtended*sizeof(pvadata_t),MPI_BYTE,rootproc,15,icComm->communicator());
   }
   MPI_Barrier(icComm->communicator());
   return status;
}
Example #10
int FilenameParsingGroundTruthLayer::updateState(double time, double dt)
{
   update_timer->start();
   pvdata_t * A = getCLayer()->activity->data;
   const PVLayerLoc * loc = getLayerLoc();
   int num_neurons = getNumNeurons();
   if (num_neurons != numClasses)
   {
      pvError() << "The number of neurons in " << getName() << " is not equal to the number of classes specified in " << parent->getOutputPath() << "/classes.txt\n";
   }   

   for(int b = 0; b < loc->nbatch; b++){
      char * currentFilename = NULL;
      int filenameLen = 0;
      //TODO depending on speed of this layer, more efficient way would be to preallocate currentFilename buffer
      if(parent->icCommunicator()->commRank()==0){
         currentFilename = strdup(movieLayer->getFilename(b));
         //Get length of currentFilename and broadcast
         filenameLen = (int) strlen(currentFilename) + 1; //+1 for the null terminator
         //Using local communicator, as each batch MPI will handle its own run
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         //Broadcast filename to all other local processes
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }
      else{
         //Receive broadcast about length of filename
         MPI_Bcast(&filenameLen, 1, MPI_INT, 0, parent->icCommunicator()->communicator());
         currentFilename = (char*)calloc(sizeof(char), filenameLen);
         //Receive filename
         MPI_Bcast(currentFilename, filenameLen, MPI_CHAR, 0, parent->icCommunicator()->communicator());
      }

      std::string fil = currentFilename;
      pvdata_t * ABatch = A + b * getNumExtended();
      for(int i = 0; i < num_neurons; i++){
         int nExt = kIndexExtended(i, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         int fi = featureIndex(nExt, loc->nx+loc->halo.rt+loc->halo.lt, loc->ny+loc->halo.dn+loc->halo.up, loc->nf);
         bool match = fil.find(classes[i]) != std::string::npos;
         if(match){
            ABatch[nExt] = gtClassTrueValue;
         }
         else{
            ABatch[nExt] = gtClassFalseValue;
         }
      }
      //Free buffer, TODO, preallocate buffer to avoid this
      free(currentFilename);
   }
   update_timer->stop();
   return PV_SUCCESS;
}
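The rank-0/other-ranks branches above are a standard two-phase MPI string broadcast: length first, then bytes. A self-contained sketch of the same pattern (the helper name is hypothetical):

#include <mpi.h>
#include <stdlib.h>
#include <string.h>

// Root broadcasts a NUL-terminated string; every rank returns a heap copy
// that the caller must free.
char * broadcastStringSketch(const char * s, int root, MPI_Comm comm) {
   int rank; MPI_Comm_rank(comm, &rank);
   int len = (rank == root) ? (int) strlen(s) + 1 : 0; // +1 for the terminator
   MPI_Bcast(&len, 1, MPI_INT, root, comm);            // phase 1: the length
   char * buf = (char *) malloc(len);
   if (rank == root) { memcpy(buf, s, len); }
   MPI_Bcast(buf, len, MPI_CHAR, root, comm);          // phase 2: the bytes
   return buf;
}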
Example #11
int MatchingPursuitProbe::outputState(double timed) {
   int status = PV_SUCCESS;
   const PVLayerLoc * loc = getTargetLayer()->getLayerLoc();
   if (timed>0.0) {
      for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
         int kGlobal = globalIndexFromLocal(k, *loc);
         pvdata_t correctValue = nearbyint((double)kGlobal + timed)==256.0 ? (pvdata_t) kGlobal/255.0f : 0.0f;
         int kExtended = kIndexExtended(k, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         pvdata_t observed = getTargetLayer()->getLayerData()[kExtended];
         pvdata_t relerr = fabs(observed-correctValue)/correctValue;
         if (relerr>1e-7) {
            fprintf(stderr, "Time %f: Neuron %d (global index) has relative error %f (%f versus correct %f)\n", timed, kGlobal, relerr, observed, correctValue);
            status = PV_FAILURE;
         }
      }
   }
   assert(status==PV_SUCCESS);
   return status;
}
Example #12
int main(int argc, char* argv[])
{
   int kg, kl, kb;

   PVLayerLoc loc;

   int nf = 3;

   int nx = 64;
   int ny = 68;
   int nb = 4;

   int nxGlobal = nx + 2*nb;
   int nyGlobal = ny + 2*nb;

   int kx0 = nb;
   int ky0 = nb;

   loc.nx = nx;
   loc.ny = ny;
   loc.nxGlobal = nxGlobal;
   loc.nyGlobal = nyGlobal;
   loc.kx0 = kx0;
   loc.ky0 = ky0;
   loc.halo.lt  = nb;
   loc.halo.rt  = nb;
   loc.halo.dn  = nb;
   loc.halo.up  = nb;
   loc.nf  = nf;

   for (kl = 0; kl < nf*nxGlobal*nyGlobal; kl++) {
      kg = globalIndexFromLocal_nompi(kl, loc);
      kb = kIndexExtended(kl, nx, ny, nf, nb, nb, nb, nb); // All margin widths the same.  Should generalize
      if (kb != kg) {
         printf("FAILED:TEST_EXTEND_BORDER: (kl,kb) = (%d,%d)\n", kl, kb);
         exit(1);
      }
   }

   return 0;
}
Example #13
int MoviePvpTestLayer::updateStateWrapper(double time, double dt)
{
   MoviePvp::updateStateWrapper(time, dt);
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int nbatch = loc->nbatch;

   for(int b = 0; b < nbatch; b++){
      pvdata_t * dataBatch = data + b * getNumExtended();
      int frameIdx = 0;
      if(strcmp(getBatchMethod(), "byImage") == 0){
         frameIdx = (time-1) * nbatch + b;
      }
      else if(strcmp(getBatchMethod(), "byMovie") == 0){
         frameIdx = b * 2 + (time-1);
      }
      for(int nkRes = 0; nkRes < getNumNeurons(); nkRes++){
         //Calculate extended index
         int nkExt = kIndexExtended(nkRes, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);  
         //checkVal is this batch's value at the extended index
         pvdata_t checkVal = dataBatch[nkExt];

         int kxGlobal = kxPos(nkRes, nx, ny, nf) + loc->kx0;
         int kyGlobal = kyPos(nkRes, nx, ny, nf) + loc->ky0; 
         int kf = featureIndex(nkRes, nx, ny, nf);

         pvdata_t expectedVal = kIndex(kxGlobal, kyGlobal, kf, loc->nxGlobal, loc->nyGlobal, nf) + frameIdx*192;
         if(fabs(checkVal - expectedVal) >= 1e-5){
            std::cout << "ImageFileIO " << name << " test Expected: " << expectedVal << " Actual: " << checkVal << "\n";
            //exit(-1);
         }
      }
   }
   return PV_SUCCESS;
}
Example #14
int OjaKernelSpikeRateProbe::allocateDataStructures() {
   targetOjaKernelConn = dynamic_cast<OjaKernelConn *>(getTargetConn());
   if (targetOjaKernelConn == NULL) {
      if (getParent()->columnId()==0) {
         fprintf(stderr, "LCATraceProbe error: connection \"%s\" must be an LCALIFLateralConn.\n", getTargetConn()->getName());
      }
      abort();
   }
   HyPerLayer * targetLayer = NULL;
   if (isInputRate) {
      targetLayer = targetOjaKernelConn->preSynapticLayer();
   }
   else {
      targetLayer = targetOjaKernelConn->postSynapticLayer();
   }
   const PVLayerLoc * loc = targetLayer->getLayerLoc();
   int x_local = xg - loc->kx0;
   int y_local = yg - loc->ky0;
   bool inBounds = (x_local >= 0 && x_local < loc->nx && y_local >= 0 && y_local < loc->ny);
   if (inBounds) {
      int krestricted = kIndex(x_local, y_local, feature, loc->nx, loc->ny, loc->nf);
      if (isInputRate) {
         int kextended = kIndexExtended(krestricted, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         spikeRate = &targetOjaKernelConn->getInputFiringRate(arbor)[kextended];
      }
      else {
         spikeRate = &targetOjaKernelConn->getOutputFiringRate()[krestricted];
      }
   }
   else {
      outputstream = NULL;
   }
   //This is now being done in BaseConnectionProbe
   //getTargetConn()->insertProbe(this);

   return PV_SUCCESS;
}
Example #15
int ResetStateOnTriggerTestProbe::calcValues(double timevalue) {
   int nBatch = getNumValues();
   if (timevalue > parent->getStartTime()) {
      int N = targetLayer->getNumNeurons();
      int NGlobal = targetLayer->getNumGlobalNeurons();
      PVLayerLoc const * loc = targetLayer->getLayerLoc();
      PVHalo const * halo = &loc->halo;
      int inttime = (int) nearbyintf(timevalue/parent->getDeltaTime());
      for (int b=0; b<nBatch; b++) {
         int numDiscreps = 0;
         pvadata_t const * activity = targetLayer->getLayerData() + b*targetLayer->getNumExtended();
         for (int k=0; k<N; k++) {
            int kex = kIndexExtended(k, loc->nx, loc->ny, loc->nf, halo->lt, halo->rt, halo->dn, halo->up);
            pvadata_t a = activity[kex];
            int kGlobal = globalIndexFromLocal(k, *loc);
            int correctValue = 4*kGlobal*((inttime + 4)%5+1) + (kGlobal==((((inttime-1)/5)*5)+1)%NGlobal);
            if ( a != (pvadata_t) correctValue ) { numDiscreps++; }
         }
         getValuesBuffer()[b] = (double) numDiscreps;
      }
      MPI_Allreduce(MPI_IN_PLACE, getValuesBuffer(), nBatch, MPI_DOUBLE, MPI_SUM, parent->icCommunicator()->communicator());
      if (probeStatus==0) {
         for (int k=0; k<nBatch; k++) {
            if (getValuesBuffer()[k]) {
               probeStatus = 1;
               firstFailureTime = timevalue;
            }
         }
      }      
   }
   else {
      for (int b=0; b<nBatch; b++) {
         getValuesBuffer()[b] = 0.0;
      }
   }
   return PV_SUCCESS;
}
Example #16
int RescaleLayerTestProbe::outputState(double timed)
{
   int status = StatsProbe::outputState(timed);
   if (timed==getParent()->getStartTime()) { return PV_SUCCESS; }
   float tolerance = 2.0e-5f;
   InterColComm * icComm = getTargetLayer()->getParent()->icCommunicator();
   bool isRoot = icComm->commRank() == 0;

   RescaleLayer * targetRescaleLayer = dynamic_cast<RescaleLayer *>(getTargetLayer());
   assert(targetRescaleLayer);

   if (targetRescaleLayer->getRescaleMethod()==NULL) {
      fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\" does not have rescaleMethod set.  Exiting.\n", name, targetRescaleLayer->getName());
      status = PV_FAILURE;
   }
   else if (!strcmp(targetRescaleLayer->getRescaleMethod(), "maxmin")) {
      if (!isRoot) { return PV_SUCCESS; }
      for(int b = 0; b < parent->getNBatch(); b++){
         float targetMax = targetRescaleLayer->getTargetMax();
         if (fabs(fMax[b]-targetMax)>tolerance) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\" has max %f instead of target max %f\n", getName(), targetRescaleLayer->getName(), fMax[b], targetMax);
            status = PV_FAILURE;
         }
         float targetMin = targetRescaleLayer->getTargetMin();
         if (fabs(fMin[b]-targetMin)>tolerance) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\" has min %f instead of target min %f\n", getName(), targetRescaleLayer->getName(), fMin[b], targetMin);
            status = PV_FAILURE;
         }

         // Now, check whether rescaled activity and original V are colinear.
         PVLayerLoc const * rescaleLoc = targetRescaleLayer->getLayerLoc();
         PVHalo const * rescaleHalo = &rescaleLoc->halo;
         int nk = rescaleLoc->nx * rescaleLoc->nf;
         int ny = rescaleLoc->ny;
         int rescaleStrideYExtended = (rescaleLoc->nx + rescaleHalo->lt + rescaleHalo->rt) * rescaleLoc->nf;
         int rescaleExtendedOffset = kIndexExtended(0, rescaleLoc->nx, rescaleLoc->ny, rescaleLoc->nf, rescaleHalo->lt, rescaleHalo->rt, rescaleHalo->dn, rescaleHalo->up);
         pvadata_t const * rescaledData = targetRescaleLayer->getLayerData() + b * targetRescaleLayer->getNumExtended() + rescaleExtendedOffset;
         PVLayerLoc const * origLoc = targetRescaleLayer->getOriginalLayer()->getLayerLoc();
         PVHalo const * origHalo = &origLoc->halo;
         assert(nk == origLoc->nx * origLoc->nf);
         assert(ny == origLoc->ny);
         int origStrideYExtended = (origLoc->nx + origHalo->lt + origHalo->rt) * origLoc->nf;
         int origExtendedOffset = kIndexExtended(0, origLoc->nx, origLoc->ny, origLoc->nf, origHalo->lt, origHalo->rt, origHalo->dn, origHalo->up);
         pvadata_t const * origData = targetRescaleLayer->getOriginalLayer()->getLayerData() + b * targetRescaleLayer->getOriginalLayer()->getNumExtended() + origExtendedOffset;

         bool iscolinear = colinear(nk, ny, origStrideYExtended, rescaleStrideYExtended, origData, rescaledData, tolerance, NULL, NULL, NULL);
         if (!iscolinear) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": Rescale layer \"%s\" data is not a linear rescaling of original membrane potential.\n", getName(), targetRescaleLayer->getName());
            status = PV_FAILURE;
         }
      }
   }
   //l2 norm with a patch size of 1 (default) should be the same as rescaling with meanstd with target mean 0 and std of 1/sqrt(patchsize)
   else if (!strcmp(targetRescaleLayer->getRescaleMethod(), "meanstd") || !strcmp(targetRescaleLayer->getRescaleMethod(), "l2")) {
      if (!isRoot) { return PV_SUCCESS; }
      for(int b = 0; b < parent->getNBatch(); b++){
         float targetMean, targetStd;
         if(!strcmp(targetRescaleLayer->getRescaleMethod(), "meanstd")){
            targetMean = targetRescaleLayer->getTargetMean();
            targetStd = targetRescaleLayer->getTargetStd();
         }
         else{
            targetMean = 0;
            targetStd = 1/sqrt((float)targetRescaleLayer->getL2PatchSize());
         }

         if (fabs(avg[b]-targetMean)>tolerance) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\" has mean %f instead of target mean %f\n", getName(), targetRescaleLayer->getName(), (double)avg[b], targetMean);
            status = PV_FAILURE;
         }
         if (sigma[b]>tolerance && fabs(sigma[b]-targetStd)>tolerance) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\" has std.dev. %f instead of target std.dev. %f\n", getName(), targetRescaleLayer->getName(), (double)sigma[b], targetStd);
            status = PV_FAILURE;
         }

         // Now, check whether rescaled activity and original V are colinear.
         PVLayerLoc const * rescaleLoc = targetRescaleLayer->getLayerLoc();
         PVHalo const * rescaleHalo = &rescaleLoc->halo;
         int nk = rescaleLoc->nx * rescaleLoc->nf;
         int ny = rescaleLoc->ny;
         int rescaleStrideYExtended = (rescaleLoc->nx + rescaleHalo->lt + rescaleHalo->rt) * rescaleLoc->nf;
         int rescaleExtendedOffset = kIndexExtended(0, rescaleLoc->nx, rescaleLoc->ny, rescaleLoc->nf, rescaleHalo->lt, rescaleHalo->rt, rescaleHalo->dn, rescaleHalo->up);
         pvadata_t const * rescaledData = targetRescaleLayer->getLayerData() + b*targetRescaleLayer->getNumExtended() + rescaleExtendedOffset;
         PVLayerLoc const * origLoc = targetRescaleLayer->getOriginalLayer()->getLayerLoc();
         PVHalo const * origHalo = &origLoc->halo;
         assert(nk == origLoc->nx * origLoc->nf);
         assert(ny == origLoc->ny);
         int origStrideYExtended = (origLoc->nx + origHalo->lt + origHalo->rt) * origLoc->nf;
         int origExtendedOffset = kIndexExtended(0, origLoc->nx, origLoc->ny, origLoc->nf, origHalo->lt, origHalo->rt, origHalo->dn, origHalo->up);
         pvadata_t const * origData = targetRescaleLayer->getOriginalLayer()->getLayerData() + b*targetRescaleLayer->getOriginalLayer()->getNumExtended() + origExtendedOffset;

         bool iscolinear = colinear(nk, ny, origStrideYExtended, rescaleStrideYExtended, origData, rescaledData, tolerance, NULL, NULL, NULL);
         if (!iscolinear) {
            fprintf(stderr, "RescaleLayerTestProbe \"%s\": Rescale layer \"%s\" data is not a linear rescaling of original membrane potential.\n", getName(), targetRescaleLayer->getName());
            status = PV_FAILURE;
         }
      }
   }
   else if (!strcmp(targetRescaleLayer->getRescaleMethod(), "pointmeanstd")) {
      PVLayerLoc const * loc = targetRescaleLayer->getLayerLoc();
      int nf = loc->nf;
      if (nf<2) { return PV_SUCCESS; }
      PVHalo const * halo = &loc->halo;
      float targetMean = targetRescaleLayer->getTargetMean();
      float targetStd = targetRescaleLayer->getTargetStd();
      int numNeurons = targetRescaleLayer->getNumNeurons();
      for(int b = 0; b < parent->getNBatch(); b++){
         pvpotentialdata_t const * originalData = targetRescaleLayer->getV() + b*targetRescaleLayer->getNumNeurons();
         pvadata_t const * rescaledData = targetRescaleLayer->getLayerData() + b*targetRescaleLayer->getNumExtended();
         for (int k=0; k<numNeurons; k+=nf) {
            int kExtended = kIndexExtended(k, loc->nx, loc->ny, loc->nf, halo->lt, halo->rt, halo->dn, halo->up);
            double pointmean = 0.0;
            for (int f=0; f<nf; f++) {
               pointmean += rescaledData[kExtended+f];
            }
            pointmean /= nf;
            double pointstd = 0.0;
            for (int f=0; f<nf; f++) {
               double d = rescaledData[kExtended+f]-pointmean;
               pointstd += d*d;
            }
            pointstd /= nf;
            pointstd = sqrt(pointstd);
            if (fabs(pointmean-targetMean)>tolerance) {
               fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\", location in rank %d, starting at restricted neuron %d, has mean %f instead of target mean %f\n",
                     getName(), targetRescaleLayer->getName(), getParent()->columnId(), k, pointmean, targetMean);
               status = PV_FAILURE;
            }
            if (pointstd>tolerance && fabs(pointstd-targetStd)>tolerance) {
               fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\", location in rank %d, starting at restricted neuron %d, has std.dev. %f instead of target std.dev. %f\n",
                     getName(), targetRescaleLayer->getName(), getParent()->columnId(), k, pointstd, targetStd);
               status = PV_FAILURE;
            }
            bool iscolinear = colinear(nf, 1, 0, 0, &originalData[k], &rescaledData[kExtended], tolerance, NULL, NULL, NULL);
            if (!iscolinear) {
               fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\", location in rank %d, starting at restricted neuron %d, is not a linear rescaling.\n",
                     getName(), targetRescaleLayer->getName(), parent->columnId(), k);
               status = PV_FAILURE;
            }
         }
      }
   }
   else if (!strcmp(targetRescaleLayer->getRescaleMethod(), "zerotonegative")) {
      int numNeurons = targetRescaleLayer->getNumNeurons();
      assert(numNeurons == targetRescaleLayer->getOriginalLayer()->getNumNeurons());
      PVLayerLoc const * rescaleLoc = targetRescaleLayer->getLayerLoc();
      PVHalo const * rescaleHalo = &rescaleLoc->halo;
      int nf = rescaleLoc->nf;
      HyPerLayer * originalLayer = targetRescaleLayer->getOriginalLayer();
      PVLayerLoc const * origLoc = originalLayer->getLayerLoc();
      PVHalo const * origHalo = &origLoc->halo;
      assert(origLoc->nf == nf);

      for(int b = 0; b < parent->getNBatch(); b++){
         pvadata_t const * rescaledData = targetRescaleLayer->getLayerData() + b * targetRescaleLayer->getNumExtended();
         pvadata_t const * originalData = originalLayer->getLayerData() + b * originalLayer->getNumExtended();
         for (int k=0; k<numNeurons; k++) {
            int rescale_kExtended = kIndexExtended(k, rescaleLoc->nx, rescaleLoc->ny, rescaleLoc->nf, rescaleHalo->lt, rescaleHalo->rt, rescaleHalo->dn, rescaleHalo->up);
            int orig_kExtended = kIndexExtended(k, origLoc->nx, origLoc->ny, origLoc->nf, origHalo->lt, origHalo->rt, origHalo->dn, origHalo->up);
            pvadata_t observedval = rescaledData[rescale_kExtended];
            pvpotentialdata_t correctval = originalData[orig_kExtended] ? observedval : -1.0;
            if (observedval != correctval) {
               fprintf(stderr, "RescaleLayerTestProbe \"%s\": RescaleLayer \"%s\", rank %d, restricted neuron %d has value %f instead of expected %f\n.",
                     this->getName(), targetRescaleLayer->getName(), parent->columnId(), k, observedval, correctval);
               status = PV_FAILURE;
            }
         }
      }
   }
   else {
      assert(0);  // All allowable rescaleMethod values are handled above.
   }
   if (status == PV_FAILURE) {
      exit(EXIT_FAILURE);
   }
   return status;
}
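The colinear() helper is not shown on this page; from its call sites it takes two strided 2-D buffers and decides whether one is an affine rescaling of the other. A hedged sketch of such a check (the three trailing arguments passed as NULL above presumably return the fitted slope, intercept, and residual):

#include <cmath>

static bool colinearSketch(int nk, int ny, int xStrideY, int yStrideY,
                           float const * xdata, float const * ydata, float tol) {
   double n = (double) nk * ny, sx = 0, sy = 0, sxx = 0, sxy = 0, syy = 0;
   for (int j = 0; j < ny; j++) {
      for (int i = 0; i < nk; i++) {
         double x = xdata[j * xStrideY + i], y = ydata[j * yStrideY + i];
         sx += x; sy += y; sxx += x * x; sxy += x * y; syy += y * y;
      }
   }
   double covxy = sxy / n - (sx / n) * (sy / n);
   double varx  = sxx / n - (sx / n) * (sx / n);
   double vary  = syy / n - (sy / n) * (sy / n);
   if (varx < (double) tol * tol) { return vary < (double) tol * tol; } // x constant
   if (vary < (double) tol * tol) { return true; }  // y constant: trivially affine
   return std::fabs(covxy * covxy / (varx * vary) - 1.0) < tol; // |corr| ~ 1
}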
Example #17
int PoolingConn::deliverPostsynapticPerspective(PVLayerCube const * activity, int arborID) {
    //Check channel number for noupdate
    if(getChannel() == CHANNEL_NOUPDATE) {
        return PV_SUCCESS;
    }
    assert(post->getChannel(getChannel()));

    assert(arborID >= 0);
    //Get number of neurons restricted target
    const int numPostRestricted = post->getNumNeurons();

    float dt_factor = getConvertToRateDeltaTimeFactor();

    const PVLayerLoc * sourceLoc = preSynapticLayer()->getLayerLoc();
    const PVLayerLoc * targetLoc = post->getLayerLoc();

    const int sourceNx = sourceLoc->nx;
    const int sourceNy = sourceLoc->ny;
    const int sourceNf = sourceLoc->nf;
    const int targetNx = targetLoc->nx;
    const int targetNy = targetLoc->ny;
    const int targetNf = targetLoc->nf;

    const PVHalo * sourceHalo = &sourceLoc->halo;
    const PVHalo * targetHalo = &targetLoc->halo;

    //get source layer's extended y stride
    int sy  = (sourceNx+sourceHalo->lt+sourceHalo->rt)*sourceNf;

    //The start of the gsyn buffer
    pvdata_t * gSynPatchHead = post->getChannel(this->getChannel());

    clearGateIdxBuffer();
    int* gatePatchHead = NULL;
    if(needPostIndexLayer) {
        gatePatchHead = postIndexLayer->getChannel(CHANNEL_EXC);
    }


    long * startSourceExtBuf = getPostToPreActivity();
    if(!startSourceExtBuf) {
        std::cout << "HyPerLayer::recvFromPost error getting preToPostActivity from connection. Is shrink_patches on?\n";
        exit(EXIT_FAILURE);
    }

    float resetVal = 0;
    if(getPvpatchAccumulateType() == ACCUMULATE_MAXPOOLING) {
        resetVal = -INFINITY;
    }


    for(int b = 0; b < parent->getNBatch(); b++) {
#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for
#endif
        for (int kTargetRes = 0; kTargetRes < numPostRestricted; kTargetRes++) {
            pvdata_t * activityBatch = activity->data + b * (sourceNx + sourceHalo->rt + sourceHalo->lt) * (sourceNy + sourceHalo->up + sourceHalo->dn) * sourceNf;
            pvdata_t * gSynPatchHeadBatch = gSynPatchHead + b * targetNx * targetNy * targetNf;

            //Change restricted to extended post neuron
            int kTargetExt = kIndexExtended(kTargetRes, targetNx, targetNy, targetNf, targetHalo->lt, targetHalo->rt, targetHalo->dn, targetHalo->up);

            //Read from buffer
            long startSourceExt = startSourceExtBuf[kTargetRes];

            //Calculate target's start of gsyn
            pvdata_t * gSynPatchPos = gSynPatchHeadBatch + kTargetRes;
            //Initialize accumulation with resetVal (-infinity for max pooling, 0 otherwise)
            *gSynPatchPos = resetVal;

            int* gatePatchPos = NULL;
            if(needPostIndexLayer) {
                gatePatchPos = gatePatchHead + b * postIndexLayer->getNumNeurons() + kTargetRes;
                //Initialize gatePatchPos as a negative number
                *gatePatchPos = -1;
            }

            float* activityStartBuf = &(activityBatch[startSourceExt]);

            pvwdata_t * weightY = NULL; //No weights in pooling
            int sf = postConn->fPatchSize();
            int yPatchSize = postConn->yPatchSize();
            int numPerStride = postConn->xPatchSize() * postConn->fPatchSize();

            const PVLayerLoc * postLoc = post->getLayerLoc();
            const int kfPost = featureIndex(kTargetExt, postLoc->nx + postLoc->halo.lt + postLoc->halo.rt, postLoc->ny + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);
            int offset = kfPost;

            pvwdata_t w = 1.0;
            if(getPvpatchAccumulateType() == ACCUMULATE_SUMPOOLING) {
                float relative_XScale = pow(2, (post->getXScale() - pre->getXScale()));
                float relative_YScale = pow(2, (post->getYScale() - pre->getYScale()));
                w = 1.0/(nxp*nyp*relative_XScale*relative_YScale);
            }

            for (int ky = 0; ky < yPatchSize; ky++) {
                int kPreExt = startSourceExt + ky*sy+offset;
                const int kxPreExt = kxPos(kPreExt, sourceLoc->nx + sourceLoc->halo.lt + sourceLoc->halo.rt, sourceLoc->ny + sourceLoc->halo.dn + sourceLoc->halo.up, sourceLoc->nf);
                const int kyPreExt = kyPos(kPreExt, sourceLoc->nx + sourceLoc->halo.lt + sourceLoc->halo.rt, sourceLoc->ny + sourceLoc->halo.dn + sourceLoc->halo.up, sourceLoc->nf);
                const int kfPre = featureIndex(kPreExt, sourceLoc->nx + sourceLoc->halo.lt + sourceLoc->halo.rt, sourceLoc->ny + sourceLoc->halo.dn + sourceLoc->halo.up, sourceLoc->nf);
                const int kxPreGlobalExt = kxPreExt + sourceLoc->kx0;
                const int kyPreGlobalExt = kyPreExt + sourceLoc->ky0;
                const int kPreGlobalExt = kIndex(kxPreGlobalExt, kyPreGlobalExt, kfPre, sourceLoc->nxGlobal + sourceLoc->halo.lt + sourceLoc->halo.rt, sourceLoc->nyGlobal + sourceLoc->halo.up + sourceLoc->halo.dn, sourceLoc->nf);

                float * activityY = &(activityStartBuf[ky*sy+offset]);

                (accumulateFunctionFromPostPointer)(kPreGlobalExt, numPerStride, gSynPatchPos, activityY, &w, dt_factor, gatePatchPos, sf);
            }
        }
    }
    return PV_SUCCESS;
}
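All of the pooling arithmetic is delegated to accumulateFunctionFromPostPointer. A hedged sketch of what a max-pooling variant of that callback plausibly does, matching the argument list used above (names hypothetical):

// Scan one patch row with stride sf, keep the running max in *gSynPatchPos,
// and record the winning presynaptic index in *gateIdx when gating is on.
static void maxPoolAccumulateSketch(int kPreGlobalExt, int numPerStride,
                                    float * gSynPatchPos, float const * activityY,
                                    float const * w, float dt_factor,
                                    int * gateIdx, int sf) {
   (void) w; // max pooling ignores the weight; sum pooling would multiply by *w
   for (int k = 0; k < numPerStride; k += sf) {
      float a = activityY[k] * dt_factor;
      if (a > *gSynPatchPos) {
         *gSynPatchPos = a;
         if (gateIdx) { *gateIdx = kPreGlobalExt + k; }
      }
   }
}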
Example #18
int PointLIFProbe::calcValues(double timevalue) {
   // TODO: Reduce duplicated code between PointProbe::calcValues and PointLIFProbe::calcValues.
   assert(this->getNumValues()==NUMBER_OF_VALUES);
   LIF * LIF_layer = dynamic_cast<LIF *>(getTargetLayer());
   assert(LIF_layer != NULL);
   pvconductance_t const * G_E  = LIF_layer->getConductance(CHANNEL_EXC) + batchLoc * LIF_layer->getNumNeurons();
   pvconductance_t const * G_I  = LIF_layer->getConductance(CHANNEL_INH) + batchLoc * LIF_layer->getNumNeurons();
   pvconductance_t const * G_IB = LIF_layer->getConductance(CHANNEL_INHB) + batchLoc * LIF_layer->getNumNeurons();
   pvdata_t const * V = getTargetLayer()->getV();
   pvdata_t const * Vth  = LIF_layer->getVth();
   pvdata_t const * activity = getTargetLayer()->getLayerData();
   assert(V && activity && G_E && G_I && G_IB && Vth);
   double * valuesBuffer = this->getValuesBuffer();
   //We need to calculate which mpi process contains the target point, and send that info to the root process
   //Each process calculates local index
   const PVLayerLoc * loc = getTargetLayer()->getLayerLoc();
   //Calculate local coords from global
   const int kx0 = loc->kx0;
   const int ky0 = loc->ky0;
   const int kb0 = loc->kb0;
   const int nx = loc->nx;
   const int ny = loc->ny;
   const int nf = loc->nf;
   const int nbatch = loc->nbatch;
   const int xLocLocal = xLoc - kx0;
   const int yLocLocal = yLoc - ky0;
   const int nbatchLocal = batchLoc - kb0;
   
   //if in bounds
   if( xLocLocal >= 0 && xLocLocal < nx &&
       yLocLocal >= 0 && yLocLocal < ny &&
       nbatchLocal >= 0 && nbatchLocal < nbatch){
      //Send V and A to root
      const int k = kIndex(xLocLocal, yLocLocal, fLoc, nx, ny, nf);
      const int kbatch = k + nbatchLocal*getTargetLayer()->getNumNeurons();
      valuesBuffer[0] = G_E[kbatch];
      valuesBuffer[1] = G_I[kbatch];
      valuesBuffer[2] = G_IB[kbatch];
      valuesBuffer[3] = V[kbatch];
      valuesBuffer[4] = Vth[kbatch];
      const int kex = kIndexExtended(k, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      valuesBuffer[5] = activity[kex + nbatchLocal * getTargetLayer()->getNumExtended()];
      //If not in root process, send to root process
      if(parent->columnId()!=0){
         MPI_Send(valuesBuffer, NUMBER_OF_VALUES, MPI_DOUBLE, 0, 0, parent->icCommunicator()->communicator());
      }
   }

   //Root process
   if(parent->columnId()==0){
      //Calculate which rank target neuron is
      //TODO we need to calculate rank from batch as well
      int xRank = xLoc/nx;
      int yRank = yLoc/ny;

      int srcRank = rankFromRowAndColumn(yRank, xRank, parent->icCommunicator()->numCommRows(), parent->icCommunicator()->numCommColumns());

      //If srcRank is not root process, MPI_Recv from that rank
      if(srcRank != 0){
         MPI_Recv(valuesBuffer, NUMBER_OF_VALUES, MPI_DOUBLE, srcRank, 0, parent->icCommunicator()->communicator(), MPI_STATUS_IGNORE);
      }
   }
   return PV_SUCCESS;
}
Example #19
int TextStreamProbe::outputState(double timef) {
   if (timef<nextDisplayTime) return PV_SUCCESS;
   nextDisplayTime += displayPeriod;
   int status = PV_SUCCESS;
   assert(getTargetLayer()->getParent()->icCommunicator()->numCommColumns()==1);
   int num_rows = getTargetLayer()->getParent()->icCommunicator()->numCommRows();
   const PVLayerLoc * loc = getTargetLayer()->getLayerLoc();
   int nx = loc->nx;
   assert(nx==loc->nxGlobal); // num mpi cols is always 1
   int ny = loc->ny;
   int nyGlobal = loc->nyGlobal;
   int nf = loc->nf;
   assert(nyGlobal==ny*num_rows);
   MPI_Comm mpi_comm = getTargetLayer()->getParent()->icCommunicator()->communicator();
   pvdata_t * buf = (pvdata_t *) calloc(ny*nf*nx, sizeof(pvdata_t)); // Buffer holding one process's rows of restricted layer data

   int rootproc = 0;
   if (getTargetLayer()->getParent()->columnId()==rootproc) {
      char * cbuf = (char *) calloc(2*nx, sizeof(char)); // Translation of feature numbers into characters.  2x because nonprintable characters
      for (int proc=0; proc<num_rows; proc++) {
         if (proc==rootproc) {
            // Copy layer data to buf.
            for (int y=0; y<ny; y++) {
               int kex = kIndexExtended(y*nx*nf, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
               memcpy(&buf[y*nx*nf], &getTargetLayer()->getLayerData()[kex], nx*nf*sizeof(pvdata_t));
            }
         }
         else {
#ifdef PV_USE_MPI
            MPI_Recv(buf, ny*nx*nf, MPI_FLOAT, proc, 157, mpi_comm, MPI_STATUS_IGNORE);
#endif
         }
         for (int y=0; y<ny; y++) {
            char * curcbuf = cbuf;
            for (int x=0; x<nx; x++) {
               pvdata_t fmax = -FLT_MAX;
               int floc = -1;
               for (int f=0; f<nf; f++) {
                  if (buf[nf*(nx*y+x)+f]>fmax) {
                     fmax=buf[nf*(nx*y+x)+f];
                     floc = f;
                  }
               }
               assert(floc>=0 && floc < nf);
               // Now floc is the location of the maximum over f, and fmax is the value.
               featureNumberToCharacter(floc, &curcbuf, cbuf, 2*nx*ny);
            }
            assert(curcbuf-cbuf<2*nx*ny);
            *curcbuf = '\0';
            int firstChar = (int)(unsigned char)cbuf[0];
            if (firstChar == 10) { // line feed
               fprintf(outputstream->fp, "\n");
            }
            else {
               fprintf(outputstream->fp, "%s ", cbuf);
            }
         }
      }
      //Flush outputstream
      fflush(outputstream->fp);
      //fprintf(outputstream->fp, "\n");
      free(cbuf); cbuf = NULL;
   }
   else {
      for (int y=0; y<ny; y++) {
         int kex = kIndexExtended(y*nx*nf, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         memcpy(&buf[y*nx*nf], &getTargetLayer()->getLayerData()[kex], nx*nf*sizeof(pvdata_t));
      }
#ifdef PV_USE_MPI
      MPI_Send(buf, ny*nx*nf, MPI_FLOAT, rootproc, 157, mpi_comm);
#endif
   }
   free(buf); buf=NULL;

   return status;
}
Example #20
int MLPErrorLayer::updateState(double time, double dt)
{
   //update_timer->start();
   //assert(getNumChannels()>= 3);

   const PVLayerLoc * loc = getLayerLoc();

   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int num_neurons = nx*ny*nf;
   //Get pointers to the excitatory and inhibitory input channels
   pvdata_t * GSynExt = getChannel(CHANNEL_EXC);
   pvdata_t * GSynInh = getChannel(CHANNEL_INH);

   pvdata_t Vth, sig_scale;
   if(!symSigmoid){
      //Calculate constants for derivative of sigmoid layer
      Vth = (VthRest+Vrest)/2.0;
      sig_scale = -logf(1.0f/sigmoid_alpha - 1.0f)/(Vth - Vrest);
   }
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * V = getV();

#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
   for(int ni = 0; ni < num_neurons; ni++){
      int next = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //Update activity
      //f'(V)*(error)
      //error = gt - finalLayer iff error is last error
      float errProp = 0, gradient = 0;
      //exct is expected, inh is actual
      if(lastError){
         //0 is DCR
         if(GSynExt[ni] == 0){
            errProp = 0;
         }
      }
      else{
         if(strcmp(lossFunction, "squared") == 0){
            //expected - actual
            errProp = GSynExt[ni] - GSynInh[ni];
         }
         else if(strcmp(lossFunction, "entropy") == 0){
            //expected/actual
            errProp = GSynExt[ni]/GSynInh[ni];
         }
         else if(strcmp(lossFunction, "hidden") == 0){
            errProp = GSynExt[ni];
         }
         if(symSigmoid){
            gradient = 1.14393 * (1/(pow(cosh(((float)2/3) * V[ni]), 2))) + linAlpha;
         }
         else{
            gradient = -.5 * sig_scale * (1/(pow(cosh(sig_scale*(Vth - V[ni])), 2)));
         }
      }
      A[next] = dropout[ni] ? 0 : errProp * gradient;
   }
   //update_timer->stop();
   return PV_SUCCESS;
}
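For reference, the magic constants in the symSigmoid branch are consistent with the scaled hyperbolic tangent f(V) = 1.7159 * tanh(2V/3) commonly used as an MLP activation: its derivative is f'(V) = 1.7159 * (2/3) / cosh^2(2V/3), and 1.7159 * (2/3) is approximately 1.14393, which is the gradient expression computed above plus the linAlpha leak term.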
Example #21
//
// update the state of a retinal layer (spiking)
//
//    assume called with 1D kernel
//
CL_KERNEL
void LIF_update_state(
    const float time,
    const float dt,

    const int nx,
    const int ny,
    const int nf,
    const int nb,

    CL_MEM_GLOBAL LIF_params * params,
    CL_MEM_GLOBAL uint4 * rnd,
    CL_MEM_GLOBAL float * V,
    CL_MEM_GLOBAL float * Vth,
    CL_MEM_GLOBAL float * G_E,
    CL_MEM_GLOBAL float * G_I,
    CL_MEM_GLOBAL float * G_IB,
    CL_MEM_GLOBAL float * GSynExc,
    CL_MEM_GLOBAL float * GSynInh,
    CL_MEM_GLOBAL float * GSynInhB,
    CL_MEM_GLOBAL float * activity)
{
   int k;

   const float exp_tauE    = EXP(-dt/params->tauE);
   const float exp_tauI    = EXP(-dt/params->tauI);
   const float exp_tauIB   = EXP(-dt/params->tauIB);
   const float exp_tauVth  = EXP(-dt/params->tauVth);

   const float dt_sec = .001 * dt;   // convert to seconds

#ifndef PV_USE_OPENCL
   for (k = 0; k < nx*ny*nf; k++) {
#else
   k = get_global_id(0);
#endif

   int kex = kIndexExtended(k, nx, ny, nf, nb);

   //
   // kernel (nonheader part) begins here
   //

   // local param variables
   float tau, Vrest, VthRest, Vexc, Vinh, VinhB, deltaVth;

   const float GMAX = 10.0;

   // local variables
   float l_activ;

   uint4 l_rnd = rnd[k];

   float l_V   = V[k];
   float l_Vth = Vth[k];

   float l_G_E  = G_E[k];
   float l_G_I  = G_I[k];
   float l_G_IB = G_IB[k];

   float l_GSynExc  = GSynExc[k];
   float l_GSynInh  = GSynInh[k];
   float l_GSynInhB = GSynInhB[k];

   // temporary arrays
   float tauInf, VmemInf;

   //
   // start of LIF2_update_exact_linear
   //

   // define local param variables
   //
   tau   = params->tau;
   Vexc  = params->Vexc;
   Vinh  = params->Vinh;
   VinhB = params->VinhB;
   Vrest = params->Vrest;

   VthRest  = params->VthRest;
   deltaVth = params->deltaVth;

   // add noise
   //

#ifdef USE_CLRANDOM
   l_rnd = cl_random_get(l_rnd);
   if (cl_random_prob(l_rnd) < dt_sec*params->noiseFreqE) {
      l_rnd = cl_random_get(l_rnd);
      l_GSynExc = l_GSynExc + params->noiseAmpE*cl_random_prob(l_rnd);
   }

   l_rnd = cl_random_get(l_rnd);
   if (cl_random_prob(l_rnd) < dt_sec*params->noiseFreqI) {
      l_rnd = cl_random_get(l_rnd);
      l_GSynInh = l_GSynInh + params->noiseAmpI*cl_random_prob(l_rnd);
   }

   l_rnd = cl_random_get(l_rnd);
   if (cl_random_prob(l_rnd) < dt_sec*params->noiseFreqIB) {
      l_rnd = cl_random_get(l_rnd);
      l_GSynInhB = l_GSynInhB + params->noiseAmpIB*cl_random_prob(l_rnd);
   }
#else
   if (pv_random_prob() < dt_sec*params->noiseFreqE) {
      l_GSynExc = l_GSynExc + params->noiseAmpE*pv_random_prob();
   }

   if (pv_random_prob() < dt_sec*params->noiseFreqI) {
      l_GSynInh = l_GSynInh + params->noiseAmpI*pv_random_prob();
   }

   if (pv_random_prob() < dt_sec*params->noiseFreqIB) {
      l_GSynInhB = l_GSynInhB + params->noiseAmpIB*pv_random_prob();
   }
#endif

   l_G_E  = l_GSynExc  + l_G_E *exp_tauE;
   l_G_I  = l_GSynInh  + l_G_I *exp_tauI;
   l_G_IB = l_GSynInhB + l_G_IB*exp_tauIB;

   l_G_E  = (l_G_E  > GMAX) ? GMAX : l_G_E;
   l_G_I  = (l_G_I  > GMAX) ? GMAX : l_G_I;
   l_G_IB = (l_G_IB > GMAX) ? GMAX : l_G_IB;

   tauInf  = (dt/tau) * (1.0 + l_G_E + l_G_I + l_G_IB);
   VmemInf = (Vrest + l_G_E*Vexc + l_G_I*Vinh + l_G_IB*VinhB)
           / (1.0 + l_G_E + l_G_I + l_G_IB);

   l_V = VmemInf + (l_V - VmemInf)*EXP(-tauInf);

   //
   // start of LIF2_update_finish
   //

   l_Vth = VthRest + (l_Vth - VthRest)*exp_tauVth;

   //
   // start of update_f
   //

   bool fired_flag = (l_V > l_Vth);

   l_activ = fired_flag ? 1.0f             : 0.0f;
   l_V     = fired_flag ? Vrest            : l_V;
   l_Vth   = fired_flag ? l_Vth + deltaVth : l_Vth;
   l_G_IB  = fired_flag ? l_G_IB + 1.0f    : l_G_IB;

   //
   // These actions must be done outside of kernel
   //    1. set activity to 0 in boundary (if needed)
   //    2. update active indices
   //

   // store local variables back to global memory
   //
   rnd[k] = l_rnd;

   activity[kex] = l_activ;

   V[k]   = l_V;
   Vth[k] = l_Vth;

   G_E[k]  = l_G_E;
   G_I[k]  = l_G_I;
   G_IB[k] = l_G_IB;

   GSynExc[k]  = 0.0f;
   GSynInh[k]  = 0.0f;
   GSynInhB[k] = 0.0f;

#ifndef PV_USE_OPENCL
   } // loop over k
#endif

}
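The voltage step in this kernel is the exponential-Euler (exact-integration) update for a conductance-based LIF neuron. With total conductance G = G_E + G_I + G_IB, the effective time constant is tau/(1+G) and the steady-state voltage is V_inf = (Vrest + G_E*Vexc + G_I*Vinh + G_IB*VinhB)/(1+G); the code's tauInf = (dt/tau)*(1+G) together with l_V = VmemInf + (l_V - VmemInf)*EXP(-tauInf) therefore advances the membrane as V(t+dt) = V_inf + (V(t) - V_inf)*exp(-dt*(1+G)/tau), which is exact under the assumption that the conductances are held fixed over the step.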
Example #22
double L0NormProbe::getValueInternal(double timevalue, int index) {
   if (index < 0 || index >= getParent()->getNBatch()) { return PV_FAILURE; }
   PVLayerLoc const * loc = getTargetLayer()->getLayerLoc();
   int const nx = loc->nx;
   int const ny = loc->ny;
   int const nf = loc->nf;
   PVHalo const * halo = &loc->halo;
   int const lt = halo->lt;
   int const rt = halo->rt;
   int const dn = halo->dn;
   int const up = halo->up;
   int sum = 0;
   pvadata_t const * aBuffer = getTargetLayer()->getLayerData() + index * getTargetLayer()->getNumExtended();

   if (getMaskLayer()) {
      PVLayerLoc const * maskLoc = getMaskLayer()->getLayerLoc();
      PVHalo const * maskHalo = &maskLoc->halo;
      pvadata_t const * maskLayerData = getMaskLayer()->getLayerData() + index*getMaskLayer()->getNumExtended(); // Is there a DataStore method to return the part of the layer data for a given batch index?
      int const maskLt = maskHalo->lt;
      int const maskRt = maskHalo->rt;
      int const maskDn = maskHalo->dn;
      int const maskUp = maskHalo->up;
      if (maskHasSingleFeature()) {
         assert(getTargetLayer()->getNumNeurons()==nx*ny*nf);
         int nxy = nx*ny;
         #ifdef PV_USE_OPENMP_THREADS
         #pragma omp parallel for reduction(+ : sum)
         #endif // PV_USE_OPENMP_THREADS
         for (int kxy=0; kxy<nxy; kxy++) {
            int kexMask = kIndexExtended(kxy, nx, ny, 1, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask]) {
               int featureBase = kxy*nf;
               for (int f=0; f<nf; f++) {
                  int kex = kIndexExtended(featureBase++, nx, ny, nf, lt, rt, dn, up);
                  pvadata_t val = aBuffer[kex];
                  sum += (val > nnzThreshold || val < -nnzThreshold);
               }
            }
         }         
      }
      else {
         #ifdef PV_USE_OPENMP_THREADS
         #pragma omp parallel for reduction(+ : sum)
         #endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            int kexMask = kIndexExtended(k, nx, ny, nf, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask]) {
               pvadata_t val = aBuffer[kex];
               sum += (val > nnzThreshold || val < -nnzThreshold);
            }
         }
      }
   }
   else {
      #ifdef PV_USE_OPENMP_THREADS
      #pragma omp parallel for reduction(+ : sum)
      #endif // PV_USE_OPENMP_THREADS
      for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {      
         int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
         pvadata_t val = aBuffer[kex];
         sum += (val > nnzThreshold || val < -nnzThreshold);
      }
   }
   
   return (double) sum;
}
Example #23
double L2NormProbe::getValueInternal(double timevalue, int index) {
   if (index < 0 || index >= getParent()->getNBatch()) { return PV_FAILURE; }
   PVLayerLoc const * loc = getTargetLayer()->getLayerLoc();
   int const nx = loc->nx;
   int const ny = loc->ny;
   int const nf = loc->nf;
   PVHalo const * halo = &loc->halo;
   int const lt = halo->lt;
   int const rt = halo->rt;
   int const dn = halo->dn;
   int const up = halo->up;
   double l2normsq = 0.0;
   pvadata_t const * aBuffer = getTargetLayer()->getLayerData() + index * getTargetLayer()->getNumExtended();
   
   if (getMaskLayer()) {
      PVLayerLoc const * maskLoc = getMaskLayer()->getLayerLoc();
      PVHalo const * maskHalo = &maskLoc->halo;
      pvadata_t const * maskLayerData = getMaskLayer()->getLayerData() + index*getMaskLayer()->getNumExtended(); // Is there a DataStore method to return the part of the layer data for a given batch index?
      int const maskLt = maskHalo->lt;
      int const maskRt = maskHalo->rt;
      int const maskDn = maskHalo->dn;
      int const maskUp = maskHalo->up;
      if (maskHasSingleFeature()) {
         assert(getTargetLayer()->getNumNeurons()==nx*ny*nf);
         int nxy = nx*ny;
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int kxy=0; kxy<nxy; kxy++) {
            int kexMask = kIndexExtended(kxy, nx, ny, 1, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask]) {
               int featureBase = kxy*nf;
               for (int f=0; f<nf; f++) {
                  int kex = kIndexExtended(featureBase++, nx, ny, nf, lt, rt, dn, up);
                  pvadata_t val = aBuffer[kex];
                  l2normsq += val*val;
               }
            }
         }         
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            int kexMask = kIndexExtended(k, nx, ny, nf, maskLt, maskRt, maskDn, maskUp);
            pvadata_t val = aBuffer[kex];
            l2normsq += maskLayerData[kexMask] * val*val;
      }
   }
   else {
      if (getTargetLayer()->getSparseFlag()) {
         DataStore * store = parent->icCommunicator()->publisherStore(getTargetLayer()->getLayerId());
         int numActive = (int) store->numActiveBuffer(index)[0];
         unsigned int const * activeList = store->activeIndicesBuffer(index);
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<numActive; k++) {
            int extIndex = activeList[k];
            int inRestricted = !extendedIndexInBorderRegion(extIndex, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);
            pvadata_t val = inRestricted * fabsf(aBuffer[extIndex]);
            l2normsq += val*val;
         }
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            pvadata_t val = aBuffer[kex];
            l2normsq += val*val;
         }
      }
   }

   return l2normsq;
}
Example #24
void MLPOutputLayer::binaryNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   assert(nf == 1);
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   float sum = 0;
   float gtSum = 0;
   int currNumRight = 0;
   int currNumWrong = 0;
   int totNum = 0;

   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      sumsq += pow(A[nExt] - gtA[nExt], 2);
      //Sum over activity to find mean
      sum += A[nExt];
      gtSum += gtA[nExt];
   }

#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &gtSum, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   //Normalize sum to find mean
   sum /= loc->nxGlobal * loc->nyGlobal;
   gtSum /= loc->nxGlobal * loc->nyGlobal;
   //gtSum should equal the (spatially uniform) ground-truth value
   assert(gtSum == gtA[0]);

   //Calculate stats
   if(sum < 0 && gtSum < 0){
      currNumRight++;
   }
   else if(sum > 0 && gtSum > 0){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
Example #25
void MLPOutputLayer::multiclassNonlocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;
   //Winner take all in the output layer
   int currNumRight = 0;
   int currNumWrong = 0;
   assert(classBuffer);
   //Clear classBuffer
   for(int i = 0; i < nf; i++){
      classBuffer[i] = 0;
   }
   //Only go through restricted
   //Calculate the sum squared error
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      int fi = featureIndex(nExt, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      //Sum over x and y direction
      classBuffer[fi] += A[nExt];
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Normalize classBuffer to find mean
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= nx*ny;
   }
   //Reduce all classBuffers through a mean
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, classBuffer, nf, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
   //Normalize classBuffer across processors
   for(int i = 0; i < nf; i++){
      classBuffer[i] /= parent->icCommunicator()->commSize();
   }
#endif // PV_USE_MPI
   //Find max
   float estMaxF = -1000;
   int estMaxFi = -1;
   float actualMaxF = -1000;
   int actualMaxFi = -1;
   for(int i = 0; i < nf; i++){
      if(classBuffer[i] >= estMaxF){
         estMaxF = classBuffer[i];
         estMaxFi = i;
      }
      int nExt = kIndex(loc->halo.lt, loc->halo.up, i, nx+loc->halo.lt+loc->halo.rt, ny+loc->halo.dn+loc->halo.up, nf);
      if(gtA[nExt] >= actualMaxF){
         actualMaxF = gtA[nExt];
         actualMaxFi = i;
      }
   }
   //Calculate stats
   //Found winning feature, compare to ground truth
   if(estMaxFi == actualMaxFi){
      currNumRight++;
   }
   else{
      currNumWrong++;
   }
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumRight, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumWrong, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
#endif // PV_USE_MPI
   numRight += currNumRight;
   numWrong += currNumWrong;
   progressNumRight += currNumRight;
   progressNumWrong += currNumWrong;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 100*float(numRight)/float(numRight+numWrong);
         float progressScore = 100*float(progressNumRight)/float(progressNumRight+progressNumWrong);
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumRight = 0;
      progressNumWrong = 0;
   }
}
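The winner-take-all step in multiclassNonlocalStats() is an argmax over the per-class mean activities. A standalone sketch of the same scan, using -FLT_MAX rather than a -1000 sentinel so arbitrarily negative activities are still handled:

#include <cfloat>

// Index of the largest entry in buf, or -1 for an empty buffer.
// Ties go to the higher index, matching the >= comparison above.
static int argmaxClass(const float * buf, int nf)
{
   float maxVal = -FLT_MAX;
   int maxIdx = -1;
   for (int i = 0; i < nf; i++) {
      if (buf[i] >= maxVal) {
         maxVal = buf[i];
         maxIdx = i;
      }
   }
   return maxIdx;
}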
Exemplo n.º 26
0
void MLPOutputLayer::binaryLocalStats(){
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   int numNeurons = getNumNeurons();
   pvdata_t * A = getCLayer()->activity->data;
   pvdata_t * gtA = gtLayer->getCLayer()->activity->data;
   float sumsq = 0;

   assert(nf == 1);
   int currNumTotPos = 0;
   int currNumTotNeg = 0;
   int currTruePos = 0;
   int currTrueNeg = 0;
   for(int ni = 0; ni < numNeurons; ni++){
      int nExt = kIndexExtended(ni, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      //DCR: a ground truth of 0 marks a don't-care region
      if(gtA[nExt] == 0){
         //sumsq is not updated here either, so a DCR contributes nothing to the score
         continue;
      }
      //Negative
      else if(gtA[nExt] == -1){
         currNumTotNeg++;
         if(A[nExt] < 0){
            currTrueNeg++;
         }
      }
      //Positive
      else if(gtA[nExt] == 1){
         currNumTotPos++;
         if(A[nExt] > 0){
            currTruePos++;
         }
      }
      sumsq += pow(A[nExt] - gtA[nExt], 2);
   }
   //Do MPI
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotPos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currNumTotNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTruePos, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &currTrueNeg, 1, MPI_INT, MPI_SUM, parent->icCommunicator()->communicator());
   MPI_Allreduce(MPI_IN_PLACE, &sumsq, 1, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif
   numTotPos += currNumTotPos;
   numTotNeg += currNumTotNeg;
   truePos += currTruePos;
   trueNeg += currTrueNeg;
   progressNumTotPos += currNumTotPos;
   progressNumTotNeg += currNumTotNeg;
   progressTruePos += currTruePos;
   progressTrueNeg += currTrueNeg;
   //Print if needed
   float timef = parent->simulationTime();
   if(timef >= nextStatProgress){
      //Update nextStatProgress
      nextStatProgress += statProgressPeriod;
      if (parent->columnId()==0) {
         float totalScore = 50*(float(truePos)/float(numTotPos) + float(trueNeg)/float(numTotNeg));
         float progressScore = 50*(float(progressTruePos)/float(progressNumTotPos) + float(progressTrueNeg)/float(progressNumTotNeg));
         fprintf(stdout, "time:%f  layer:\"%s\"  total:%f%%  progressStep:%f%%  energy:%f\n", timef, name, totalScore, progressScore, sumsq/2);
      }
      //Reset progressStats
      progressNumTotPos = 0;
      progressNumTotNeg = 0;
      progressTruePos = 0;
      progressTrueNeg = 0;
   }
}
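The score printed by binaryLocalStats() is a balanced accuracy: the mean of the true-positive and true-negative rates, expressed as a percentage. A hedged sketch of that formula which, unlike the code above, also guards against an empty class:

// Balanced accuracy in percent: 50 * (TP/P + TN/N).
// Returns 0 when either class is empty to avoid dividing by zero.
static float balancedAccuracyPct(int truePos, int numTotPos,
                                 int trueNeg, int numTotNeg)
{
   if (numTotPos == 0 || numTotNeg == 0) { return 0.0f; }
   return 50.0f * ((float)truePos / (float)numTotPos
                 + (float)trueNeg / (float)numTotNeg);
}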
Exemplo n.º 27
0
int PointProbe::calcValues(double timevalue) {
   assert(this->getNumValues()==2);
   double * valuesBuffer = this->getValuesBuffer();
   //Determine which MPI process contains the target point and send its values to the root process
   //Each process calculates local index
   const PVLayerLoc * loc = getTargetLayer()->getLayerLoc();
   //Calculate local coordinates from global
   const int kx0 = loc->kx0;
   const int ky0 = loc->ky0;
   const int kb0 = loc->kb0;
   const int nx = loc->nx;
   const int ny = loc->ny;
   const int nf = loc->nf;
   const int nbatch = loc->nbatch;
   const int xLocLocal = xLoc - kx0;
   const int yLocLocal = yLoc - ky0;
   const int nbatchLocal = batchLoc - kb0;
   
   //if in bounds
   if( xLocLocal >= 0 && xLocLocal < nx &&
       yLocLocal >= 0 && yLocLocal < ny &&
       nbatchLocal >= 0 && nbatchLocal < nbatch){
      const pvdata_t * V = getTargetLayer()->getV();
      const pvdata_t * activity = getTargetLayer()->getLayerData();
      //Send V and A to root
      const int k = kIndex(xLocLocal, yLocLocal, fLoc, nx, ny, nf);
      if(V){
         valuesBuffer[0] = V[k + nbatchLocal*getTargetLayer()->getNumNeurons()];
      }
      else {
         valuesBuffer[0] = 0.0;
      }
      if(activity){
         const int kex = kIndexExtended(k, nx, ny, nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
         valuesBuffer[1] = activity[kex + nbatchLocal * getTargetLayer()->getNumExtended()];
      }
      else {
         valuesBuffer[1] = 0.0;
      }
      //If not the root process, send to the root process
      if(parent->columnId()!=0){
         //valuesBuffer is already a double*; &valuesBuffer would pass a double**
         MPI_Send(valuesBuffer, 2, MPI_DOUBLE, 0, 0, parent->icCommunicator()->communicator());
      }
   }

   //Root process
   if(parent->columnId()==0){
      //Calculate which rank owns the target neuron
      //TODO we need to calculate rank from batch as well
      int xRank = xLoc/nx;
      int yRank = yLoc/ny;

      int srcRank = rankFromRowAndColumn(yRank, xRank, parent->icCommunicator()->numCommRows(), parent->icCommunicator()->numCommColumns());

      //If srcRank is not the root process, receive from that rank
      if(srcRank != 0){
         MPI_Recv(valuesBuffer, 2, MPI_DOUBLE, srcRank, 0, parent->icCommunicator()->communicator(), MPI_STATUS_IGNORE);
      }
   }
   return PV_SUCCESS;
}
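The point-to-point exchange in calcValues() ships a fixed two-double buffer from the owning rank to root. A minimal standalone sketch of that pattern, assuming an initialized MPI environment (the function name and arguments are illustrative):

#include <mpi.h>

// Ship two doubles from srcRank to rank 0 over comm.
// valuesBuffer must point at at least two doubles on both sides.
static void shipValuesToRoot(double * valuesBuffer, int myRank,
                             int srcRank, MPI_Comm comm)
{
   const int tag = 0;
   if (myRank == srcRank && srcRank != 0) {
      // Pass the buffer itself, not its address: MPI expects a void*
      // to the data, and &valuesBuffer would be a double**.
      MPI_Send(valuesBuffer, 2, MPI_DOUBLE, 0 /*root*/, tag, comm);
   }
   else if (myRank == 0 && srcRank != 0) {
      MPI_Recv(valuesBuffer, 2, MPI_DOUBLE, srcRank, tag, comm,
               MPI_STATUS_IGNORE);
   }
}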