Example #1
int PoolingConn::deliverPresynapticPerspective(PVLayerCube const * activity, int arborID) {

    //Check if we need to update based on connection's channel
    if(getChannel() == CHANNEL_NOUPDATE) {
        return PV_SUCCESS;
    }
    assert(post->getChannel(getChannel()));

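    // Stochastic accumulation adds discrete events, so activity is scaled by
    // the raw timestep; every other accumulation type uses the
    // convert-to-rate factor instead.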
    float dt_factor;
    if (getPvpatchAccumulateType()==ACCUMULATE_STOCHASTIC) {
        dt_factor = getParent()->getDeltaTime();
    }
    else {
        dt_factor = getConvertToRateDeltaTimeFactor();
    }

    const PVLayerLoc * preLoc = preSynapticLayer()->getLayerLoc();
    const PVLayerLoc * postLoc = postSynapticLayer()->getLayerLoc();

    assert(arborID >= 0);
    const int numExtended = activity->numItems;

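    // Max pooling resets gSyn to -INFINITY so that any real activity wins the
    // comparison; the other pooling types keep the default reset of 0.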
    float resetVal = 0;
    if(getPvpatchAccumulateType() == ACCUMULATE_MAXPOOLING) {
        resetVal = -INFINITY;
        float* gSyn = post->getChannel(getChannel());
        //gSyn is reset here: initialize every postsynaptic neuron (all batches) to resetVal
#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for
#endif
        for(int i = 0; i < post->getNumNeuronsAllBatches(); i++) {
            gSyn[i] = resetVal;
        }

    }


    clearGateIdxBuffer();

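    // Each batch element addresses its own slice of the extended presynaptic
    // activity and of the restricted postsynaptic gSyn buffer.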
    for(int b = 0; b < parent->getNBatch(); b++) {
        pvdata_t * activityBatch = activity->data + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
        pvdata_t * gSynPatchHeadBatch = post->getChannel(getChannel()) + b * postLoc->nx * postLoc->ny * postLoc->nf;
        int* gatePatchHeadBatch = NULL;
        if(needPostIndexLayer) {
            gatePatchHeadBatch = postIndexLayer->getChannel(CHANNEL_EXC) + b * postIndexLayer->getNumNeurons();
        }

        unsigned int * activeIndicesBatch = NULL;
        if(activity->isSparse) {
            activeIndicesBatch = activity->activeIndices + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
        }
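        // Sparse activity visits only the active presynaptic sites; dense
        // activity visits every extended index.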
        int numLoop;
        if(activity->isSparse) {
            numLoop = activity->numActive[b];
        }
        else {
            numLoop = numExtended;
        }

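        // Reset each thread's private gate-index buffer; -1 marks "no winner yet".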
        if(thread_gateIdxBuffer) {
#ifdef PV_USE_OPENMP_THREADS
            #pragma omp parallel for
#endif
            for(int i = 0; i < parent->getNumThreads() * post->getNumNeurons(); i++) {
                int ti = i/post->getNumNeurons();
                int ni = i % post->getNumNeurons();
                thread_gateIdxBuffer[ti][ni] = -1;
            }
        }

#ifdef PV_USE_OPENMP_THREADS
        //Clear each thread's private gSyn buffer
        if(thread_gSyn) {
            int numNeurons = post->getNumNeurons();
            #pragma omp parallel for
            for(int i = 0; i < parent->getNumThreads() * numNeurons; i++) {
                int ti = i/numNeurons;
                int ni = i % numNeurons;
                thread_gSyn[ti][ni] = resetVal;
            }
        }
#endif // PV_USE_OPENMP_THREADS

#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for schedule(static)
#endif
        for (int loopIndex = 0; loopIndex < numLoop; loopIndex++) {
            int kPreExt;
            if(activity->isSparse) {
                kPreExt = activeIndicesBatch[loopIndex];
            }
            else {
                kPreExt = loopIndex;
            }

            float a = activityBatch[kPreExt] * dt_factor;
            //if (a == 0.0f) continue;

            //If we're using thread_gSyn, point at this thread's private buffer
            pvdata_t * gSynPatchHead;
            int * gatePatchHead = NULL;
#ifdef PV_USE_OPENMP_THREADS
            if(thread_gSyn) {
                int ti = omp_get_thread_num();
                gSynPatchHead = thread_gSyn[ti];
            }
            else {
                gSynPatchHead = gSynPatchHeadBatch;
            }

            if(needPostIndexLayer) {
                if(thread_gateIdxBuffer) {
                    int ti = omp_get_thread_num();
                    gatePatchHead = thread_gateIdxBuffer[ti];
                }
                else {
                    gatePatchHead = gatePatchHeadBatch;
                }
            }
#else // PV_USE_OPENMP_THREADS
            gSynPatchHead = gSynPatchHeadBatch;
            if(needPostIndexLayer) {
                gatePatchHead = gatePatchHeadBatch;
            }
#endif // PV_USE_OPENMP_THREADS
            // The code below inlines deliverOnePreNeuronActivity(kPreExt, arborID, a, gSynPatchHead, gatePatchHead):

            PVPatch * weights = getWeights(kPreExt, arborID);
            const int nk = weights->nx * fPatchSize();
            const int ny = weights->ny;
            const int sy  = getPostNonextStrides()->sy;       // stride in layer
            pvgsyndata_t * postPatchStart = gSynPatchHead + getGSynPatchStart(kPreExt, arborID);
            int* postGatePatchStart = gatePatchHead + getGSynPatchStart(kPreExt, arborID);

            const int kxPreExt = kxPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
            const int kyPreExt = kyPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
            const int kfPre = featureIndex(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);

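            // The (x, y, feature) coordinates recovered above are local extended;
            // adding kx0/ky0 shifts them into global extended coordinates.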
            const int kxPreGlobalExt = kxPreExt + preLoc->kx0;
            const int kyPreGlobalExt = kyPreExt + preLoc->ky0;

            const int kPreGlobalExt = kIndex(kxPreGlobalExt, kyPreGlobalExt, kfPre, preLoc->nxGlobal + preLoc->halo.lt + preLoc->halo.rt, preLoc->nyGlobal + preLoc->halo.up + preLoc->halo.dn, preLoc->nf);

            int offset = kfPre;
            int sf = fPatchSize();
            pvwdata_t w = 1.0;
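            // Sum pooling rescales the weight so the accumulated result is an
            // average over the resolution-adjusted pooling window.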
            if(getPvpatchAccumulateType() == ACCUMULATE_SUMPOOLING) {
                float relative_XScale = pow(2, (post->getXScale() - pre->getXScale()));
                float relative_YScale = pow(2, (post->getYScale() - pre->getYScale()));
                w = 1.0/(nxp*nyp*relative_XScale*relative_YScale);
            }
            void* auxPtr = NULL;
            for (int y = 0; y < ny; y++) {
                if(needPostIndexLayer) {
                    auxPtr = (postGatePatchStart+ y*sy + offset);
                }
                (accumulateFunctionPointer)(kPreGlobalExt, nk, postPatchStart + y*sy + offset, a, &w, auxPtr, sf);
            }
        }
#ifdef PV_USE_OPENMP_THREADS
        //Accumulate thread buffers back into gSyn. TODO: could this be done once in HyPerLayer instead of once per connection?
        if(thread_gSyn) {
            pvdata_t * gSynPatchHead = gSynPatchHeadBatch;
            int * gateIdxBuffer = NULL;
            if(needPostIndexLayer && thread_gateIdxBuffer) {
                gateIdxBuffer = gatePatchHeadBatch;
            }
            int numNeurons = post->getNumNeurons();
            //Looping over neurons first to be thread safe
            #pragma omp parallel for
            for(int ni = 0; ni < numNeurons; ni++) {
                //Different for maxpooling
                if(getPvpatchAccumulateType() == ACCUMULATE_MAXPOOLING) {
                    for(int ti = 0; ti < parent->getNumThreads(); ti++) {
                        if(gSynPatchHead[ni] < thread_gSyn[ti][ni]) {
                            gSynPatchHead[ni] = thread_gSyn[ti][ni];
                            if(needPostIndexLayer && thread_gateIdxBuffer) {
                                gateIdxBuffer[ni] = thread_gateIdxBuffer[ti][ni];
                                assert(gateIdxBuffer[ni] >= 0);
                            }
                        }
                    }
                }
                else {
                    for(int ti = 0; ti < parent->getNumThreads(); ti++) {
                        gSynPatchHead[ni] += thread_gSyn[ti][ni];
                    }
                }
            }
        }
#endif
    }
    if(activity->isSparse) {
        pvdata_t * gSyn = post->getChannel(getChannel());
        for (int k=0; k<post->getNumNeuronsAllBatches(); k++) {
            if (gSyn[k]==-INFINITY) {
                gSyn[k] = 0.0f;
            }
        }
    }
    return PV_SUCCESS;
}
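
Both delivery routines above use the same OpenMP pattern: during the scatter
phase each thread writes into a private gSyn buffer, and afterwards the
buffers are reduced neuron by neuron, so no two threads ever write the same
output slot. The following is a minimal self-contained sketch of that
pattern; all names (threadBuf, numNeurons, and so on) are illustrative and
not part of PetaVision.

#include <omp.h>
#include <vector>
#include <cstdio>

int main() {
    const int numNeurons = 8;
    const int numThreads = omp_get_max_threads();

    // One private buffer per thread, cleared to the additive identity
    // (the PetaVision code above uses resetVal instead).
    std::vector<std::vector<float>> threadBuf(
        numThreads, std::vector<float>(numNeurons, 0.0f));
    std::vector<float> gSyn(numNeurons, 0.0f);

    // Scatter phase: each thread touches only its own buffer, so the
    // unpredictable target indices need no locking.
    #pragma omp parallel for
    for (int k = 0; k < 64; k++) {
        int ti = omp_get_thread_num();
        threadBuf[ti][k % numNeurons] += 1.0f; // stand-in for the accumulate function
    }

    // Reduce phase: parallelize over neurons so each output slot is owned by
    // exactly one thread, the same "looping over neurons first" trick as above.
    #pragma omp parallel for
    for (int ni = 0; ni < numNeurons; ni++) {
        for (int ti = 0; ti < numThreads; ti++) {
            gSyn[ni] += threadBuf[ti][ni];
        }
    }

    for (int ni = 0; ni < numNeurons; ni++) { printf("%g ", gSyn[ni]); }
    printf("\n"); // every slot receives 64/8 = 8 contributions
    return 0;
}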
int TransposePoolingConn::deliverPresynapticPerspective(PVLayerCube const * activity, int arborID) {
   //Check if we need to update based on connection's channel
   if(getChannel() == CHANNEL_NOUPDATE){
      return PV_SUCCESS;
   }
   assert(post->getChannel(getChannel()));

   const PVLayerLoc * preLoc = preSynapticLayer()->getLayerLoc();
   const PVLayerLoc * postLoc = postSynapticLayer()->getLayerLoc();

   assert(arborID >= 0);
   const int numExtended = activity->numItems;

   //Grab postIdxLayer's data
   int* postIdxData = NULL;
   if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
      PoolingIndexLayer* postIndexLayer = originalConn->getPostIndexLayer();
      assert(postIndexLayer);
      //Make sure this layer is an integer layer
      assert(postIndexLayer->getDataType() == PV_INT);
      DataStore * store = parent->icCommunicator()->publisherStore(postIndexLayer->getLayerId());
      int delay = getDelay(arborID);

      //TODO this is currently a hack, need to properly implement data types.
      postIdxData = (int*) store->buffer(LOCAL, delay);
   }

   for(int b = 0; b < parent->getNBatch(); b++){
      pvdata_t * activityBatch = activity->data + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
      pvdata_t * gSynPatchHeadBatch = post->getChannel(getChannel()) + b * postLoc->nx * postLoc->ny * postLoc->nf;
      int * postIdxDataBatch = NULL;
      if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
         postIdxDataBatch = postIdxData + b * originalConn->getPostIndexLayer()->getNumExtended();
      }

      unsigned int * activeIndicesBatch = NULL;
      if(activity->isSparse){
         activeIndicesBatch = activity->activeIndices + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
      }

      int numLoop;
      if(activity->isSparse){
         numLoop = activity->numActive[b];
      }
      else{
         numLoop = numExtended;
      }

#ifdef PV_USE_OPENMP_THREADS
      //Clear each thread's private gSyn buffer
      if(thread_gSyn){
         int numNeurons = post->getNumNeurons();
#pragma omp parallel for
         for(int i = 0; i < parent->getNumThreads() * numNeurons; i++){
            int ti = i/numNeurons;
            int ni = i % numNeurons;
            thread_gSyn[ti][ni] = 0;
         }
      }
#endif // PV_USE_OPENMP_THREADS


#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for schedule(static)
#endif
      for (int loopIndex = 0; loopIndex < numLoop; loopIndex++) {
         int kPreExt;
         if(activity->isSparse){
            kPreExt = activeIndicesBatch[loopIndex];
         }
         else{
            kPreExt = loopIndex;
         }

         float a = activityBatch[kPreExt];
         if (a == 0.0f) continue;

         //If we're using thread_gSyn, point at this thread's private buffer
         pvdata_t * gSynPatchHead;
#ifdef PV_USE_OPENMP_THREADS
         if(thread_gSyn){
            int ti = omp_get_thread_num();
            gSynPatchHead = thread_gSyn[ti];
         }
         else{
            gSynPatchHead = gSynPatchHeadBatch;
         }
#else // PV_USE_OPENMP_THREADS
         gSynPatchHead = gSynPatchHeadBatch;
#endif // PV_USE_OPENMP_THREADS

         const int kxPreExt = kxPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
         const int kyPreExt = kyPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
         const int kfPre = featureIndex(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);

         if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
            const int kxPreGlobalExt = kxPreExt + preLoc->kx0;
            const int kyPreGlobalExt = kyPreExt + preLoc->ky0;
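            // Skip border (halo) sites that fall outside the global restricted layer.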
            if(kxPreGlobalExt < preLoc->halo.lt || kxPreGlobalExt >= preLoc->nxGlobal + preLoc->halo.lt ||
               kyPreGlobalExt < preLoc->halo.up || kyPreGlobalExt >= preLoc->nyGlobal + preLoc->halo.up){
               continue;
            }

            //Look up the stored global extended index of the winning postsynaptic neuron
            int postGlobalExtIdx = postIdxDataBatch[kPreExt];

            // If all inputs are zero and input layer is sparse, postGlobalExtIdx will still be -1.
            if(postGlobalExtIdx == -1) { continue; }

            //Make sure the index is in bounds
            assert(postGlobalExtIdx >= 0 && postGlobalExtIdx <
                  (postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt) * 
                  (postLoc->nyGlobal + postLoc->halo.up + postLoc->halo.dn) * 
                  postLoc->nf);

            const int kxPostGlobalExt = kxPos(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);
            const int kyPostGlobalExt = kyPos(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);
            const int kfPost = featureIndex(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);

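            // Convert the winner's global extended coordinates back to local
            // restricted coordinates; skip it if it lives on another process.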
            const int kxPostLocalRes = kxPostGlobalExt - postLoc->kx0 - postLoc->halo.lt;
            const int kyPostLocalRes = kyPostGlobalExt - postLoc->ky0 - postLoc->halo.up;
            if(kxPostLocalRes < 0 || kxPostLocalRes >= postLoc->nx|| 
               kyPostLocalRes < 0 || kyPostLocalRes >= postLoc->ny){
               continue;
            }

            const int kPostLocalRes = kIndex(kxPostLocalRes, kyPostLocalRes, kfPost, postLoc->nx, postLoc->ny, postLoc->nf);
            gSynPatchHeadBatch[kPostLocalRes] = a;
         }
         else{
            PVPatch * weights = getWeights(kPreExt, arborID);
            const int nk = weights->nx * fPatchSize();
            const int ny = weights->ny;
            pvgsyndata_t * postPatchStart = gSynPatchHead + getGSynPatchStart(kPreExt, arborID);
            const int sy  = getPostNonextStrides()->sy;       // stride in layer

            int offset = kfPre;
            int sf = fPatchSize();

            pvwdata_t w = 1.0;
            if(getPvpatchAccumulateType() == ACCUMULATE_SUMPOOLING){
              float relative_XScale = pow(2, (post->getXScale() - pre->getXScale()));
              float relative_YScale = pow(2, (post->getYScale() - pre->getYScale()));
              w = 1.0/(nxp*nyp*relative_XScale*relative_YScale);
            }
            void* auxPtr = NULL;
            for (int y = 0; y < ny; y++) {
               (accumulateFunctionPointer)(0, nk, postPatchStart + y*sy + offset, a, &w, auxPtr, sf);
            }
         }
      }

#ifdef PV_USE_OPENMP_THREADS
      //Set back into gSyn
      if(thread_gSyn){
         pvdata_t * gSynPatchHead = gSynPatchHeadBatch;
         int numNeurons = post->getNumNeurons();
         //Looping over neurons first to be thread safe
#pragma omp parallel for
         for(int ni = 0; ni < numNeurons; ni++){
            for(int ti = 0; ti < parent->getNumThreads(); ti++){
               if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
                  if(gSynPatchHead[ni] < fabs(thread_gSyn[ti][ni])){
                     gSynPatchHead[ni] = thread_gSyn[ti][ni];
                  }
               }
               else{
                  gSynPatchHead[ni] += thread_gSyn[ti][ni];
               }
            }
         }
      }
#endif
   }
   return PV_SUCCESS;
}
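
The max-pooling branch of TransposePoolingConn is essentially a scatter
through stored argmax indices: the forward PoolingConn records in its
postIndexLayer which input location won each pooling window, and the
transpose pass routes each pooled value back to exactly that location. Below
is a minimal sketch of that round trip under simplifying assumptions (a 1-D
layer, window and stride of 2, no halos, batches, or MPI); the names are
illustrative, not PetaVision's.

#include <vector>
#include <cmath>
#include <cstdio>

int main() {
    const std::vector<float> input = {0.1f, 0.7f, 0.3f, 0.2f, 0.9f, 0.4f};
    const int window = 2;
    const int nOut = static_cast<int>(input.size()) / window;

    std::vector<float> pooled(nOut, -INFINITY);
    std::vector<int> gateIdx(nOut, -1); // plays the role of the postIndexLayer

    // Forward pooling: keep the max of each window and remember its source.
    for (int o = 0; o < nOut; o++) {
        for (int k = 0; k < window; k++) {
            int i = o * window + k;
            if (input[i] > pooled[o]) {
                pooled[o] = input[i];
                gateIdx[o] = i;
            }
        }
    }

    // Transpose pass: scatter each pooled value back to its recorded source,
    // analogous to gSynPatchHeadBatch[kPostLocalRes] = a in the max branch above.
    std::vector<float> unpooled(input.size(), 0.0f);
    for (int o = 0; o < nOut; o++) {
        if (gateIdx[o] >= 0) { unpooled[gateIdx[o]] = pooled[o]; }
    }

    for (float v : unpooled) { printf("%g ", v); } // prints: 0 0.7 0.3 0 0.9 0
    printf("\n");
    return 0;
}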