Example #1
int VaryingHyPerConn::updateWeights(int axonId) {
   int syPatch = yPatchStride(); // stride between successive rows within a patch
   for (int kPatch = 0; kPatch < getNumDataPatches(); kPatch++) {
      PVPatch * W = getWeights(kPatch, axonId);
      int nkPatch = fPatchSize() * W->nx; // active weights per row (features * patch width)
      pvwdata_t * Wdata = get_wData(axonId, kPatch); // W->data;
      pvdata_t * dWdata = get_dwData(axonId, kPatch);
      for (int kyPatch = 0; kyPatch < W->ny; kyPatch++) {
         for (int k = 0; k < nkPatch; k++) {
            Wdata[k] += dWdata[k];
         }
         // Advance both pointers by the row stride so W and dW stay in register.
         Wdata  += syPatch;
         dWdata += syPatch;
      }
   }
   return PV_SUCCESS;
}
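
The example above walks each weight patch row by row: nkPatch counts the active weights in one row, while syPatch, the row stride, can be larger (for instance when a boundary patch is shrunken), so the data pointers are advanced by the stride rather than by the row length. Below is a minimal, self-contained sketch of the same access pattern, using plain arrays and hypothetical names in place of the PVPatch machinery:

#include <cstdio>

// Toy analogue of the strided patch update: rows of `dW` are accumulated
// into `W`, where each row holds `rowLen` active entries but consecutive
// rows are `stride` elements apart (stride >= rowLen).
static void addDwToW(float *W, const float *dW, int nRows, int rowLen, int stride) {
   for (int y = 0; y < nRows; y++) {
      for (int k = 0; k < rowLen; k++) {
         W[k] += dW[k];
      }
      W  += stride; // advance both pointers by the row stride,
      dW += stride; // exactly as updateWeights does with syPatch
   }
}

int main() {
   float W[6]  = {0, 0, 0, 0, 0, 0};
   float dW[6] = {1, 2, 9, 3, 4, 9}; // entries at index 2 and 5 are row padding
   addDwToW(W, dW, /*nRows=*/2, /*rowLen=*/2, /*stride=*/3);
   printf("%g %g %g %g %g %g\n", W[0], W[1], W[2], W[3], W[4], W[5]); // 1 2 0 3 4 0
   return 0;
}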
Example #2
int LCAConn::update_dW(int axonId)
{  // compute dW but don't add them to the weights yet.
   // That takes place in reduceKernels, so that the output is
   // independent of the number of processors.
   int nExt = preSynapticLayer()->getNumExtended();
   int numKernelIndices = getNumDataPatches();
   const pvdata_t * preactbuf = preSynapticLayer()->getLayerData(getDelay(axonId));
   const pvdata_t * postactbuf = postSynapticLayer()->getLayerData(getDelay(axonId));

   int sya = (post->getLayerLoc()->nf * (post->getLayerLoc()->nx + 2*post->getLayerLoc()->nb));

   for (int kExt = 0; kExt < nExt; kExt++) {
      PVPatch * weights = getWeights(kExt, axonId);
      size_t offset = getAPostOffset(kExt, axonId);
      pvdata_t preact = preactbuf[kExt];
      int ny = weights->ny;
      int nk = weights->nx * nfp;
      const pvdata_t * postactRef = &postactbuf[offset];
      pvdata_t * dwdata = get_dwData(axonId, kExt);
      int lineoffsetw = 0;
      int lineoffseta = 0;
      for (int y = 0; y < ny; y++) {
         for (int k = 0; k < nk; k++) {
            dwdata[lineoffsetw + k] += updateRule_dW(preact, postactRef[lineoffseta + k], lineoffseta + k);
         }
         lineoffsetw += syp;
         lineoffseta += sya;
      }
   }

   // Divide by (numNeurons/numKernels)
   int divisor = pre->getNumNeurons() / numKernelIndices;
   assert(divisor * numKernelIndices == pre->getNumNeurons());
   for (int kernelindex = 0; kernelindex < numKernelIndices; kernelindex++) {
      int numpatchitems = nxp * nyp * nfp;
      pvdata_t * dwpatchdata = get_dwDataHead(axonId, kernelindex);
      for (int n = 0; n < numpatchitems; n++) {
         dwpatchdata[n] /= divisor;
      }
   }

   lastUpdateTime = parent->simulationTime();

   return PV_SUCCESS;
}
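
The division at the end is what makes the result processor-independent: every presynaptic neuron that maps to the same kernel accumulates into a shared dW buffer, and dividing by numNeurons/numKernels turns that sum into an average. A toy sketch of the normalization under those assumptions (all names hypothetical, with a trivial neuron-to-kernel mapping):

#include <cassert>
#include <cstdio>
#include <vector>

// Toy analogue of the final normalization in update_dW: `contribs` holds
// one dW contribution per presynaptic neuron; neurons sharing a kernel
// are summed, then the sum is divided by neuronsPerKernel to average it.
static std::vector<float> averageSharedKernels(const std::vector<float> &contribs, int numKernels) {
   assert((int)contribs.size() % numKernels == 0); // mirrors the assert above
   int neuronsPerKernel = (int)contribs.size() / numKernels;
   std::vector<float> dw(numKernels, 0.0f);
   for (size_t i = 0; i < contribs.size(); i++) {
      dw[i % numKernels] += contribs[i]; // accumulate into the shared kernel
   }
   for (float &v : dw) {
      v /= neuronsPerKernel; // same role as `divisor` in update_dW
   }
   return dw;
}

int main() {
   std::vector<float> dw = averageSharedKernels({1.0f, 2.0f, 3.0f, 4.0f}, 2);
   printf("%g %g\n", dw[0], dw[1]); // (1+3)/2 = 2, (2+4)/2 = 3
   return 0;
}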
Example #3
int VaryingHyPerConn::allocateDataStructures() {
   HyPerConn::allocateDataStructures();
   // Initialize all dW values to one.
   int syPatch = yPatchStride();
   for (int kAxon = 0; kAxon < numberOfAxonalArborLists(); kAxon++) {
      for (int kPatch = 0; kPatch < getNumDataPatches(); kPatch++) {
         PVPatch * W = getWeights(kPatch, kAxon);
         int nkPatch = fPatchSize() * W->nx;
         float * dWdata = get_dwData(kAxon, kPatch);
         for (int kyPatch = 0; kyPatch < W->ny; kyPatch++) {
            for (int k = 0; k < nkPatch; k++) {
               dWdata[k] = 1.0f;
            }
            dWdata += syPatch;
         }
      }
   }

   return PV_SUCCESS;
}
Example #4
int LCALIFLateralKernelConn::update_dW(int axonId) {
   if (parent->simulationTime() < dWUpdateTime) {
      return PV_SUCCESS;
   }
   dWUpdateTime += dWUpdatePeriod;
   int nExt = preSynapticLayer()->getNumExtended();
   int numKernelIndices = getNumDataPatches();
   updateIntegratedSpikeCount();
   float target_rate_sq = getTargetRateKHz()*getTargetRateKHz();
   const pvdata_t * preactbuf = integratedSpikeCount;
   const pvdata_t * postactbuf = integratedSpikeCount;

   int sya = (post->getLayerLoc()->nf * (post->getLayerLoc()->nx + post->getLayerLoc()->halo.lt + post->getLayerLoc()->halo.rt));

   const PVLayerLoc * preloc = pre->getLayerLoc();
   int nxpre = preloc->nx;
   int nypre = preloc->ny;
   int nfpre = preloc->nf;
   int nxglob = preloc->nxGlobal;
   int nyglob = preloc->nyGlobal;
   int kx0 = preloc->kx0;
   int ky0 = preloc->ky0;
   for(int kExt=0; kExt<nExt;kExt++) {
      int xglob = kxPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + kx0 - preloc->halo.lt;
      int yglob = kyPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + ky0 - preloc->halo.up;
      if (xglob < 0 || xglob >= nxglob || yglob < 0 || yglob >= nyglob) {
         continue;
      }
      PVPatch * weights = getWeights(kExt,axonId);
      size_t offset = getAPostOffset(kExt, axonId);
      pvdata_t preactrate = preactbuf[kExt]/integrationTimeConstant;
      int ny = weights->ny;
      int nk = weights->nx * nfp;
      pvwdata_t * dwdata = get_dwData(axonId, kExt);
      int lineoffsetw = 0;
      int lineoffseta = 0;
      for( int y=0; y<ny; y++ ) {
         for( int k=0; k<nk; k++ ) {
            int postactindex = offset+lineoffseta+k;
            if (postactindex != kExt) { // Neurons don't inhibit themselves
               pvdata_t postactrate = postactbuf[postactindex]/integrationTimeConstant;
               pvdata_t dw = preactrate*postactrate-target_rate_sq;
               dwdata[lineoffsetw + k] += dw;
            }
         }
         lineoffsetw += syp;
         lineoffseta += sya;
      }
   }
   // Divide each dw by the number of correlations that contributed to that dw (divisorptr was summed over all MPI processes in initialization).
   // Also divide by target_rate_sq to normalize to a dimensionless quantity.
   // The nonlinear filter and the multiplication by dt/tauINH takes place in updateWeights, because the filter has to be applied after reduceKernels
   // and the multiplication by dt/tauINH needs to take place after the filter.
   int patch_size = nxp*nyp*nfp;
   for( int kernelindex=0; kernelindex<numKernelIndices; kernelindex++ ) {
      pvwdata_t * dwpatchdata = get_dwDataHead(axonId,kernelindex);
      float * divisorptr = &interiorCounts[axonId][kernelindex*patch_size];
      for( int n=0; n<patch_size; n++ ) {
         assert(divisorptr[n]>0 || dwpatchdata[n]==0);
         if (divisorptr[n]>0) dwpatchdata[n] /= target_rate_sq * divisorptr[n];
      }
   }

   lastUpdateTime = parent->simulationTime();

   return PV_SUCCESS;
}
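
The core of the rule above is a correlation measure: firing rates are estimated by dividing integrated spike counts by the integration time constant, and each lateral weight accumulates preRate*postRate - targetRate^2, so it grows when two neurons fire together more often than two independent neurons at the target rate would, and shrinks otherwise. A stripped-down sketch of that computation for a single pre/post pair (names hypothetical, rates in kHz as in the example):

#include <cstdio>

// Toy version of the LCALIF lateral dW term for one pre/post pair.
// integratedPre/integratedPost play the role of integratedSpikeCount.
static float lateralDw(float integratedPre, float integratedPost,
                       float integrationTimeConstant, float targetRateKHz) {
   float preRate  = integratedPre  / integrationTimeConstant; // estimated rate
   float postRate = integratedPost / integrationTimeConstant;
   float targetRateSq = targetRateKHz * targetRateKHz;
   // Positive when the pair is more correlated than two independent
   // neurons firing at the target rate; negative when less.
   return preRate * postRate - targetRateSq;
}

int main() {
   // Two neurons firing at exactly the target rate give dw == 0.
   printf("%g\n", lateralDw(5.0f, 5.0f, 500.0f, 0.01f)); // 5/500 = 0.01 kHz
   return 0;
}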