Example no. 1
int MomentumConn::updateWeights(int arborId){
   if(timeBatchIdx != timeBatchPeriod - 1){
      return PV_SUCCESS;
   }
   //Add momentum right before updateWeights
   for(int kArbor = 0; kArbor < this->numberOfAxonalArborLists(); kArbor++){
      applyMomentum(kArbor);
   }

   // Save the current dw buffer into prev_dwDataStart so it can serve as the momentum term on the next update
   assert(prev_dwDataStart);
   std::memcpy(*prev_dwDataStart, *get_dwDataStart(),
         sizeof(pvwdata_t) *
         numberOfAxonalArborLists() * 
         nxp * nyp * nfp *
         getNumDataPatches());


   // add dw to w
   for(int kArbor = 0; kArbor < this->numberOfAxonalArborLists(); kArbor++){
      pvwdata_t * w_data_start = get_wDataStart(kArbor);
      for( long int k=0; k<patchStartIndex(getNumDataPatches()); k++ ) {
         w_data_start[k] += get_dwDataStart(kArbor)[k];
      }
   }
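   // All arbors have been handled in this call, so return PV_BREAK to tell the
   // caller not to invoke updateWeights again for the remaining arbors.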
   return PV_BREAK;
}
Example no. 2
int CopyConn::copy(int arborId) {
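   // Copy every weight patch of this arbor from the original connection's weight
   // buffer into this connection's weight buffer.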
   size_t arborsize = (size_t) (xPatchSize() * yPatchSize() * fPatchSize() * getNumDataPatches()) * sizeof(pvwdata_t);
   memcpy(get_wDataStart(arborId), originalConn->get_wDataStart(arborId), arborsize);
   return PV_SUCCESS;
}
Example no. 3
int LCALIFLateralKernelConn::allocateDataStructures() {
   int status = HyPerConn::allocateDataStructures();

   // Neurons don't inhibit themselves, only their neighbors; set self-interaction weights to zero.
   assert(nxp % 2 == 1 && nyp % 2 == 1 && getNumDataPatches()==nfp);
   for (int k=0; k<getNumDataPatches(); k++) {
      int n = kIndex((nxp-1)/2, (nyp-1)/2, k, nxp, nyp, nfp);
      get_wDataHead(0, k)[n] = 0.0f;
   }

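   // Allocate the integrated spike count buffer over the extended presynaptic layer
   // and initialize each entry to its equilibrium value.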
   integratedSpikeCountCube = pvcube_new(pre->getLayerLoc(), pre->getNumExtended());
   integratedSpikeCount = integratedSpikeCountCube->data;
   for (int k=0; k<pre->getNumExtended(); k++) {
      integratedSpikeCount[k] = integrationTimeConstant*getTargetRateKHz(); // Spike counts initialized to equilibrium value
   }
   mpi_datatype = Communicator::newDatatypes(pre->getLayerLoc());
   if (mpi_datatype==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn \"%s\" error creating mpi_datatype\n", name);
      abort();
   }

   // Compute the number of times each patch contributes to dw, for proper averaging.
   int num_arbors = numberOfAxonalArborLists();
   interiorCounts = (float **) calloc(num_arbors, sizeof(float *));
   if (interiorCounts==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn::allocateDataStructures \"%s\" error: unable to allocate memory for interiorCounts pointer\n", name);
      abort();
   }
   interiorCounts[0] = (float *) calloc(num_arbors*getNumDataPatches()*nxp*nyp*nfp, sizeof(float));
   if (interiorCounts[0]==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn::allocateDataStructures \"%s\" error: unable to allocate memory for interiorCounts\n", name);
      abort();
   }
   for (int arbor=1; arbor<num_arbors; arbor++) {
      interiorCounts[arbor] = interiorCounts[0]+arbor*getNumDataPatches()*nxp*nyp*nfp;
   }

   const PVLayerLoc * preloc = pre->getLayerLoc();
   int nxpre = preloc->nx;
   int nypre = preloc->ny;
   int nfpre = preloc->nf;

   int nExt = pre->getNumExtended();
   int sya = getPostExtStrides()->sy;
   int nxglob = preloc->nxGlobal;
   int nyglob = preloc->nyGlobal;
   int kx0 = preloc->kx0;
   int ky0 = preloc->ky0;
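   // For every extended presynaptic neuron that lies inside the global layer
   // boundary, increment the count of each weight element it contributes to,
   // skipping the self-interaction entry.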
   for (int arbor=0; arbor<numberOfAxonalArborLists(); arbor++) {
      for(int kExt=0; kExt<nExt;kExt++) {
         int xglob = kxPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + kx0 - preloc->halo.lt;
         int yglob = kyPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + ky0 - preloc->halo.up;
         if (xglob < 0 || xglob >= nxglob || yglob < 0 || yglob >= nyglob) {
            continue;
         }
         PVPatch * weights = getWeights(kExt,arbor);
         int offset = (int) getAPostOffset(kExt, arbor);
         int ny = weights->ny;
         int nk = weights->nx * nfp;
         int interiorCountOffset = get_wData(arbor, kExt)-get_wDataStart(arbor);
         int lineoffsetw = 0;
         int lineoffseta = 0;
         for( int y=0; y<ny; y++ ) {
            for( int k=0; k<nk; k++ ) {
               int postactindex = offset+lineoffseta+k;
               if (postactindex != kExt) { // Neurons don't inhibit themselves
                  interiorCounts[arbor][interiorCountOffset + lineoffsetw + k]++;
               }
            }
            lineoffsetw += syp;
            lineoffseta += sya;
         }
      }
   }
   int bufsize = numberOfAxonalArborLists() * getNumDataPatches() * nxp * nyp * nfp;
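   // Sum the per-process counts in place so every MPI process ends up with the
   // global contribution count for each weight element.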
// TODO-CER-2014.3.26 - Ensure that reduction is done when not using MPI
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, interiorCounts[0], bufsize, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif

   return status;
}
Example no. 4
//Connections update first
int GradientCheckConn::updateState(double time, double dt){
   int status = PV_SUCCESS;
   int weightIdx = parent->getCurrentStep() - parent->getInitialStep() - 2;
   std::cout << "weightIdx " << weightIdx << "\n";
   int numPatch = nxp * nyp * nfp;
   int numData = getNumDataPatches();
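   // Decompose the flat weight index into its arbor, data-patch, and within-patch components.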
   int arborIdx = weightIdx / (numPatch * numData);
   int dataIdx = (weightIdx / numPatch) % numData;
   int patchIdx = weightIdx % numPatch;

   if(firstRun){
      initialize_dW(0);
      firstRun = false;
      return PV_SUCCESS;
   }
   
   //Grab cost from previous timestep
   if(secondRun){
      //First run does regular updateState to calculate dw buffer
      for(int arborId=0;arborId<numberOfAxonalArborLists();arborId++) {
         status = calc_dW();        // Calculate changes in weights
         if (status==PV_BREAK) { break; }
         assert(status == PV_SUCCESS);
      }
      //for (int arborID = 0; arborID < numberOfAxonalArborLists(); arborID++) {
      //   if(sharedWeights){
      //      status = reduceKernels(arborID); // combine partial changes in each column
      //      if (status == PV_BREAK) {
      //         break;
      //      }
      //      assert(status == PV_SUCCESS);
      //   }
      //}
      //No update weights
      origCost = getCost();
      secondRun = false;
   }

   //Does not update after first run
   //Check if we are in bounds for non-shared weights
   if(!sharedWeights){
      PVPatch* weights = getWeights(dataIdx, arborIdx);
      //Calculate x and y of patchIdx and compare it to offset
      int xPatchIdx = kxPos(patchIdx, nxp, nyp, nfp);
      int yPatchIdx = kyPos(patchIdx, nxp, nyp, nfp);
      int xOffsetIdx = kxPos(weights->offset, nxp, nyp, nfp);
      int yOffsetIdx = kyPos(weights->offset, nxp, nyp, nfp);

      //If index is oob, skip
      if(xPatchIdx < xOffsetIdx || xPatchIdx >= xOffsetIdx + weights->nx ||
         yPatchIdx < yOffsetIdx || yPatchIdx >= yOffsetIdx + weights->ny){
         return PV_SUCCESS;
      }
   }

   //Calculate difference in numerical method and backprop method
   if(prevIdx != -1){
      currCost = getCost();
      //Check for accuracy
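      // Finite-difference estimate: the weight at prevIdx was perturbed by epsilon on
      // the previous step, so (currCost - origCost)/epsilon approximates dCost/dw.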
      float numGradient = (currCost - origCost)/epsilon;
      float backpropGradient = get_dwDataStart()[0][prevIdx] / dWMax;
      std::cout << "Numerical gradient: " << numGradient << "  Backprop gradient: " << backpropGradient << "\n";

      //if(fabs(numGradient + backpropGradient) >= .1){
      //   std::cout << "Numerical gradient: " << numGradient << "  Backprop gradient: " << backpropGradient << "\n";
      //   exit(-1);

      //}
   }

   //Restore weight
   if(prevIdx != -1){
      std::cout << "Restoring weight " << prevIdx << " to " << prevWeightVal << "\n";
      get_wDataStart()[0][prevIdx] = prevWeightVal;
   }

   //Set next weight if not the end
   if(weightIdx < numberOfAxonalArborLists() * numData * numPatch){
      prevWeightVal = get_wDataStart()[0][weightIdx];
      prevIdx = weightIdx;
      get_wDataStart()[0][weightIdx] += epsilon;
      std::cout << "Setting weight " << weightIdx << " to " << prevWeightVal + epsilon << "\n";
   }
   else{
      std::cout << "END\n";
   }

   return status;
}