Example #1
void CellAutomataAgent::computeLoop() {
  _readFromLayer1 = true;
  
  // Double buffering: read the current generation from one layer,
  // write the next generation into the other.
  PPtr<KGridBasic> readLayer = _layer1;
  PPtr<KGridBasic> writeLayer = _layer2;

  Ptr<KGridWindow> displayWindow
      = new KGridWindow(_layer1.AS(KGrid), _computeRange);

  int phase = 1;
  ProximityIterator j(1);
  
  while(!_stopFlag) {
    displayWindow->setSource(readLayer.AS(KGrid));
    _pPixmap.tput(phase, displayWindow.AS(KGrid));
    
    // Exchange grid borders for this phase.
    exchangeBorders(phase);
    
    System::sleep(_delay);
    
    // Compute the next generation from readLayer into writeLayer.
    evaluate(readLayer, writeLayer, phase);
    
    // Swap the roles of the two layers for the next iteration.
    _readFromLayer1 = !_readFromLayer1;
    
    if(_readFromLayer1) {
      readLayer = _layer1;
      writeLayer = _layer2;
    } else {
      readLayer = _layer2;
      writeLayer = _layer1;
    }
    
    phase++;
  }
}
Example #2
void Thread::oneIteration() {
  int sum;
  // Work on a copy so that neighbour counts always read the previous generation.
  fieldType myNewPart(myPartWithBorders);
  for (ll i = 1; i < chunkHeight + 1; i++) {
    for (ll j = 0; j < chunkWidth; j++) {
      sum = numberOfNeighbours(i, j);
      if (myPartWithBorders[i][j]) {
        // A live cell survives with exactly two or three live neighbours.
        myNewPart[i][j] = (sum == 2) || (sum == 3);
      } else {
        // A dead cell becomes alive with exactly three live neighbours.
        myNewPart[i][j] = (sum == 3);
      }
    }
  }
  myPartWithBorders = myNewPart;
  exchangeBorders();
}
Example #3
void Thread::oneIteration() {
  // Take the newest border rows from the queues into the halo rows (0 and chunkHeight+1).
  myPartWithBorders[0] = leftBorders.front();
  leftBorders.pop();
  myPartWithBorders[chunkHeight+1] = rightBorders.front();
  rightBorders.pop();
  int sum;
  fieldType myNewPart(myPartWithBorders);
  for (ll i = 1; i < chunkHeight + 1; i++) {
    for (ll j = 0; j < chunkWidth; j++) {
      sum = numberOfNeighbours(i, j);
      if (myPartWithBorders[i][j]) {
        myNewPart[i][j] = (sum == 2) || (sum == 3);
      } else {
        myNewPart[i][j] = (sum == 3);
      }
    }
  }
  myPartWithBorders = myNewPart;
  exchangeBorders();
}
Example #4
int Publisher::publish(HyPerLayer* pub,
                       int neighbors[], int numNeighbors,
                       int borders[], int numBorders,
                       PVLayerCube* cube,
                       int delay/*default=0*/)
{
   //
   // Everyone publishes border region to neighbors even if no subscribers.
   // This means that everyone should wait as well.
   //

   size_t dataSize = cube->numItems * sizeof(pvdata_t);
   assert(dataSize == (store->size() * store->numberOfBuffers()));

   pvdata_t * sendBuf = cube->data;
   pvdata_t * recvBuf = recvBuffer(0); // Grab the whole receive buffer, which is allocated contiguously

   bool isSparse = store->isSparse();

   if (pub->getLastUpdateTime() >= pub->getParent()->simulationTime()) {
      // Only need to exchange borders if the layer was updated this timestep:
      // copy the entire layer and let neighbors overwrite the border regions.
      memcpy(recvBuf, sendBuf, dataSize);
      exchangeBorders(neighbors, numNeighbors, &cube->loc, 0);
      store->setLastUpdateTime(LOCAL/*bufferId*/, pub->getLastUpdateTime());

      // Updating active indices is done after the MPI wait in HyPerCol
      // to avoid a race condition, because exchangeBorders uses asynchronous MPI.
   }
   else if (store->numberOfLevels() > 1) {
      // If there are delays, copy last level's data to this level.
      // TODO: we could use pointer indirection to cut down on the number of memcpy calls required, if this turns out to be an expensive step
      memcpy(recvBuf, recvBuffer(LOCAL/*bufferId*/,1), dataSize);
      store->setLastUpdateTime(LOCAL/*bufferId*/, pub->getLastUpdateTime());
   }

   return PV_SUCCESS;
}