int GradientCheckConn::communicateInitInfo() {
   int status = PV::HyPerConn::communicateInitInfo();

   PV::Communicator * comm = parent->icCommunicator();
   const int nxProcs = comm->numCommColumns();
   const int nyProcs = comm->numCommRows();
   const int nProcs = nxProcs * nyProcs;
   if (nProcs != 1) {
      std::cout << "Error: GradientCheckConn cannot be run with MPI\n";
      exit(EXIT_FAILURE);
   }

   estLayer = parent->getLayerFromName(estLayerName);
   if (estLayer == NULL) {
      if (parent->columnId() == 0) {
         fprintf(stderr, "%s \"%s\" error: estLayerName \"%s\" is not a layer in the HyPerCol.\n",
                 parent->parameters()->groupKeywordFromName(name), name, estLayerName);
      }
#ifdef PV_USE_MPI
      MPI_Barrier(parent->icCommunicator()->communicator());
#endif
      exit(EXIT_FAILURE);
   }

   gtLayer = parent->getLayerFromName(gtLayerName);
   if (gtLayer == NULL) {
      if (parent->columnId() == 0) {
         fprintf(stderr, "%s \"%s\" error: gtLayerName \"%s\" is not a layer in the HyPerCol.\n",
                 parent->parameters()->groupKeywordFromName(name), name, gtLayerName);
      }
#ifdef PV_USE_MPI
      MPI_Barrier(parent->icCommunicator()->communicator());
#endif
      exit(EXIT_FAILURE);
   }

   // The gradient check compares the estimate layer against the ground-truth
   // layer element by element, so the two must have the same number of neurons.
   assert(gtLayer->getNumNeurons() == estLayer->getNumNeurons());

   return status;
}
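
The estLayer and gtLayer lookups above repeat the same abort-on-missing-layer pattern. A minimal sketch of that pattern factored into a helper, using only calls already present above (lookupLayerOrDie itself is a hypothetical name, not part of the PetaVision API):

static PV::HyPerLayer * lookupLayerOrDie(PV::HyPerCol * parent, const char * connName,
      const char * paramName, const char * layerName) {
   PV::HyPerLayer * layer = parent->getLayerFromName(layerName);
   if (layer == NULL) {
      // only the root process prints; all processes synchronize, then abort
      if (parent->columnId() == 0) {
         fprintf(stderr, "%s \"%s\" error: %s \"%s\" is not a layer in the HyPerCol.\n",
                 parent->parameters()->groupKeywordFromName(connName), connName,
                 paramName, layerName);
      }
#ifdef PV_USE_MPI
      MPI_Barrier(parent->icCommunicator()->communicator());
#endif
      exit(EXIT_FAILURE);
   }
   return layer;
}

With such a helper, communicateInitInfo() would reduce to two calls of the form estLayer = lookupLayerOrDie(parent, name, "estLayerName", estLayerName);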
int main(int argc, char * argv[])
{
   PV::PV_Init* initObj = new PV::PV_Init(&argc, &argv);
   int err = 0;
   PVLayerLoc loc;

   PV::Communicator * comm = new PV::Communicator(argc, argv);

   int nxProc = comm->numCommColumns();
   int nyProc = comm->numCommRows();

   int commRow = comm->commRow();
   int commCol = comm->commColumn();

   printf("[%d]: nxProc==%d nyProc==%d commRow==%d commCol==%d numNeighbors==%d\n", comm->commRank(), nxProc, nyProc, commRow, commCol, comm->numberOfNeighbors());  fflush(stdout);

   loc.nx = 128;
   loc.ny = 128;

   loc.nxGlobal = nxProc * loc.nx;
   loc.nyGlobal = nyProc * loc.ny;

   // this info not used for send/recv
   loc.kx0 = 0; loc.ky0 = 0;

   const int nxBorder = 16;
   const int nyBorder = 16;

   // padded patch is (16+128+16) x (16+128+16) = 160 x 160 = 25600 items
   int numItems = (2*nxBorder + loc.nx) * (2*nyBorder + loc.ny);


   // Build MPI derived datatypes that describe the halo strips of the local
   // patch; exchange() posts the neighbor sends/receives with them.
   MPI_Datatype * datatypes = comm->newDatatypes(&loc);

   // create a local portion of the "image"
   float * image = new float [numItems];

   // k0 is the global linear index of this rank's first interior pixel;
   // sy is the row stride of the padded local buffer.
   int k0 = commCol * loc.nx + commRow * loc.ny * loc.nxGlobal;
   int sy = 2 * nxBorder + loc.nx;

   // Fill the interior with each pixel's global index; the border (halo)
   // cells are written by the exchange below.
   for (int ky = 0; ky < loc.ny; ky++) {
      int k = k0 + ky * loc.nxGlobal;
      float * buf = image + nxBorder + (ky + nyBorder) * sy;
      for (int kx = 0; kx < loc.nx; kx++) {
         buf[kx] = (float) k++;
      }
   }

   // send and recv the "image"

   comm->exchange(image, datatypes, &loc);

   err = check_borders(image, comm, loc);
   if (err != 0) {
      printf("[%d]: check_borders failed\n", comm->commRank());
   }
   else {
      printf("[%d]: check_borders succeeded\n", comm->commRank());
   }

   delete [] datatypes;
   delete [] image;
   delete comm;

   delete initObj;

   return err;
}
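
check_borders() is defined elsewhere in this test. For illustration, here is a hedged sketch of the complementary interior check (check_interior is a hypothetical helper, not the test's function), re-deriving each pixel's expected value from the fill loop above:

static int check_interior(const float * image, const PVLayerLoc * loc,
      int nxBorder, int nyBorder, int k0) {
   const int sy = 2 * nxBorder + loc->nx;   // row stride of the padded buffer
   for (int ky = 0; ky < loc->ny; ky++) {
      const float * buf = image + nxBorder + (ky + nyBorder) * sy;
      for (int kx = 0; kx < loc->nx; kx++) {
         // each interior pixel was filled with its global linear index
         if (buf[kx] != (float) (k0 + ky * loc->nxGlobal + kx)) {
            return 1;
         }
      }
   }
   return 0;
}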
Example #3
int main(int argc, char * argv[])
{
   int err = 0;
   LayerLoc loc;

   const int nloops = 1000;

   PV::Communicator * comm = new PV::Communicator(&argc, &argv);

   const int rank = comm->commRank();

   const int nxProc = comm->numCommColumns();
   const int nyProc = comm->numCommRows();

   const int commRow = comm->commRow();
   const int commCol = comm->commColumn();

   if (rank == 0) {
      fprintf(stderr, "\n[0]: nxProc==%d nyProc==%d commRow==%d commCol==%d numNeighbors==%d\n\n", nxProc, nyProc, commRow, commCol, comm->numberOfNeighbors());
   }

   loc.nx = 128;
   loc.ny = 128;

   loc.nxGlobal = nxProc * loc.nx;
   loc.nyGlobal = nyProc * loc.ny;

   // this info not used for send/recv
   loc.kx0 = 0; loc.ky0 = 0;

   loc.nPad = 16;
   const int nxBorder = loc.nPad;
   const int nyBorder = loc.nPad;

   int numItems = (2*nxBorder + loc.nx) * (2*nyBorder + loc.ny);

   MPI_Datatype * datatypes = comm->newDatatypes(&loc);

   // create a local portion of the "image"
   float * image = new float [numItems];

   int k0 = commCol * loc.nx + commRow * loc.ny * loc.nxGlobal;
   int sy = 2 * nxBorder + loc.nx;

   for (int ky = 0; ky < loc.ny; ky++) {
      int k = k0 + ky * loc.nxGlobal;
      float * buf = image + nxBorder + (ky + nyBorder) * sy;
      for (int kx = 0; kx < loc.nx; kx++) {
         buf[kx] = (float) k++;
      }
   }

#ifdef PV_USE_MPI
   MPI_Barrier(MPI_COMM_WORLD);
#endif

   start_clock();
   double start = MPI_Wtime();

   for (int n = 0; n < nloops; n++) {
      comm->exchange(image, datatypes, &loc);
   }

#ifdef PV_USE_MPI
   MPI_Barrier(MPI_COMM_WORLD);
#endif

   stop_clock();
   double elapsed = MPI_Wtime() - start;

   if (rank == 0) {
      float cycle_time = (1000 * elapsed) / nloops;  // average milliseconds per exchange
      fprintf(stderr, "\n[0]: number of send/recv cycles == %d\n", nloops);
      fprintf(stderr, "[0]: time per send/recv cycle   == %f ms\n", cycle_time);
      fprintf(stderr, "[0]: elapsed time (MPI_Wtime)   == %f s\n\n", (float) elapsed);
   }

   delete [] datatypes;
   delete [] image;
   delete comm;

   return err;
}
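
To relate the measured cycle time to data volume, one needs the bytes moved per exchange. Assuming each exchange transfers exactly the nPad-wide halo of float data (four edge strips plus four corner patches), a back-of-the-envelope helper might look like this (haloBytes is hypothetical, not part of the API):

static double haloBytes(const LayerLoc * loc) {
   const int b = loc->nPad;
   const long edges   = 2L * b * (loc->nx + loc->ny);   // N, S, E, W strips
   const long corners = 4L * b * b;                     // NE, NW, SE, SW patches
   return (double) (edges + corners) * sizeof(float);
}

Dividing haloBytes(&loc) by the measured cycle time then gives an approximate per-process exchange throughput.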