Example #1
extern "C" int main(int ac, char **av)
{
  MPI_CALL(Init(&ac, &av));
  ospcommon::tasking::initTaskingSystem();
  maml::init();

  std::mt19937 rng(std::random_device{}());
  std::uniform_int_distribution<int> distrib(0, 255);

  int numRuns = 1000000;
  int rank = -1;
  int numRanks = 0;
  MPI_CALL(Comm_size(MPI_COMM_WORLD,&numRanks));
  MPI_CALL(Comm_rank(MPI_COMM_WORLD,&rank));

  int numMessages = 100;
  int payloadSize = 100000;

  // receive-side handler: counts messages arriving on this rank
  // (a sketch of MyHandler follows this listing)
  MyHandler handler;
  maml::registerHandlerFor(MPI_COMM_WORLD,&handler);

  // fill one payload buffer with random bytes; every message sends this buffer
  char *payload = (char*)malloc(payloadSize);
  for (int i=0;i<payloadSize;i++)
    payload[i] = (char)distrib(rng);

  // each run: send numMessages messages of payloadSize bytes to every rank,
  // then wait until this rank's handler has seen everything expected so far
  for (int run=0;run<numRuns;run++) {
    MPI_CALL(Barrier(MPI_COMM_WORLD));
    double t0 = ospcommon::getSysTime();
    maml::start();

    for (int mID=0;mID<numMessages;mID++) {
      for (int r=0;r<numRanks;r++) {
        maml::sendTo(MPI_COMM_WORLD,r,std::make_shared<maml::Message>(payload,payloadSize));
      }
    }

    while (handler.numReceived != numRanks*numMessages*(run+1)) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }

    maml::stop();
    double t1 = ospcommon::getSysTime();
    // throughput for this run: total payload bytes received over elapsed time
    double bytesPerSec = double(numRanks) * numMessages * payloadSize / (t1-t0);
    std::string rate = ospcommon::prettyNumber(bytesPerSec);
    printf("rank %i: received %i messages in %lf secs; that is %sB/s\n",
           rank, numRanks*numMessages, t1-t0, rate.c_str());
    MPI_CALL(Barrier(MPI_COMM_WORLD));
  }

  free(payload);

  maml::shutdown();

  MPI_CALL(Barrier(MPI_COMM_WORLD));
  MPI_Finalize();
}
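The MyHandler type used above is not shown in the listing. Below is a minimal sketch of what it could look like, assuming maml exposes a MessageHandler base class with a virtual incoming() callback that is invoked once per delivered message; the exact class and method names are an assumption and may differ between maml versions.

#include <atomic>
#include <memory>

struct MyHandler : public maml::MessageHandler
{
  // assumed callback: maml invokes this once for every message delivered on
  // the communicator this handler was registered for
  void incoming(const std::shared_ptr<maml::Message> &message) override
  {
    numReceived++;
  }

  // polled by the benchmark loop to decide when all messages of a run arrived
  std::atomic<int> numReceived{0};
};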
Example #2
Group::Group(MPI_Comm comm, Consumer *consumer, int32 tag)
  : tag(tag), consumer(consumer)
{
  this->comm = comm;
  MPI_CALL(Comm_rank(comm,&rank));
  MPI_CALL(Comm_size(comm,&size));
}
Example #3
    void MPIDistributedDevice::commit()
    {
      if (!initialized) {
        int _ac = 1;
        const char *_av[] = {"ospray_mpi_distributed_device"};

        // the application may hand us an existing communicator to act as the
        // OSPRay world; otherwise initialize MPI here and own its lifetime
        auto *setComm =
          static_cast<MPI_Comm*>(getParam<void*>("worldCommunicator", nullptr));
        shouldFinalizeMPI = mpicommon::init(&_ac, _av, setComm == nullptr);

        if (setComm) {
          MPI_CALL(Comm_dup(*setComm, &mpicommon::world.comm));
          MPI_CALL(Comm_rank(mpicommon::world.comm, &mpicommon::world.rank));
          MPI_CALL(Comm_size(mpicommon::world.comm, &mpicommon::world.size));
        }

        auto &embreeDevice = api::ISPCDevice::embreeDevice;

        embreeDevice = rtcNewDevice(generateEmbreeDeviceCfg(*this).c_str());
        rtcSetDeviceErrorFunction(embreeDevice, embreeErrorFunc, nullptr);
        RTCError erc = rtcGetDeviceError(embreeDevice);
        if (erc != RTC_ERROR_NONE) {
          // why did the error function not get called !?
          postStatusMsg() << "#osp:init: embree internal error number " << erc;
          assert(erc == RTC_ERROR_NONE);
        }
        initialized = true;
      }

      Device::commit();

      masterRank = getParam<int>("masterRank", 0);

      TiledLoadBalancer::instance =
          make_unique<staticLoadBalancer::Distributed>();
    }
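For context, the "worldCommunicator" parameter read above is supplied by the application before it commits the device. A hedged caller-side sketch, assuming OSPRay 1.x's C API (ospDeviceSetVoidPtr) and assuming the MPI distributed device is registered under the name "mpi_distributed":

#include <mpi.h>
#include <ospray/ospray.h>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // give OSPRay a handle on the application's communicator; commit() above
  // will MPI_Comm_dup it into mpicommon::world
  MPI_Comm appComm = MPI_COMM_WORLD;

  ospLoadModule("mpi");
  OSPDevice device = ospNewDevice("mpi_distributed"); // registered name assumed
  ospDeviceSetVoidPtr(device, "worldCommunicator", (void*)&appComm);
  ospDeviceCommit(device); // runs MPIDistributedDevice::commit() shown above

  /* ... set up scene and render ... */

  MPI_Finalize();
  return 0;
}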
Example #4
MPI_Comm build_grp_comm_inner(int np, int np_outer, int np_inner, int myid,
                              int *myid_inner, int *ranks, MPI_Comm world)

/*=======================================================================*/
/*             Begin routine                                             */
{/*begin routine */
/*=======================================================================*/
/*  Local Variables */

  int myid_outer;
  int i,j;

  MPI_Group world_group,incl_group;
  MPI_Comm gen_comm;
  MPI_Comm junk_comm;

/*=====================================================================*/
/* Set local inner ids                                                 */

  myid_outer = (myid / np_inner);

  if(np_outer*np_inner != np){
    printf("Incorrect number of procs %d vs %d\n",np,np_outer*np_inner);
    Finalize();
    exit(1);
  }/*endif*/

/*=======================================================================*/
/*             Get rank of processor in new communicator                 */

  Comm_group(world,&world_group);

  for(j=0;j < np_outer;j++){
 /*-----------------------------------------------------------------------*/
 /* i) set the ranks   */

     for(i=0;i<np_inner;i++){
       ranks[i] = np_inner*j+i;
     }/*endfor*/

 /*-----------------------------------------------------------------------*/
 /* ii) Create the new communicator                                      */

     Group_incl(world_group,np_inner,ranks,&incl_group);
     Barrier(world);
     if(myid_outer==j){
       Comm_create(world,incl_group,&gen_comm);
       Barrier(world);
       Comm_rank(gen_comm,myid_inner);
       if(myid != myid_outer*np_inner + *myid_inner){
         printf("Problems in building inner communicator\n");
         printf("ID expected %d not equal to ID given %d\n",myid,
                    myid_outer*np_inner + *myid_inner);
         printf("with myid_out %d and myid_in %d\n",myid_outer,*myid_inner);
       }/*endif*/
     }else{
       Comm_create(world,incl_group,&junk_comm);
       Barrier(world);
     }/*endif*/
     Group_free(&incl_group);
     Barrier(world);
   }/*endfor*/

   Group_free(&world_group);

   return gen_comm;

/*------------------------------------------------------------------------*/
  } /*end routine*/ 
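A hedged sketch of how build_grp_comm_inner might be called to carve an np = np_outer * np_inner processor layout into inner communicators; the parameter values, the example_caller wrapper, and the sizing of the ranks scratch array are assumptions, not taken from the original code base:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical caller: split np ranks into np_outer groups of np_inner ranks */
void example_caller(void)
{
  int np, myid, myid_inner;
  MPI_Comm_size(MPI_COMM_WORLD, &np);
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);

  int np_inner = 4;              /* ranks per inner communicator (assumed)     */
  int np_outer = np / np_inner;  /* number of inner communicators              */
  int *ranks = (int *) malloc(np_inner * sizeof(int)); /* scratch for Group_incl */

  MPI_Comm comm_inner = build_grp_comm_inner(np, np_outer, np_inner, myid,
                                             &myid_inner, ranks, MPI_COMM_WORLD);

  printf("rank %d is rank %d in its inner communicator\n", myid, myid_inner);
  free(ranks);
}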
Example #5
// query this rank's position within 'comm'; this group includes the calling
// rank, so record that fact
void Group::makeIntraComm()
{
  MPI_CALL(Comm_rank(comm,&rank));
  MPI_CALL(Comm_size(comm,&size));
  containsMe = true;
}