Example #1
extern "C" int main(int ac, char **av)
{
  MPI_CALL(Init(&ac, &av));
  ospcommon::tasking::initTaskingSystem();
  maml::init();

  std::mt19937 rng(std::random_device{}());
  std::uniform_int_distribution<int> distrib(0, 255);

  int numRuns = 1000000;
  int rank = -1;
  int numRanks = 0;
  MPI_CALL(Comm_size(MPI_COMM_WORLD,&numRanks));
  MPI_CALL(Comm_rank(MPI_COMM_WORLD,&rank));

  int numMessages = 100;
  int payloadSize = 100000;

  MyHandler handler;
  maml::registerHandlerFor(MPI_COMM_WORLD,&handler);

  char *payload = (char*)malloc(payloadSize);
  for (int i=0;i<payloadSize;i++)
    payload[i] = distrib(rng);

  for (int run=0;run<numRuns;run++) {
    MPI_CALL(Barrier(MPI_COMM_WORLD));
    double t0 = ospcommon::getSysTime();
    maml::start();

    for (int mID=0;mID<numMessages;mID++) {
      for (int r=0;r<numRanks;r++) {
        maml::sendTo(MPI_COMM_WORLD,r,std::make_shared<maml::Message>(payload,payloadSize));
      }
    }

    while (handler.numReceived != numRanks*numMessages*(run+1)) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }

    maml::stop();
    double t1 = ospcommon::getSysTime();
    double bytesPerSec = double(numRanks) * numMessages * payloadSize / (t1-t0);
    std::string rate = ospcommon::prettyNumber(bytesPerSec);
    printf("rank %i: received %i messages in %lf secs; that is %sB/s\n",
           rank, numRanks*numMessages, t1-t0, rate.c_str());
    MPI_CALL(Barrier(MPI_COMM_WORLD));
  }

  maml::shutdown();

  MPI_CALL(Barrier(MPI_COMM_WORLD));
  MPI_Finalize();
}
Example #2
/*
 * Attempts to release the request structures used in asynchronous communication.
 */
static void 
gc(hashtable ht) {
  MPI_Request *handle;
  hashnode* node;
  MPI_Status status;
  int flag;

  node=(hashnode*)next_hashnode(ht);
  if ( node==NULL) return;
  
  gc(ht); // start at the end
  
  handle=INT2HANDLE(node->value);
  MPI_CALL(MPI_Test( handle , &flag, &status ));
  if ( flag==true) {
    MPI_CALL(MPI_Wait(handle,&status));
#ifdef DEBUG
    write_msg(__FUNCTION__,__FILE__,__LINE__,"Released handle...%s\n",(char*)node->obj);
#endif
    if (ht==requests)
      free_request(handle);
    else
      free_broadcast_request(handle);
  }
}
Example #3
      void SimpleSendRecvImpl::RecvThread::run()
      {
        Group *g = (Group *)this->group;

        while (1) {
          MPI_Status status;
          // PING;fflush(0);
          MPI_CALL(Probe(MPI_ANY_SOURCE,g->tag,g->comm,&status));
          Action *action = new Action;
          action->addr = Address(g,status.MPI_SOURCE);

          MPI_CALL(Get_count(&status,MPI_BYTE,&action->size));

          action->data = malloc(action->size);
          double t0 = getSysTime();
          MPI_CALL(Recv(action->data,action->size,MPI_BYTE,status.MPI_SOURCE,status.MPI_TAG,
                        g->comm,MPI_STATUS_IGNORE));
          double t1 = getSysTime();
#if PROFILE_MPI
          if (logIt) {
            t_recv += (t1-t0);
            b_recv += action->size;

            MsgLog log;
            log.to = action->addr.rank;
            log.size = action->size;
            log.begin = t0;
            log.end = t1;
            recvLog.push_back(log);
          }
#endif
          g->procQueue.put(action);
        }
      }
Example #4
  bool init(int *ac, const char **av, bool useCommWorld)
  {
    int initialized = false;
    MPI_CALL(Initialized(&initialized));

    int provided = 0;
    if (!initialized) {
      /* MPI not initialized by the app - it's up to us */
      MPI_CALL(Init_thread(ac, const_cast<char ***>(&av),
                           MPI_THREAD_MULTIPLE, &provided));
    } else {
      /* MPI was already initialized by the app that called us! */
      MPI_Query_thread(&provided);
    }
    if (provided != MPI_THREAD_MULTIPLE && provided != MPI_THREAD_SERIALIZED) {
      throw std::runtime_error("MPI initialization error: The MPI runtime must"
                               " support either MPI_THREAD_MULTIPLE or"
                               " MPI_THREAD_SERIALIZED.");
    }
    mpiIsThreaded = provided == MPI_THREAD_MULTIPLE;

    if (useCommWorld) {
      world.setTo(MPI_COMM_WORLD);
    }
    return !initialized;
  }
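The init() above negotiates MPI's thread-support level rather than assuming it. The same pattern can be reproduced with nothing but the standard MPI calls it uses (MPI_Initialized, MPI_Init_thread, MPI_Query_thread); a minimal standalone sketch, where the helper name ensure_threaded_mpi and the abort-on-failure policy are illustrative rather than part of the original code:

#include <mpi.h>
#include <stdio.h>

/* Returns 1 if this call initialized MPI (the caller should then finalize it),
 * 0 if the host application had already initialized MPI itself. */
static int ensure_threaded_mpi(int *argc, char ***argv)
{
  int initialized = 0, provided = 0;
  MPI_Initialized(&initialized);

  if (!initialized) {
    /* MPI not initialized yet: request full multi-threading support */
    MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, &provided);
  } else {
    /* MPI already set up by the host application; just query its level */
    MPI_Query_thread(&provided);
  }

  if (provided != MPI_THREAD_MULTIPLE && provided != MPI_THREAD_SERIALIZED) {
    fprintf(stderr, "MPI runtime lacks MPI_THREAD_MULTIPLE/SERIALIZED support\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  return !initialized;
}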
Example #5
/*
 * Broadcasts a message from the process with rank "root" to
 *      all other processes of the group.
 * mpi_ibcast(+Root,+Data,+Tag).
 */
static YAP_Bool 
my_ibcast(YAP_Term t1,YAP_Term t2, YAP_Term t3) {
  int root;
  int k,worldsize;
  size_t len=0;
  char *str;
  int tag;
  BroadcastRequest *b;

  //fprintf(stderr,"ibcast1");
  //The arguments should be bound
  if(YAP_IsVarTerm(t2) || !YAP_IsIntTerm(t1) || !YAP_IsIntTerm(t3)) {
    return false;
  }

  CONT_TIMER();

  //  fprintf(stderr,"ibcast2");
  MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD,&worldsize));

  root = YAP_IntOfTerm(t1);
  tag = YAP_IntOfTerm(t3);
  str = term2string(NULL,&len,t2);
  b=new_broadcast();
  if ( b==NULL ) {
    PAUSE_TIMER();
    return false;
  }
  //fprintf(stderr,"ibcast3");
  for(k=0;k<=worldsize-1;++k) {
    if(k!=root) {
      MPI_Request *handle=(MPI_Request*)malloc(sizeof(MPI_Request));
      MSG_SENT(len);
      // Use async send
      if(MPI_CALL(MPI_Isend(str, len, MPI_CHAR, k, tag, MPI_COMM_WORLD,handle))!=MPI_SUCCESS) {
	free(handle);
	PAUSE_TIMER();
	return false;
      }
      new_broadcast_request(b,handle,str);
      //new_request(handle,str);
      USED_BUFFER();
    }
  }
  if(!b->nreq)//release b if no messages were sent (worldsize==1)
    free(b);

#if defined(DEBUG) && defined(MALLINFO)
  {
    struct mallinfo s = mallinfo();
    printf("%d: %d=%d/%d\n",getpid(),s.arena,s.uordblks,s.fordblks); //vsc
  }
#endif
  PAUSE_TIMER();
  //fprintf(stderr,"ibcast4");
  return true;
}
Example #6
 Group::Group(//const std::string &name, 
              MPI_Comm comm, 
              Consumer *consumer, int32 tag)
   :  tag(tag), consumer(consumer)
 {
   this->comm = comm;
   MPI_CALL(Comm_rank(comm,&rank));
   MPI_CALL(Comm_size(comm,&size));
 }
Example #7
void comm_schedule_sync( comm_schedule_t* schedule) {
    rank_t p, r;
    MPI_Comm comm = schedule->comm;
    MPI_CALL( MPI_Comm_size(comm, &p) );
    MPI_CALL( MPI_Comm_rank(comm, &r) );

    LOG(LOG_TRACE, "map.before(%u): %lu %lu\n", r, schedule->comm_map[0], schedule->comm_map[1]);

    // in-place allgather: this rank's own row already sits at offset r*p of comm_map
    MPI_CALL( MPI_Allgather( MPI_IN_PLACE, p, MPI_OFFSET_T, schedule->comm_map, p, MPI_OFFSET_T, comm ) );

    LOG(LOG_TRACE, "map.after(%u): %lu %lu %lu %lu\n", r, schedule->comm_map[0], schedule->comm_map[1], schedule->comm_map[2], schedule->comm_map[3] );
}
Example #8
/*
 * Broadcasts a message from the process with rank "root" to
 *      all other processes of the group.
 *  Note: Collective communication means all processes within a communicator call the same routine.
 *       To be able to use a regular MPI_Recv to receive the messages, one should use mpi_bcast2
 *
 *  mpi_bcast(+Root,+Data).
 */
static YAP_Bool 
mpi_bcast(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1), 
    t2 = YAP_Deref(YAP_ARG2);
  int root,val;
  size_t len=0;
  char *str;
  int  rank;
  //The arguments should be bound
  if(!YAP_IsIntTerm(t1)) {
    return false;
  }
  MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank));

  CONT_TIMER();
  root = YAP_IntOfTerm(t1);
  if (root == rank) {
    str=term2string(NULL,&len,t2);
#ifdef DEBUG
    write_msg(__FUNCTION__,__FILE__,__LINE__,"mpi_bcast(%s,%u, MPI_CHAR,%d)\n",str,len,root);
#endif
  } else {
    RESET_BUFFER();
    str = BUFFER_PTR;
    len = BLOCK_SIZE;
  }
  // send the data 
  val=(MPI_CALL(MPI_Bcast( str, len, MPI_CHAR, root, MPI_COMM_WORLD))==MPI_SUCCESS?true:false);


#ifdef MPISTATS
  {
   int	size;
   MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size));
   MSG_SENT(len*size);
  }
#endif
  PAUSE_TIMER();
  if (root != rank) {
    YAP_Term out;
    len=YAP_SizeOfExportedTerm(str);
    // make sure we only fetch ARG2 after constructing the term
    out = string2term(str,(size_t*)&len);
    MSG_RECV(len);
    if (!YAP_Unify(YAP_ARG2, out))
      return false;
  }
  return(val);
}
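Because the non-root ranks do not know how long the encoded term is, mpi_bcast above receives into a fixed-size buffer (BLOCK_SIZE). A common alternative is to broadcast the length first and then the payload; a hedged sketch using only standard MPI calls (bcast_string and its ownership convention are illustrative, not part of the YAP bindings):

#include <mpi.h>
#include <stdlib.h>
#include <string.h>

/* Two-phase broadcast: the string length first, then the characters. */
static char *bcast_string(char *str, int root, MPI_Comm comm)
{
  int rank;
  unsigned long len = 0;

  MPI_Comm_rank(comm, &rank);
  if (rank == root)
    len = strlen(str) + 1;                 /* include the terminating NUL */
  MPI_Bcast(&len, 1, MPI_UNSIGNED_LONG, root, comm);

  if (rank != root)
    str = (char *)malloc(len);             /* receivers allocate exactly enough */
  MPI_Bcast(str, (int)len, MPI_CHAR, root, comm);
  return str;                              /* non-root callers own the buffer */
}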
Example #9
/*
 * Provides information regarding a handle, i.e., whether a communication operation has completed.
 * If the operation has been completed the predicate succeeds with the completion status,
 * otherwise it fails.
 *
 * mpi_test_recv(+Handle,-Status,-Data).
 */
static YAP_Bool 
mpi_test_recv(void) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1); // data

  MPI_Status status;
  MPI_Request *handle;
  int flag,ret;
  size_t len;
  char *s;
  YAP_Term out;

  // The first argument (handle) must be an integer
  if(!YAP_IsIntTerm(t1)) {
    return false;
  }
  CONT_TIMER();

  handle=INT2HANDLE(YAP_IntOfTerm(t1));
  //
  if( MPI_CALL(MPI_Test( handle , &flag, &status ))!=MPI_SUCCESS) {
    PAUSE_TIMER();
    return false;
  }
  if( !flag ) { // operation not yet complete: fail, as documented above
    PAUSE_TIMER();
    return false;
  }
  s=(char*)get_request(handle);
  len=strlen(s);
  out = string2term(s,&len);
  // make sure we only fetch ARG3 after constructing the term
  ret=YAP_Unify(YAP_ARG3,out);
  free_request(handle);
  PAUSE_TIMER();
  return(ret & YAP_Unify(YAP_ARG2,YAP_MkIntTerm(status.MPI_ERROR)));
}
Example #10
      void SimpleSendRecvImpl::SendThread::run()
      {
        Group *g = this->group;

        while (1) {
          Action *action = g->sendQueue.get();
          double t0 = getSysTime();
          MPI_CALL(Send(action->data,action->size,MPI_BYTE,
                        action->addr.rank,g->tag,action->addr.group->comm));
          double t1 = getSysTime();
#if PROFILE_MPI
          if (logIt) {
            t_send += (t1-t0);
            b_sent += action->size;

            MsgLog log;
            log.to = action->addr.rank;
            log.size = action->size;
            log.begin = t0;
            log.end = t1;
            sendLog.push_back(log);
          }
#endif
          free(action->data);
          delete action;
        }
      }
Example #11
/*
 * mpi_test(+Handle,-Status)
 *
 * Provides information regarding a handle, i.e., whether a communication operation has completed.
 * If the operation has been completed the predicate succeeds with the completion status,
 * otherwise it fails.
 */
static YAP_Bool 
mpi_test(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1), // Handle
           t2 = YAP_Deref(YAP_ARG2); // Status
  MPI_Status status;
  MPI_Request *handle;
  int flag;

  // The first argument (handle) must be an integer
  if(!YAP_IsIntTerm(t1)) {
    return false;
  }
  CONT_TIMER();

  handle=INT2HANDLE(YAP_IntOfTerm(t1));
  //
  MPI_CALL(MPI_Test( handle , &flag, &status ));
  if( flag != true ) {
    PAUSE_TIMER();
    return false;
  }
  free_request(handle);
  PAUSE_TIMER();
  return(YAP_Unify(t2,YAP_MkIntTerm(status.MPI_ERROR)));
}
Example #12
/*
 * Returns the major and minor version of MPI.
 *  mpi_version(-Major,-Minor).
 */
static YAP_Bool 
mpi_version(term_t YAP_ARG1,...){
  int  major,minor;

  MPI_CALL(MPI_Get_version(&major,&minor));
  return (YAP_Unify(YAP_ARG1,YAP_MkIntTerm(major)) && YAP_Unify(YAP_ARG2,YAP_MkIntTerm(minor)));
}
Example #13
static YAP_Bool 
mpi_get_processor_name(term_t YAP_ARG1,...) {
  char name[MPI_MAX_PROCESSOR_NAME];
  int length;
  MPI_CALL(MPI_Get_processor_name(name,&length));
  return (YAP_Unify(YAP_ARG1,YAP_MkAtomTerm(YAP_LookupAtom(name))));
}
Example #14
/*
 * Collective communication function that performs a barrier synchronization among all processes.
 * mpi_barrier
 */
static YAP_Bool 
mpi_barrier(void) {
  CONT_TIMER();
  int ret=MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
  PAUSE_TIMER();
  return (ret==MPI_SUCCESS?true:false);
}
Example #15
/*
 * Blocking communication function. The message is sent immediately.
 * mpi_send(+Data, +Destination, +Tag).
 */
static YAP_Bool 
mpi_send(term_t YAP_ARG1,...) {

  YAP_Term t1 = YAP_Deref(YAP_ARG1), 
    t2 = YAP_Deref(YAP_ARG2), 
    t3 = YAP_Deref(YAP_ARG3);
  char *str=NULL;
  int dest,tag;
  size_t len=0;
  int val;
  if (YAP_IsVarTerm(t1) || !YAP_IsIntTerm(t2) || !YAP_IsIntTerm(t3)) {
    return  false;
  }

  CONT_TIMER();
  //
  dest = YAP_IntOfTerm(t2);
  tag  = YAP_IntOfTerm(t3);
  // the data is packaged as a string
  str=term2string(NULL,&len,t1);
#if  defined(DEBUG) && 0
  write_msg(__FUNCTION__,__FILE__,__LINE__,"%s(%s,%u, MPI_CHAR,%d,%d)\n",__FUNCTION__,str,len,dest,tag);
#endif
  // send the data 
  val=(MPI_CALL(MPI_Send( str, len, MPI_CHAR, dest, tag, MPI_COMM_WORLD))==MPI_SUCCESS?true:false);
  
  PAUSE_TIMER();
  return(val);
}
Example #16
/** mpi_wait_recv(+Handle,-Status,-Data)
 *
 *  Completes a non-blocking operation. If the operation was a send, the
 * function waits until the message is buffered or sent by the runtime
 * system. At this point the send buffer is released. If the operation
 * was a receive, it waits until the message is copied to the receive
 * buffer.
 */
static YAP_Bool 
mpi_wait_recv(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1); // data
  MPI_Status status;
  MPI_Request *handle;
  char *s;
  int ret;
  size_t len;
  YAP_Term out;

  // The first argument (handle) must be an integer
  if(!YAP_IsIntTerm(t1)) {
    return false;
  }
  CONT_TIMER();

  handle=INT2HANDLE(YAP_IntOfTerm(t1));
  s=(char*)get_request(handle);
  // wait for communication completion
  if( MPI_CALL(MPI_Wait( handle , &status )) != MPI_SUCCESS) {
    PAUSE_TIMER();
    return false;
  }
  len=YAP_SizeOfExportedTerm(s);
  // make sure we only fetch ARG3 after constructing the term
  out = string2term(s,(size_t*)&len);
  MSG_RECV(len);
  free_request(handle);
  PAUSE_TIMER();
  ret=YAP_Unify(YAP_ARG3,out);
  return(ret & YAP_Unify(YAP_ARG2,YAP_MkIntTerm(status.MPI_ERROR)));
}
Example #17
void comm_schedule_init( comm_schedule_t* schedule, MPI_Comm comm ) {
    rank_t p, r;
    MPI_CALL( MPI_Comm_size(comm, &p) );
    MPI_CALL( MPI_Comm_rank(comm, &r) );

    schedule->comm = comm;

    schedule->map_size = p*p*sizeof(offset_t);
    schedule->comm_map = (offset_t*)malloc( schedule->map_size );
    CHECK_ALLOC( schedule->comm_map );
    memset( schedule->comm_map, 0, schedule->map_size );

    schedule->map_size = p*sizeof(offset_t*);
    schedule->recv_cols = (offset_t**)malloc( schedule->map_size );
    CHECK_ALLOC(schedule->recv_cols);
    memset( schedule->recv_cols, 0, schedule->map_size );
}
Example #18
 void MultiIsendIrecvImpl::ProcThread::run()
 {
   Group *g = (Group *)this->group;
   while (1) {
     Action *action = g->recvQueue.get();
     MPI_CALL(Wait(&action->request,MPI_STATUS_IGNORE));
     g->consumer->process(action->addr,action->data,action->size);
     delete action;
   }
 }
Example #19
      void MultiIsendIrecvImpl::RecvThread::run()
      {
        // note this thread not only _probes_ for new receives, it
        // also immediately starts the receive operation using Irecv()
        Group *g = (Group *)this->group;
        
        while (1) {
          MPI_Status status;
          MPI_CALL(Probe(MPI_ANY_SOURCE,g->tag,g->comm,&status));
          Action *action = new Action;
          action->addr = Address(g,status.MPI_SOURCE);

          MPI_CALL(Get_count(&status,MPI_BYTE,&action->size));

          action->data = malloc(action->size);
          MPI_CALL(Irecv(action->data,action->size,MPI_BYTE,status.MPI_SOURCE,status.MPI_TAG,
                        g->comm,&action->request));
          g->recvQueue.put(action);
        }
      }
Example #20
      void SimpleSendRecvImpl::shutdown()
      {
        mpi::world.barrier();
        printf("#osp:mpi:SimpleSendRecvMessaging shutting down %i/%i\n",mpi::world.rank,mpi::world.size);
        fflush(0);
        mpi::world.barrier();
        for (int i=0;i<myGroups.size();i++)
          myGroups[i]->shutdown();

        MPI_CALL(Finalize());
      }
Example #21
// FIXME offset_t vs. value_t!!!
void comm_schedule_add( comm_schedule_t* schedule, rank_t src_r, offset_t element) {
    rank_t p, r;
    MPI_Comm comm = schedule->comm;
    MPI_CALL( MPI_Comm_size(comm, &p) );
    MPI_CALL( MPI_Comm_rank(comm, &r) );

    assert(src_r < p);

    offset_t idx = r*p + src_r;

    assert(idx < p*p);


    offset_t num_elems = schedule->comm_map[ idx ];

    // avoid duplicates.
    offset_t i;
    for(i = 0; i < num_elems; i++) {
        offset_t el = schedule->recv_cols[ src_r ][ i ];

        if( el == element ) {
            return; // num_elems;
        }
    }



    LOG(LOG_TRACE, "%u NEEDS: row %lu from %u. comm_map idx=%lu\n", r, element, src_r, idx);

    // add the column to receive
    size_t alloc_size = (schedule->comm_map[ idx ] + 1)*sizeof(offset_t);
    schedule->recv_cols[ src_r ] = (offset_t*)realloc( schedule->recv_cols[ src_r ], alloc_size );
    CHECK_ALLOC( schedule->recv_cols[ src_r ] );
    schedule->recv_cols[ src_r ][ schedule->comm_map[ idx ] ] = element;

    //LOG(LOG_TRACE, "%lu NEEDS: row %lu (%lu) from %lu.\n", r, element, schedule->recv_cols[ src_r ][ schedule->comm_map[ idx ] ], src_r);

    // increment the number of columns to receive
    schedule->comm_map[ idx ]++;

}
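Taken together with comm_schedule_init and comm_schedule_sync shown earlier, the intended call order appears to be: initialize the schedule once, register every (source rank, element) pair this rank needs, then publish the per-rank counts collectively. A usage sketch inferred from the signatures above; the neighbour choice and element values are placeholders, and the comm_schedule_t declarations are assumed to be in scope:

void build_schedule_example(void)
{
    comm_schedule_t schedule;
    rank_t rank, nprocs;

    comm_schedule_init(&schedule, MPI_COMM_WORLD);
    MPI_CALL( MPI_Comm_rank(MPI_COMM_WORLD, &rank) );
    MPI_CALL( MPI_Comm_size(MPI_COMM_WORLD, &nprocs) );

    /* each rank asks its left neighbour for two (placeholder) rows */
    rank_t src = (rank + nprocs - 1) % nprocs;
    comm_schedule_add(&schedule, src, 0);
    comm_schedule_add(&schedule, src, 1);

    /* collectively exchange how many elements each pair of ranks transfers */
    comm_schedule_sync(&schedule);
}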
Example #22
    MPIDistributedDevice::~MPIDistributedDevice()
    {
      maml::shutdown();

      if (shouldFinalizeMPI) {
        try {
          MPI_CALL(Finalize());
        } catch (...) {
          //TODO: anything to do here? try-catch added to silence a warning...
        }
      }
    }
Example #23
      void MultiIsendIrecvImpl::send(const Address &dest, void *msgPtr, int32 msgSize)
      {
        Action *action = new Action;
        action->addr   = dest;
        action->data   = msgPtr;
        action->size   = msgSize;

        Group *g = (Group *)dest.group;
        MPI_CALL(Isend(action->data,action->size,MPI_BYTE,
                       dest.rank,g->tag,g->comm,&action->request));
        g->sendQueue.put(action);
      }
Example #24
 /*! set to given intercomm, and properly set size, root, etc */
 void Group::setTo(MPI_Comm _comm)
 {
   this->comm = _comm;
   if (comm == MPI_COMM_NULL) {
     rank = size = -1;
   } else {
     int isInter;
     MPI_CALL(Comm_test_inter(comm,&isInter));
     if (isInter)
       makeInterComm(comm);
     else
       makeIntraComm(comm);
   }
 }
Example #25
    void initMPI(int &ac, char **&av)
    {
      // PING;fflush(0);
      if (WORLD != NULL)
        throw std::runtime_error("#osp:mpi: MPI already initialized.");

      // PING;fflush(0);
      MPI_Init_thread(&ac,&av,requested,&provided);
      // PING;fflush(0);
      if (provided != requested)
        throw std::runtime_error("#osp:mpi: the MPI implementation you are trying to run this application on does not support threading.");
      // world = new Group(MPI_COMM_WORLD);
      WORLD = new Group("MPI_COMM_WORLD");
      // PRINT(WORLD->toString());

      MPI_CALL(Barrier(MPI_COMM_WORLD));
      // printf("#osp:mpi: MPI Initialized, we are world rank %i/%i\n",WORLD->rank,WORLD->size);
      // fflush(0);
      MPI_CALL(Barrier(MPI_COMM_WORLD));
      printf("#osp:mpi: MPI Initialized, we are world rank %i/%i\n",
             WORLD->rank,WORLD->size);
      MPI_CALL(Barrier(WORLD->comm));
      PING;
    }
Example #26
/*
 * Broadcasts a message from the process with rank "root" to
 *      all other processes of the group.
 * Note: Collective communication means all processes within a communicator call the same routine.
 *       To be able to use a regular MPI_Recv to receive the messages, one should use mpi_bcast2
 * mpi_bcast_int(+Root,+Data,+Tag).
 */
static YAP_Bool 
my_bcast(YAP_Term t1,YAP_Term t2, YAP_Term t3) {
  int root;
  int k,worldsize;
  size_t len=0;
  char *str;
  int tag;

  //The arguments should be bound
  if(YAP_IsVarTerm(t2) || !YAP_IsIntTerm(t1) || !YAP_IsIntTerm(t3)) {
    return false;
  }
  
  CONT_TIMER();

  MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD,&worldsize));

  root = YAP_IntOfTerm(t1);
  tag = YAP_IntOfTerm(t3);
  str=term2string(NULL,&len,t2);
  
  for(k=0;k<=worldsize-1;++k)
    if(k!=root) {
      // Use async send?
      MSG_SENT(len);
      if(MPI_CALL(MPI_Send( str, len, MPI_CHAR, k, tag, MPI_COMM_WORLD))!=MPI_SUCCESS) {
	PAUSE_TIMER();
	return false;
      }
#ifdef DEBUG
  write_msg(__FUNCTION__,__FILE__,__LINE__,"bcast2(%s,%u, MPI_CHAR,%d,%d)\n",str,len,k,tag);
#endif
    }
  PAUSE_TIMER();
  return true;
}
Example #27
    void MPIDistributedDevice::commit()
    {
      if (!initialized) {
        int _ac = 1;
        const char *_av[] = {"ospray_mpi_distributed_device"};

        auto *setComm =
          static_cast<MPI_Comm*>(getParam<void*>("worldCommunicator", nullptr));
        shouldFinalizeMPI = mpicommon::init(&_ac, _av, setComm == nullptr);

        if (setComm) {
          MPI_CALL(Comm_dup(*setComm, &mpicommon::world.comm));
          MPI_CALL(Comm_rank(mpicommon::world.comm, &mpicommon::world.rank));
          MPI_CALL(Comm_size(mpicommon::world.comm, &mpicommon::world.size));
        }

        auto &embreeDevice = api::ISPCDevice::embreeDevice;

        embreeDevice = rtcNewDevice(generateEmbreeDeviceCfg(*this).c_str());
        rtcSetDeviceErrorFunction(embreeDevice, embreeErrorFunc, nullptr);
        RTCError erc = rtcGetDeviceError(embreeDevice);
        if (erc != RTC_ERROR_NONE) {
          // why did the error function not get called !?
          postStatusMsg() << "#osp:init: embree internal error number " << erc;
          assert(erc == RTC_ERROR_NONE);
        }
        initialized = true;
      }

      Device::commit();

      masterRank = getParam<int>("masterRank", 0);

      TiledLoadBalancer::instance =
                      make_unique<staticLoadBalancer::Distributed>();
    }
Example #28
/*
 * Receiver thread loop: blocks on MPI_Probe and, whenever a message arrives,
 * calls the Prolog predicate whose name is given as the argument.
 */
static YAP_Bool 
rcv_msg_thread(char *handle_pred) {
  YAP_Term pred=YAP_MkAtomTerm(YAP_LookupAtom(handle_pred));
  MPI_Status status;

  while(1) { 
    write_msg(__FUNCTION__,__FILE__,__LINE__,"Waiting for MPI msg\n");
    if( MPI_CALL(MPI_Probe( MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status )) == MPI_SUCCESS ) {
      // call handle
      write_msg(__FUNCTION__,__FILE__,__LINE__,"MPI Msg received\n");
      YAP_CallProlog(pred);
    } else
      write_msg(__FUNCTION__,__FILE__,__LINE__,"Error in MPI_Probe\n");
  }
  return 1;
}
Example #29
/*
 * Non-blocking communication function. The message is sent when possible. To check the status of the message,
 * mpi_wait or mpi_test should be used. Until mpi_wait is called, the memory allocated for the buffer containing
 * the message is not released.
 *
 * mpi_isend(+Data, +Destination, +Tag, -Handle).
 */
static YAP_Bool 
mpi_isend(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1), 
    t2 = YAP_Deref(YAP_ARG2), 
    t3 = YAP_Deref(YAP_ARG3), 
    t4 = YAP_Deref(YAP_ARG4);
  char *str=NULL;
  int dest,tag;
  size_t len=0;
  MPI_Request *handle=(MPI_Request*)malloc(sizeof(MPI_Request));

  CONT_TIMER();
  if ( handle==NULL ) {
    PAUSE_TIMER();
    return false;
  }
  if (YAP_IsVarTerm(t1) || !YAP_IsIntTerm(t2) || !YAP_IsIntTerm(t3) || !YAP_IsVarTerm(t4)) {
    free(handle); // do not leak the request on a bad argument
    PAUSE_TIMER();
    return false;
  }
  //
  dest = YAP_IntOfTerm(t2);
  tag  = YAP_IntOfTerm(t3);
  // 
  str=term2string(NULL,&len,t1);
  MSG_SENT(len);
  // send the data 
  if( MPI_CALL(MPI_Isend( str, len, MPI_CHAR, dest, tag, MPI_COMM_WORLD ,handle)) != MPI_SUCCESS ) {
    PAUSE_TIMER();
    return false;
  }

#ifdef DEBUG
  write_msg(__FUNCTION__,__FILE__,__LINE__,"%s(%s,%u, MPI_CHAR,%d,%d)\n",__FUNCTION__,str,len,dest,tag);
#endif
  USED_BUFFER(); // informs the prologterm2c module that the buffer is now in use and must not be overwritten
  // We must associate the string to each handle
  new_request(handle,str);
  PAUSE_TIMER();
  return(YAP_Unify(YAP_ARG4,YAP_MkIntTerm(HANDLE2INT(handle))));// it should always succeed
}
Example #30
/*
 *  Completes a non-blocking operation. IF the operation was a send, the
 * function waits until the message is buffered or sent by the runtime
 * system. At this point the send buffer is released. If the operation
 * was a receive, it waits until the message is copied to the receive
 * buffer.
 * mpi_wait(+Handle,-Status).
*/
static YAP_Bool 
mpi_wait(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1), // Handle
    t2 = YAP_Deref(YAP_ARG2); // Status
  MPI_Status status;
  MPI_Request *handle;

  // The first argument must be an integer (a handle)
  if(!YAP_IsIntTerm(t1)) {
    return false;
  }
  handle=INT2HANDLE(YAP_IntOfTerm(t1));
  CONT_TIMER();
  // wait for the communication to complete
  if( MPI_CALL(MPI_Wait( handle , &status )) != MPI_SUCCESS ) {
    PAUSE_TIMER();
    return false;
  }
  free_request(handle);
  PAUSE_TIMER();
  return(YAP_Unify(t2,YAP_MkIntTerm(status.MPI_ERROR)));
}