Example #1
int smpi_sample_2(int global, const char *file, int line)
{
  char *loc = sample_location(global, file, line);
  local_data_t *data;

  xbt_assert(samples, "Y U NO use SMPI_SAMPLE_* macros? Stop messing directly with smpi_sample_* functions!");
  data = xbt_dict_get(samples, loc);
  XBT_DEBUG("sample2 %s",loc);
  free(loc);

  if (data->benching==1) {
    // we need to run a new bench
    XBT_DEBUG("benchmarking: count:%d iter:%d stderr:%f thres:%f; mean:%f",
        data->count, data->iters, data->relstderr, data->threshold, data->mean);
    smpi_bench_begin();
    return 1;
  } else {
    // Enough data, no more bench (either we got enough data from previous visits to this benched nest,
    // or we just ran one bench and need to bail out now that our job is done).
    // Just sleep instead
    XBT_DEBUG("No benchmark (either no need, or just ran one): count >= iter (%d >= %d) or stderr<thres (%f<=%f). apply the %fs delay instead",
        data->count, data->iters, data->relstderr, data->threshold, data->mean);
    smpi_execute(data->mean);

    smpi_bench_begin(); // prepare to capture future, unrelated computations
    return 0;
  }
}
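The smpi_sample_* functions are not meant to be called directly (as the assertion message says); they are the expansion of the SMPI_SAMPLE_* macros that wrap a user code block. A minimal usage sketch, assuming the two-argument SMPI_SAMPLE_LOCAL(iters, threshold) macro form and the <smpi/smpi.h> header (both may differ between SimGrid versions):

#include <smpi/smpi.h>   /* assumed header path for the SMPI_SAMPLE_* macros */

/* Hypothetical user kernel whose duration we want SMPI to sample. */
static void compute_kernel(double* data, int n)
{
  for (int i = 0; i < n; i++)
    data[i] = data[i] * 0.5 + 1.0;
}

void run_phase(double* data, int n)
{
  /* Benchmark the block at most 10 times, or until the relative standard
   * error of the measurements drops below 5%; after that, SMPI only injects
   * the mean duration via smpi_execute() instead of re-running the kernel. */
  SMPI_SAMPLE_LOCAL(10, 0.05) {
    compute_kernel(data, n);
  }
}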
Example #2
  void kernel(simgrid::xbt::ReplayAction&)
  {
    static std::map<simgrid::s4u::ActorPtr, int> migration_call_counter;
    static simgrid::s4u::Barrier smpilb_bar(smpi_process_count());
    simgrid::s4u::Host* cur_host = simgrid::s4u::this_actor::get_host();
    simgrid::s4u::Host* migrate_to_host;

    TRACE_migration_call(my_proc_id, nullptr);

    // Only migrate every "cfg_migration_frequency"-th call, not on every call
    migration_call_counter[simgrid::s4u::Actor::self()]++;
    if ((migration_call_counter[simgrid::s4u::Actor::self()] %
         simgrid::config::get_value<int>(cfg_migration_frequency.get_name())) != 0) {
      return;
    }

    // TODO cheinrich: Why do we need this barrier?
    smpilb_bar.wait();

    static bool was_executed = false;
    if (not was_executed) {
      was_executed = true;
      XBT_DEBUG("Process %li runs the load balancer", my_proc_id);
      smpi_bench_begin();
      lb.run();
      smpi_bench_end();
    }

    // This barrier is required to ensure that the mapping has been computed and is available
    smpilb_bar.wait();
    was_executed = false; // Must stay behind this barrier so that all processes have passed the if clause

    migrate_to_host = lb.get_mapping(simgrid::s4u::Actor::self());
    if (cur_host != migrate_to_host) { // Origin and dest are not the same -> migrate
      std::vector<simgrid::s4u::Host*> migration_hosts = {cur_host, migrate_to_host};
      std::vector<double> comp_amount                  = {0, 0};
      std::vector<double> comm_amount = {0, /*must not be 0*/ std::max(args.memory_consumption, 1.0), 0, 0};

      xbt_os_timer_t timer = smpi_process()->timer();
      xbt_os_threadtimer_start(timer);
      simgrid::s4u::this_actor::parallel_execute(migration_hosts, comp_amount, comm_amount, -1.0);
      xbt_os_threadtimer_stop(timer);
      smpi_execute(xbt_os_timer_elapsed(timer));

      // Update the process and host mapping in SimGrid.
      XBT_DEBUG("Migrating process %li from %s to %s", my_proc_id, cur_host->get_cname(), migrate_to_host->get_cname());
      TRACE_smpi_process_change_host(my_proc_id, migrate_to_host);
      simgrid::s4u::this_actor::migrate(migrate_to_host);
    }

    smpilb_bar.wait();

    smpi_bench_begin();
  }
Example #3
unsigned long long smpi_rastro_resolution (void)
{
  smpi_bench_end();
  double resolution = (1/sg_maxmin_precision);
  smpi_bench_begin();
  return (unsigned long long)resolution;
}
Example #4
/*
 * This function starts a request returned by init functions such as
 * MPI_Send_init(), MPI_Ssend_init() (see above), and friends.
 * Those init functions should already have performed the sanity checks.
 */
int PMPI_Start(MPI_Request * request)
{
  int retval = 0;

  smpi_bench_end();
  if (request == nullptr || *request == MPI_REQUEST_NULL) {
    retval = MPI_ERR_REQUEST;
  } else {
    MPI_Request req = *request;
    int my_proc_id = (req->comm() != MPI_COMM_NULL) ? simgrid::s4u::this_actor::get_pid() : -1;
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData("Start", req->dst(),
                                                       req->size(),
                                                       req->tag(), 
                                                       simgrid::smpi::Datatype::encode(req->type())));
    if (not TRACE_smpi_view_internals() && req->flags() & MPI_REQ_SEND)
      TRACE_smpi_send(my_proc_id, my_proc_id, getPid(req->comm(), req->dst()), req->tag(), req->size());

    req->start();

    if (not TRACE_smpi_view_internals() && req->flags() & MPI_REQ_RECV)
      TRACE_smpi_recv(getPid(req->comm(), req->src()), my_proc_id, req->tag());
    retval = MPI_SUCCESS;
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
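For context, this is how user code typically reaches PMPI_Start(): a persistent request is created once with MPI_Send_init() and then repeatedly started and completed. Plain MPI calls only; persistent_send_loop is just an illustrative wrapper:

#include <mpi.h>

/* Create the persistent send once, then start/complete it each iteration.
 * buf must stay valid (and unmodified while in flight) for the request's lifetime. */
static void persistent_send_loop(const double* buf, int count, int dest, int tag, int iterations)
{
  MPI_Request req;
  MPI_Send_init(buf, count, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, &req);
  for (int it = 0; it < iterations; it++) {
    MPI_Start(&req);                    /* lands in PMPI_Start() above */
    MPI_Wait(&req, MPI_STATUS_IGNORE);  /* complete this round */
  }
  MPI_Request_free(&req);
}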
Example #5
int PMPI_Ssend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
  int retval = 0;

  smpi_bench_end();

  if (comm == MPI_COMM_NULL) {
    retval = MPI_ERR_COMM;
  } else if (dst == MPI_PROC_NULL) {
    retval = MPI_SUCCESS;
  } else if (dst >= comm->group()->size() || dst <0){
    retval = MPI_ERR_RANK;
  } else if ((count < 0) || (buf==nullptr && count > 0)) {
    retval = MPI_ERR_COUNT;
  } else if (datatype==MPI_DATATYPE_NULL || not datatype->is_valid()) {
    retval = MPI_ERR_TYPE;
  } else if(tag<0 && tag !=  MPI_ANY_TAG){
    retval = MPI_ERR_TAG;
  } else {
    int my_proc_id         = simgrid::s4u::this_actor::get_pid();
    int dst_traced         = getPid(comm, dst);
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData("Ssend", dst,
                                                       datatype->is_replayable() ? count : count * datatype->size(),
                                                       tag, simgrid::smpi::Datatype::encode(datatype)));
    TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, tag, count * datatype->size());

    simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm);
    retval = MPI_SUCCESS;

    TRACE_smpi_comm_out(my_proc_id);
  }

  smpi_bench_begin();
  return retval;
}
Example #6
int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status)
{
  int retval = 0;
  smpi_bench_end();
  if (request == nullptr || flag == nullptr) {
    retval = MPI_ERR_ARG;
  } else if (*request == MPI_REQUEST_NULL) {
    if (status != MPI_STATUS_IGNORE){
      *flag= true;
      simgrid::smpi::Status::empty(status);
    }
    retval = MPI_SUCCESS;
  } else {
    int my_proc_id = ((*request)->comm() != MPI_COMM_NULL) ? simgrid::s4u::this_actor::get_pid() : -1;

    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("test"));
    
    *flag = simgrid::smpi::Request::test(request,status);

    TRACE_smpi_comm_out(my_proc_id);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
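A typical caller polls MPI_Test() to overlap computation with a pending receive; do_local_work() below is a hypothetical placeholder for the caller's own work:

#include <mpi.h>

extern void do_local_work(void);   /* hypothetical user work, not part of MPI */

/* Overlap local work with a pending receive by polling MPI_Test(). */
static void recv_with_overlap(void* buf, int count, int src, int tag)
{
  MPI_Request req;
  int flag = 0;
  MPI_Irecv(buf, count, MPI_BYTE, src, tag, MPI_COMM_WORLD, &req);
  while (!flag) {
    do_local_work();
    MPI_Test(&req, &flag, MPI_STATUS_IGNORE);   /* lands in PMPI_Test() above */
  }
}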
Example #7
int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
  if (index == nullptr)
    return MPI_ERR_ARG;

  if (count <= 0)
    return MPI_SUCCESS;

  smpi_bench_end();
  // For tracing, save the handles which might get overridden before we can use the helper on them
  std::vector<MPI_Request> savedreqs(requests, requests + count);
  for (MPI_Request& req : savedreqs) {
    if (req != MPI_REQUEST_NULL && not(req->flags() & MPI_REQ_FINISHED))
      req->ref();
    else
      req = MPI_REQUEST_NULL;
  }

  int rank_traced = simgrid::s4u::this_actor::get_pid(); // FIXME: In PMPI_Wait, we check if the comm is null?
  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("waitAny", static_cast<double>(count)));

  *index = simgrid::smpi::Request::waitany(count, requests, status);

  if(*index!=MPI_UNDEFINED){
    trace_smpi_recv_helper(&savedreqs[*index], status);
    TRACE_smpi_comm_out(rank_traced);
  }

  for (MPI_Request& req : savedreqs)
    if (req != MPI_REQUEST_NULL)
      simgrid::smpi::Request::unref(&req);

  smpi_bench_begin();
  return MPI_SUCCESS;
}
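Typical use of MPI_Waitany(): post one receive per peer and process messages in whatever order they complete. A sketch; the fixed-size array assumes at most 64 peers:

#include <mpi.h>

#define MAX_PEERS 64   /* illustrative bound for the static array below */

/* Post one receive per peer, then handle messages in completion order. */
static void drain_peers(int npeers, char bufs[][256])
{
  MPI_Request reqs[MAX_PEERS];
  for (int i = 0; i < npeers; i++)
    MPI_Irecv(bufs[i], 256, MPI_CHAR, i, 0, MPI_COMM_WORLD, &reqs[i]);

  for (int done = 0; done < npeers; done++) {
    int idx;
    MPI_Status st;
    MPI_Waitany(npeers, reqs, &idx, &st);   /* lands in PMPI_Waitany() above */
    /* bufs[idx] now holds the message from peer idx */
  }
}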
Example #8
int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  smpi_bench_end();

  // For tracing, save the handles which might get overridden before we can use the helper on them
  std::vector<MPI_Request> savedreqs(requests, requests + count);
  for (MPI_Request& req : savedreqs) {
    if (req != MPI_REQUEST_NULL && not(req->flags() & MPI_REQ_FINISHED))
      req->ref();
    else
      req = MPI_REQUEST_NULL;
  }

  int rank_traced = simgrid::s4u::this_actor::get_pid(); // FIXME: In PMPI_Wait, we check if the comm is null?
  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("waitall", static_cast<double>(count)));

  int retval = simgrid::smpi::Request::waitall(count, requests, status);

  for (int i = 0; i < count; i++) {
    trace_smpi_recv_helper(&savedreqs[i], status!=MPI_STATUSES_IGNORE ? &status[i]: MPI_STATUS_IGNORE);
  }
  TRACE_smpi_comm_out(rank_traced);

  for (MPI_Request& req : savedreqs)
    if (req != MPI_REQUEST_NULL)
      simgrid::smpi::Request::unref(&req);

  smpi_bench_begin();
  return retval;
}
Example #9
int PMPI_Win_allocate_shared( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
  int retval = 0;
  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval= MPI_ERR_COMM;
  }else if (disp_unit <= 0 || size < 0 ){
    retval= MPI_ERR_OTHER;
  }else{
    void* ptr = nullptr;
    int rank = comm->rank();
    if(rank==0){
       ptr = xbt_malloc(size*comm->size());
       if(ptr==nullptr)
         return MPI_ERR_NO_MEM;
    }
    
    simgrid::smpi::Colls::bcast(&ptr, sizeof(void*), MPI_BYTE, 0, comm);
    simgrid::smpi::Colls::barrier(comm);
    
    *static_cast<void**>(base) = (char*)ptr+rank*size;
    *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm,rank==0);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
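From the user side, the shared window is usually created with MPI_Win_allocate_shared() and every rank then resolves rank 0's base address with MPI_Win_shared_query(); plain MPI-3 calls, make_shared_segment is just an illustrative wrapper:

#include <mpi.h>

/* Each rank contributes one double to a contiguous shared segment and
 * returns a pointer to the start of rank 0's slot. */
static double* make_shared_segment(MPI_Comm comm, MPI_Win* win)
{
  double* my_base = NULL;
  MPI_Win_allocate_shared(sizeof(double), sizeof(double), MPI_INFO_NULL, comm, &my_base, win);

  double* seg_base = NULL;
  MPI_Aint seg_size;
  int disp_unit;
  MPI_Win_shared_query(*win, 0, &seg_size, &disp_unit, &seg_base);  /* base of rank 0's slot */
  return seg_base;
}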
Example #10
int PMPI_Compare_and_swap(const void* origin_addr, void* compare_addr, void* result_addr, MPI_Datatype datatype,
                          int target_rank, MPI_Aint target_disp, MPI_Win win)
{
  int retval = 0;
  smpi_bench_end();
  if (win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  } else if (target_rank == MPI_PROC_NULL) {
    retval = MPI_SUCCESS;
  } else if (target_rank <0){
    retval = MPI_ERR_RANK;
  } else if (win->dynamic()==0 && target_disp <0){
    // In the case of a dynamic window, target_disp can mistakenly appear negative, since it is an address
    retval = MPI_ERR_ARG;
  } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){
    retval = MPI_ERR_COUNT;
  } else if ((datatype == MPI_DATATYPE_NULL) || (not datatype->is_valid())) {
    retval = MPI_ERR_TYPE;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    MPI_Group group;
    win->get_group(&group);
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData("Compare_and_swap", target_rank,
                                                       datatype->is_replayable() ? 1 : datatype->size(),
                                                       simgrid::smpi::Datatype::encode(datatype)));

    retval = win->compare_and_swap(origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp);

    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
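A common pattern built on this call is an atomic test-and-set of a flag inside a passive-target epoch; a sketch assuming the flag is an int at displacement 0 of rank 0's window:

#include <mpi.h>

/* Try to atomically flip a 0 -> 1 flag living at displacement 0 on rank 0;
 * returns 1 if this rank won the flag, 0 otherwise. */
static int try_take_flag(MPI_Win win)
{
  int one = 1, zero = 0, prev = -1;
  MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);
  MPI_Compare_and_swap(&one, &zero, &prev, MPI_INT, 0, 0, win);  /* lands in PMPI_Compare_and_swap() above */
  MPI_Win_unlock(0, win);
  return prev == 0;   /* old value 0 means the swap succeeded for us */
}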
Example #11
unsigned int smpi_sleep(unsigned int secs)
{
  smpi_bench_end();
  smpi_execute((double) secs);
  smpi_bench_begin();
  return secs;
}
Example #12
int smpi_sample_2(int global, const char *file, int line)
{
  char *loc = sample_location(global, file, line);
  local_data_t *data;

  xbt_assert(samples, "You did something very inconsistent, didn't you?");
  data = xbt_dict_get_or_null(samples, loc);
  xbt_assert(data, "Please, do things in order");
  if (!data->started) {
    if ((data->iters > 0 && data->count >= data->iters)
        || (data->count > 1 && data->threshold > 0.0 && data->relstderr <= data->threshold)) {
      XBT_DEBUG("Perform some wait of %f", data->mean);
      smpi_execute(data->mean);
    } else {
      data->started = 1;
      data->count++;
    }
  } else {
    data->started = 0;
  }
  free(loc);
  smpi_bench_begin();
  smpi_process_simulated_start();
  return data->started;
}
Example #13
int PMPI_Sendrecv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void* recvbuf,
                  int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status* status)
{
  int retval = 0;

  smpi_bench_end();

  if (comm == MPI_COMM_NULL) {
    retval = MPI_ERR_COMM;
  } else if (not sendtype->is_valid() || not recvtype->is_valid()) {
    retval = MPI_ERR_TYPE;
  } else if (src == MPI_PROC_NULL) {
    if(status!=MPI_STATUS_IGNORE){
      simgrid::smpi::Status::empty(status);
      status->MPI_SOURCE = MPI_PROC_NULL;
    }
    if(dst != MPI_PROC_NULL)
      simgrid::smpi::Request::send(sendbuf, sendcount, sendtype, dst, sendtag, comm);
    retval = MPI_SUCCESS;
  }else if (dst == MPI_PROC_NULL){
    simgrid::smpi::Request::recv(recvbuf, recvcount, recvtype, src, recvtag, comm, status);
    retval = MPI_SUCCESS;
  }else if (dst >= comm->group()->size() || dst <0 ||
      (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){
    retval = MPI_ERR_RANK;
  } else if ((sendcount < 0 || recvcount<0) ||
      (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) {
    retval = MPI_ERR_COUNT;
  } else if((sendtag<0 && sendtag !=  MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
    retval = MPI_ERR_TAG;
  } else {
    int my_proc_id         = simgrid::s4u::this_actor::get_pid();
    int dst_traced         = getPid(comm, dst);
    int src_traced         = getPid(comm, src);

    // FIXME: Hack the way to trace this one
    std::vector<int>* dst_hack = new std::vector<int>;
    std::vector<int>* src_hack = new std::vector<int>;
    dst_hack->push_back(dst_traced);
    src_hack->push_back(src_traced);
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::VarCollTIData(
                           "sendRecv", -1, sendtype->is_replayable() ? sendcount : sendcount * sendtype->size(),
                           dst_hack, recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), src_hack,
                           simgrid::smpi::Datatype::encode(sendtype), simgrid::smpi::Datatype::encode(recvtype)));

    TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, sendtag, sendcount * sendtype->size());

    simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src,
                                     recvtag, comm, status);
    retval = MPI_SUCCESS;

    TRACE_smpi_recv(src_traced, my_proc_id, recvtag);
    TRACE_smpi_comm_out(my_proc_id);
  }

  smpi_bench_begin();
  return retval;
}
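The classic caller of this entry point is a ring shift, where every rank sends to its right neighbor while receiving from its left one in a single deadlock-free call:

#include <mpi.h>

/* Pass my_value one step to the right around the ring and return what
 * arrives from the left neighbor. */
static int ring_shift(int my_value, MPI_Comm comm)
{
  int rank, size, recv_value;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  int right = (rank + 1) % size;
  int left  = (rank - 1 + size) % size;
  MPI_Sendrecv(&my_value, 1, MPI_INT, right, 0,
               &recv_value, 1, MPI_INT, left, 0,
               comm, MPI_STATUS_IGNORE);          /* lands in PMPI_Sendrecv() above */
  return recv_value;
}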
Example #14
unsigned long long smpi_rastro_timestamp (void)
{
  smpi_bench_end();
  double now = SIMIX_get_clock();

  unsigned long long sec = (unsigned long long)now;
  unsigned long long pre = (now - sec) * smpi_rastro_resolution();
  smpi_bench_begin();
  return (unsigned long long)sec * smpi_rastro_resolution() + pre;
}
Example #15
int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
  int retval = 0;
  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval= MPI_ERR_COMM;
  }else{
    *win = new simgrid::smpi::Win(info, comm);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #16
int PMPI_Win_free( MPI_Win* win){
  int retval = 0;
  smpi_bench_end();
  if (win == nullptr || *win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  }else{
    delete *win;
    retval=MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #17
int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
  int retval = 0;
  smpi_bench_end();
  if(win == MPI_WIN_NULL){
    retval = MPI_ERR_WIN;
  } else if ((base == nullptr && size != 0) || size < 0 ){
    retval= MPI_ERR_OTHER;
  }else{
    retval = win->attach(base, size);
  }
  smpi_bench_begin();
  return retval;
}
Example #18
int PMPI_Win_flush_local_all(MPI_Win win){
  int retval = 0;
  smpi_bench_end();
  if (win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Win_flush_local_all"));
    retval = win->flush_local_all();
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
Example #19
int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
  int retval = 0;
  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval= MPI_ERR_COMM;
  }else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){
    retval= MPI_ERR_OTHER;
  }else{
    *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #20
int PMPI_Win_detach(MPI_Win win, const void* base)
{
  int retval = 0;
  smpi_bench_end();
  if(win == MPI_WIN_NULL){
    retval = MPI_ERR_WIN;
  } else if (base == nullptr){
    retval= MPI_ERR_OTHER;
  }else{
    retval = win->detach(base);
  }
  smpi_bench_begin();
  return retval;
}
Example #21
int PMPI_Request_free(MPI_Request * request)
{
  int retval = 0;

  smpi_bench_end();
  if (*request == MPI_REQUEST_NULL) {
    retval = MPI_ERR_ARG;
  } else {
    simgrid::smpi::Request::unref(request);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #22
int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[])
{
  int retval = 0;

  smpi_bench_end();
  if (outcount == nullptr) {
    retval = MPI_ERR_ARG;
  } else {
    *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #23
int PMPI_Cancel(MPI_Request* request)
{
  int retval = 0;

  smpi_bench_end();
  if (*request == MPI_REQUEST_NULL) {
    retval = MPI_ERR_REQUEST;
  } else {
    (*request)->cancel();
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #24
int PMPI_Win_unlock(int rank, MPI_Win win){
  int retval = 0;
  smpi_bench_end();
  if (win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  } else if (rank == MPI_PROC_NULL){
    retval = MPI_SUCCESS;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Win_unlock"));
    retval = win->unlock(rank);
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
Example #25
int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){
  int retval = 0;
  smpi_bench_end();
  if (win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  } else if (group==MPI_GROUP_NULL){
    retval = MPI_ERR_GROUP;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Win_start"));
    retval = win->start(group,assert);
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
Example #26
int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* statuses)
{
  int retval = 0;

  smpi_bench_end();
  if (flag == nullptr) {
    retval = MPI_ERR_ARG;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("testall"));
    retval = simgrid::smpi::Request::testall(count, requests, flag, statuses);
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
Example #27
int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[])
{
  int retval = 0;

  smpi_bench_end();
  if (outcount == nullptr) {
    retval = MPI_ERR_ARG;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("testsome"));
    retval = simgrid::smpi::Request::testsome(incount, requests, outcount, indices, status);
    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}
Example #28
int smpi_gettimeofday(struct timeval *tv)
{
  double now;
  smpi_bench_end();
  now = SIMIX_get_clock();
  if (tv) {
    tv->tv_sec = (time_t)now;
#ifdef WIN32
    tv->tv_usec = (useconds_t)((now - tv->tv_sec) * 1e6);
#else
    tv->tv_usec = (suseconds_t)((now - tv->tv_sec) * 1e6);
#endif
  }
  smpi_bench_begin();
  return 0;
}
Example #29
int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  int retval = 0;

  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval = MPI_ERR_COMM;
  } else if (src == MPI_PROC_NULL) {
    if(status != MPI_STATUS_IGNORE){
      simgrid::smpi::Status::empty(status);
      status->MPI_SOURCE = MPI_PROC_NULL;
    }
    retval = MPI_SUCCESS;
  } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
    retval = MPI_ERR_RANK;
  } else if ((count < 0) || (buf==nullptr && count > 0)) {
    retval = MPI_ERR_COUNT;
  } else if (datatype==MPI_DATATYPE_NULL || not datatype->is_valid()) {
    retval = MPI_ERR_TYPE;
  } else if(tag<0 && tag !=  MPI_ANY_TAG){
    retval = MPI_ERR_TAG;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData("recv", src,
                                                       datatype->is_replayable() ? count : count * datatype->size(),
                                                       tag, simgrid::smpi::Datatype::encode(datatype)));

    simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status);
    retval = MPI_SUCCESS;

    // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
    int src_traced=0;
    if (status != MPI_STATUS_IGNORE) 
      src_traced = getPid(comm, status->MPI_SOURCE);
    else
      src_traced = getPid(comm, src);
    if (not TRACE_smpi_view_internals()) {
      TRACE_smpi_recv(src_traced, my_proc_id, tag);
    }
    
    TRACE_smpi_comm_out(my_proc_id);
  }

  smpi_bench_begin();
  return retval;
}
Example #30
int PMPI_Rget_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
  int retval = 0;
  smpi_bench_end();
  if (win == MPI_WIN_NULL) {
    retval = MPI_ERR_WIN;
  } else if (target_rank == MPI_PROC_NULL) {
    *request = MPI_REQUEST_NULL;
    retval = MPI_SUCCESS;
  } else if (target_rank <0){
    retval = MPI_ERR_RANK;
  } else if (win->dynamic()==0 && target_disp <0){
    // In the case of a dynamic window, target_disp can mistakenly appear negative, since it is an address
    retval = MPI_ERR_ARG;
  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
             (result_addr==nullptr && result_count > 0)){
    retval = MPI_ERR_COUNT;
  } else if (((target_datatype == MPI_DATATYPE_NULL) || (result_datatype == MPI_DATATYPE_NULL)) ||
            (((origin_datatype != MPI_DATATYPE_NULL) && (not origin_datatype->is_valid())) || (not target_datatype->is_valid()) || (not result_datatype->is_valid()))) {
    retval = MPI_ERR_TYPE;
  } else if (op == MPI_OP_NULL) {
    retval = MPI_ERR_OP;
  } else if(request == nullptr){
    retval = MPI_ERR_REQUEST;
  } else {
    int my_proc_id = simgrid::s4u::this_actor::get_pid();
    MPI_Group group;
    win->get_group(&group);
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData(
                           "Rget_accumulate", target_rank,
                           target_datatype->is_replayable() ? target_count : target_count * target_datatype->size(),
                           simgrid::smpi::Datatype::encode(target_datatype)));

    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
                                  result_count, result_datatype, target_rank, target_disp,
                                  target_count, target_datatype, op, request);

    TRACE_smpi_comm_out(my_proc_id);
  }
  smpi_bench_begin();
  return retval;
}