Example #1
int
smpi_coll_tuned_reduce_flat_tree(void *sbuf, void *rbuf, int count,
                                 MPI_Datatype dtype, MPI_Op op,
                                 int root, MPI_Comm comm)
{
  int i, tag = 4321;
  int size;
  int rank;
  MPI_Aint extent;
  char *origin = 0;
  char *inbuf;
  MPI_Status status;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  /* If not root, send data to the root. */
  extent = smpi_datatype_get_extent(dtype);

  if (rank != root) {
    smpi_mpi_send(sbuf, count, dtype, root, tag, comm);
    return 0;
  }

  /* Root receives and reduces messages.  Allocate buffer to receive
     messages. */

  if (size > 1)
    origin = (char *) xbt_malloc(count * extent);


  /* Initialize the receive buffer. */
  if (rank == (size - 1))
    smpi_mpi_sendrecv(sbuf, count, dtype, rank, tag,
                 rbuf, count, dtype, rank, tag, comm, &status);
  else
    smpi_mpi_recv(rbuf, count, dtype, size - 1, tag, comm, &status);

  /* Loop receiving and calling reduction function (C or Fortran). */

  for (i = size - 2; i >= 0; --i) {
    if (rank == i)
      inbuf = sbuf;
    else {
      smpi_mpi_recv(origin, count, dtype, i, tag, comm, &status);
      inbuf = origin;
    }

    /* Call reduction function. */
    smpi_op_apply(op, inbuf, rbuf, &count, &dtype);

  }

  if (origin)
    free(origin);

  /* All done */
  return 0;
}
Example #2
int main(int argc, char *argv[])
{
  int rank, size;
  int i;
  int *sb;
  int *rb;
  int status;
  int mult=1;

  MPI_Init(&argc, &argv);
  int maxlen = argc >= 2 ? atoi(argv[1]) : 1;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  if (maxlen>1)mult=size;
  sb = (int *) xbt_malloc(size *maxlen * sizeof(int));
  rb = (int *) xbt_malloc(size *maxlen * sizeof(int));
  
  for (i = 0; i < size *maxlen; ++i) {
    sb[i] = rank*size + i;
    rb[i] = 0;
  }

  printf("[%d] sndbuf=[", rank);
  for (i = 0; i < size *mult; i++)
    printf("%d ", sb[i]);
  printf("]\n");

  status = MPI_Allreduce(sb, rb, size *maxlen, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  printf("[%d] rcvbuf=[", rank);
  for (i =  0; i < size *mult; i++)//do not print everything
    printf("%d ", rb[i]);
  printf("]\n");

  if (rank == 0) {
    if (status != MPI_SUCCESS) {
      printf("all_to_all returned %d\n", status);
      fflush(stdout);
    }
  }
  free(sb);
  free(rb);
  MPI_Finalize();
  return (EXIT_SUCCESS);
}
Example #3
/**
 * Main function
 * Create the platform, list the available hosts and give them some work
 */
int main(int argc, char **argv) {

  unsigned long seed[] = {134, 233445, 865, 2634, 424242, 876543};
  int connected;
  int max_tries = 10;

  //MSG initialization
  MSG_init(&argc, argv);

  //Set up the seed for the platform generation
  platf_random_seed(seed);

  XBT_INFO("creating nodes...");
  platf_graph_uniform(50);
  do {
    max_tries--;
    XBT_INFO("creating links...");
    platf_graph_clear_links();
    platf_graph_interconnect_uniform(0.07); //Unrealistic, but simple
    XBT_INFO("done. Check connectedness...");
    connected = platf_graph_is_connected();
    XBT_INFO("Is it connected : %s", connected ? "yes" : (max_tries ? "no, retrying" : "no"));
  } while(!connected && max_tries);

  if(!connected && !max_tries) {
    xbt_die("Impossible to connect the graph, aborting.");
  }

  XBT_INFO("registering callbacks...");
  platf_graph_promoter(promoter_1);
  platf_graph_labeler(labeler_1);

  XBT_INFO("protmoting...");
  platf_do_promote();

  XBT_INFO("labeling...");
  platf_do_label();

  XBT_INFO("Putting it in surf...");
  platf_generate();

  XBT_INFO("Let's get the available hosts and dispatch work:");

  unsigned int i;
  msg_host_t host = NULL;
  msg_host_t host_master = NULL;
  xbt_dynar_t host_dynar = MSG_hosts_as_dynar();
  char** hostname_list = xbt_malloc(sizeof(char*) * xbt_dynar_length(host_dynar));

  xbt_dynar_foreach(host_dynar, i, host) {
    MSG_process_create("slave", slave, NULL, host);
    if(i==0) {
      //The first node will also be the master
      XBT_INFO("%s will be the master", MSG_host_get_name(host));
      host_master = host;
    }
    hostname_list[i] = (char*) MSG_host_get_name(host);
  }
Example #4
int main(int argc, char *argv[])
{
  int i, rank, size;
  int *sb, *rb;
  int status;

  int root = 0;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int count = 2;
  sb = (int *) xbt_malloc(count * sizeof(int));
  rb = (int *) xbt_malloc(count * size * sizeof(int));
  
  for (i = 0; i < count; ++i)
    sb[i] = rank * count + i;
  for (i = 0; i < count * size; ++i)  
    rb[i] = 0;

  printf("[%d] sndbuf=[", rank);
  for (i = 0; i < count; i++)
    printf("%d ", sb[i]);
  printf("]\n");

  status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD);

  if (rank == root) {
    printf("[%d] rcvbuf=[", rank);
    for (i = 0; i < count * size; i++)
      printf("%d ", rb[i]);
    printf("]\n");

    if (status != MPI_SUCCESS) {
      printf("allgather returned %d\n", status);
      fflush(stdout);
    }
  }
  free(sb);
  free(rb);
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return (EXIT_SUCCESS);
}
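In Example #4 each rank fills sb[i] = rank * count + i, so MPI_Gather simply concatenates {0,1}, {2,3}, ... in rank order and the buffer gathered at the root is the identity sequence 0, 1, ..., count*size - 1. A minimal check of that expectation (a sketch; the helper name is illustrative, not part of the original test):

static int gather_result_is_identity(const int *rb, int count, int size)
{
  for (int i = 0; i < count * size; i++)
    if (rb[i] != i)      /* element i of the concatenation is simply i */
      return 0;
  return 1;
}

Called on rb at the root right after the MPI_Gather above, it should return 1.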
Example #5
//allocate a single buffer for all recv
void* smpi_get_tmp_recvbuffer(int size){
  if (!smpi_process_get_replaying())
  return xbt_malloc(size);
  if (recvbuffer_size<size){
    recvbuffer=static_cast<char*>(xbt_realloc(recvbuffer,size));
    recvbuffer_size=size;
  }
  return recvbuffer;
}
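Example #5 (and its near-duplicate #6) implements a grow-only scratch buffer: allocate fresh with xbt_malloc outside replay mode, otherwise keep one cached buffer and enlarge it with xbt_realloc only when a larger request arrives. The same idiom in isolation (a sketch assuming the usual xbt/sysdep.h allocation helpers; the names scratch and get_scratch are illustrative):

#include <xbt/sysdep.h>   /* xbt_malloc, xbt_realloc, xbt_free */

static char *scratch      = NULL;  /* cached buffer, reused across calls */
static int   scratch_size = 0;

static void *get_scratch(int size)
{
  if (scratch_size < size) {                     /* grow only when too small */
    scratch      = (char *)xbt_realloc(scratch, size);
    scratch_size = size;
  }
  return scratch;
}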
Example #6
//allocate a single buffer for all recv
void* smpi_get_tmp_recvbuffer(int size){
  if (!smpi_process_get_replaying())
	return xbt_malloc(size);
  if (recvbuffer_size<size){
    recvbuffer=xbt_realloc(recvbuffer,size);
    recvbuffer_size=size;
  }
  return recvbuffer;
}
Example #7
int main(int argc, char *argv[])
{
    int rank, size;
    int i;
    int *sb;
    int *rb;
    int status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    sb = (int *) xbt_malloc(size * sizeof(int) * 2);
    rb = (int *) xbt_malloc(size * sizeof(int) * 2);

    for (i = 0; i < size; ++i) {
        sb[i] = rank*size + i;
        rb[i] = 0;
    }

    printf("[%d] sndbuf=[", rank);
    for (i = 0; i < size; i++)
        printf("%d ", sb[i]);
    printf("]\n");

    status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);

    printf("[%d] rcvbuf=[", rank);
    for (i = 0; i < size; i++)
        printf("%d ", rb[i]);
    printf("]\n");


    if (rank == 0) {
        if (status != MPI_SUCCESS) {
            printf("all_to_all returned %d\n", status);
            fflush(stdout);
        }
    }
    free(sb);
    free(rb);
    MPI_Finalize();
    return (EXIT_SUCCESS);
}
Example #8
void smpi_process_init(int *argc, char ***argv)
{
  int index=-1;
  smpi_process_data_t data;
  smx_process_t proc;

  if (argc && argv) {
    proc = SIMIX_process_self();
    //FIXME: dirty cleanup method to avoid using msg cleanup functions on these processes when using MSG+SMPI
    SIMIX_process_set_cleanup_function(proc, MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    index = smpi_process_index_of_smx_process(proc);

    if(!index_to_process_data){
      index_to_process_data=(int*)xbt_malloc(SIMIX_process_count()*sizeof(int));
    }

    if(smpi_privatize_global_variables){
      /* Now using segment index of the process  */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
    }

    MPI_Comm* temp_comm_world;
    xbt_bar_t temp_bar;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    data              = smpi_process_remote_data(index);
    data->comm_world  = temp_comm_world;
    if(temp_bar != NULL) data->finalization_barrier = temp_bar;
    data->index       = index;
    data->instance_id = instance_id;
    data->replaying   = 0;
    //xbt_free(simcall_process_get_data(proc));

    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
    simdata->data             = data;

    if (*argc > 3) {
      free((*argv)[1]);
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
      (*argv)[(*argc) - 1] = NULL;
      (*argv)[(*argc) - 2] = NULL;
    }
    (*argc)-=2;
    data->argc = argc;
    data->argv = argv;
    // set the process attached to the mailbox
    simcall_mbox_set_receiver(data->mailbox_small, proc);
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  }
  xbt_assert(smpi_process_data(),
      "smpi_process_data() returned NULL. You probably gave a NULL parameter to MPI_Init. Although it's required by "
      "MPI-2, this is currently not supported by SMPI.");
}
Example #9
/** \brief constructor */
xbt_matrix_t xbt_matrix_new(int lines, int rows, const unsigned long elmsize, void_f_pvoid_t const free_f)
{
  xbt_matrix_t res = xbt_new(s_xbt_matrix_t, 1);
  res->lines = lines;
  res->rows = rows;
  res->elmsize = elmsize;
  res->free_f = free_f;
  res->data = xbt_malloc(elmsize * lines * rows);
  return res;
}
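The constructor above allocates one flat block of lines * rows * elmsize bytes for the matrix payload. A usage sketch (xbt_matrix_free is assumed to be the matching destructor from xbt/matrix.h; check your SimGrid version):

/* 3 lines x 4 rows of doubles: one xbt_malloc of 3 * 4 * sizeof(double) bytes */
xbt_matrix_t m = xbt_matrix_new(3, 4, sizeof(double), NULL); /* no per-element free_f */
/* ... fill and use m->data ... */
xbt_matrix_free(m);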
Example #10
/**
 * \brief This function splits a string using another string as separator.
 * For example, "A!!B!!C" split on "!!" will return the dynar {A,B,C}
 * \return An array of dynars containing the string tokens
 */
xbt_dynar_t xbt_str_split_str(const char *s, const char *sep)
{
  xbt_dynar_t res = xbt_dynar_new(sizeof(char *), &xbt_free_ref);
  int done;
  const char *p, *q;

  p = q = s;
  done = 0;

  if (s[0] == '\0')
    return res;
  if (sep[0] == '\0') {
    s = xbt_strdup(s);
    xbt_dynar_push(res, &s);
    return res;
  }

  while (!done) {
    char *to_push;
    int v = 0;
    //get the start of the first occurrence of the substring
    q = strstr(p, sep);
    //if substring was not found add the entire string
    if (NULL == q) {
      v = strlen(p);
      to_push = xbt_malloc(v + 1);
      memcpy(to_push, p, v);
      to_push[v] = '\0';
      xbt_dynar_push(res, &to_push);
      done = 1;
    } else {
      //copy the token that precedes the separator
      to_push = xbt_malloc(q - p + 1);
      memcpy(to_push, p, q - p);
      //add string terminator
      to_push[q - p] = '\0';
      xbt_dynar_push(res, &to_push);
      p = q + strlen(sep);
    }
  }
  return res;
}
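As the comment above promises, splitting "A!!B!!C" on "!!" yields a dynar holding "A", "B" and "C". A usage sketch (a fragment assuming the usual xbt dynar and string headers plus stdio):

xbt_dynar_t parts = xbt_str_split_str("A!!B!!C", "!!");
unsigned cursor;
char *token;
xbt_dynar_foreach(parts, cursor, token)
  printf("token %u: %s\n", cursor, token);   /* prints A, B, C */
xbt_dynar_free(&parts);  /* also frees the copies, thanks to xbt_free_ref */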
Example #11
static char *remplace(char *value, const char **src_list, int src_size,
    const char **dst_list, int dst_size)
{
  char result[BUFFER_SIZE];
  int i_res = 0;
  int i = 0;

  while (value[i]) {
    if (value[i] == '$') {
      i++;                      /* skip the '$' */
      if (value[i] < '0' || value[i] > '9')
        xbt_die("bad string parameter, no number indication, at offset: "
            "%d (\"%s\")", i, value);

      /* solve the number */
      int number = value[i++] - '0';
      while (value[i] >= '0' && value[i] <= '9')
        number = 10 * number + (value[i++] - '0');

      /* solve the indication */
      const char **param_list;
      _XBT_GNUC_UNUSED int param_size;
      if (value[i] == 's' && value[i + 1] == 'r' && value[i + 2] == 'c') {
        param_list = src_list;
        param_size = src_size;
      } else if (value[i] == 'd' && value[i + 1] == 's'
          && value[i + 2] == 't') {
        param_list = dst_list;
        param_size = dst_size;
      } else {
        xbt_die("bad string parameter, support only \"src\" and \"dst\", "
            "at offset: %d (\"%s\")", i, value);
      }
      i += 3;

      xbt_assert(number < param_size,
          "bad string parameter, not enough length param_size, "
          "at offset: %d (\"%s\") %d %d", i, value, param_size, number);

      const char *param = param_list[number];
      int j = 0;
      while (param[j] && i_res < BUFFER_SIZE)
        result[i_res++] = param[j++];
    } else {
      result[i_res++] = value[i++]; /* next char */
    }
    if (i_res >= BUFFER_SIZE)
      xbt_die("solving string \"%s\", small buffer size (%d)",
          value, BUFFER_SIZE);
  }
  result[i_res++] = '\0';
  char *res = xbt_malloc(i_res);
  return memcpy(res, result, i_res);
}
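The parser above reads a '$', a decimal index, then the literal keyword "src" or "dst" that selects which list the index applies to. For instance, with src_list = {"host0", "host1"} and dst_list = {"host9"}, the pattern "$1src_$0dst" expands to "host1_host9". An illustrative call (remplace is static in its translation unit, so this is only a sketch of the substitution semantics):

const char *src[] = { "host0", "host1" };
const char *dst[] = { "host9" };
char *s = remplace((char *)"$1src_$0dst", src, 2, dst, 1);
/* s now holds "host1_host9"; it was allocated with xbt_malloc */
free(s);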
Example #12
static void array_new(unsigned **a, xbt_dynar_t *data)
{
  int i;
  *a = xbt_malloc(ARRAY_SIZE * sizeof **a);
  *data = xbt_dynar_new(sizeof *a, NULL);
  xbt_dynar_shrink(*data, ARRAY_SIZE);
  for (i = 0 ; i < ARRAY_SIZE ; i++) {
    (*a)[i] = i;
    xbt_dynar_push_as(*data, void*, &(*a)[i]);
  }
}
Example #13
void _xbt_log_event_log(xbt_log_event_t ev, const char *fmt, ...)
{
  xbt_log_category_t cat = ev->cat;

  xbt_assert(ev->priority >= 0,
             "Negative logging priority naturally forbidden");
  xbt_assert(ev->priority < sizeof(xbt_log_priority_names),
             "Priority %d is greater than the biggest allowed value",
             ev->priority);

  do {
    xbt_log_appender_t appender = cat->appender;

    if (!appender)
      continue;                 /* No appender, try next */

    xbt_assert(cat->layout,
               "No valid layout for the appender of category %s", cat->name);

    /* First, try with a static buffer */
    if (XBT_LOG_STATIC_BUFFER_SIZE) {
      char buff[XBT_LOG_STATIC_BUFFER_SIZE];
      int done;
      ev->buffer = buff;
      ev->buffer_size = sizeof buff;
      va_start(ev->ap, fmt);
      done = cat->layout->do_layout(cat->layout, ev, fmt);
      va_end(ev->ap);
      if (done) {
        appender->do_append(appender, buff);
        continue;               /* Ok, that worked: go next */
      }
    }

    /* The static buffer was too small, use a dynamically expanded one */
    ev->buffer_size = XBT_LOG_DYNAMIC_BUFFER_SIZE;
    ev->buffer = xbt_malloc(ev->buffer_size);
    while (1) {
      int done;
      va_start(ev->ap, fmt);
      done = cat->layout->do_layout(cat->layout, ev, fmt);
      va_end(ev->ap);
      if (done)
        break;                  /* Got it */
      ev->buffer_size *= 2;
      ev->buffer = xbt_realloc(ev->buffer, ev->buffer_size);
    }
    appender->do_append(appender, ev->buffer);
    xbt_free(ev->buffer);

  } while (cat->additivity && (cat = cat->parent, 1));
}
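The logging path above first tries a fixed-size stack buffer and, only if the layout does not fit, switches to a heap buffer that doubles until the event is fully rendered. The same grow-until-it-fits idiom in isolation, using vsnprintf instead of the log layout (a sketch; format_growing is an illustrative name):

#include <stdarg.h>
#include <stdio.h>
#include <xbt/sysdep.h>   /* xbt_malloc, xbt_realloc */

static char *format_growing(const char *fmt, ...)
{
  size_t size = 64;
  char *buf   = (char *)xbt_malloc(size);
  while (1) {
    va_list ap;
    va_start(ap, fmt);
    int needed = vsnprintf(buf, size, fmt, ap);
    va_end(ap);
    if (needed >= 0 && (size_t)needed < size)
      return buf;                 /* the formatted string fits: done */
    size *= 2;                    /* too small: double the buffer and retry */
    buf = (char *)xbt_realloc(buf, size);
  }
}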
Example #14
/*****************************************************************************
 * Function: allgather_spreading_simple
 * return: int
 *  inputs:
 *   send_buff: send input buffer
 *   send_count: number of elements to send
 *   send_type: data type of elements being sent
 *   recv_buff: receive output buffer
 *   recv_count: number of elements to receive
 *   recv_type: data type of elements being received
 *   comm: communicator
 * Descrp: Let i -> j denote the communication from node i to node j. The
 *         order of communications for node i is i -> i + 1, i -> i + 2, ...,
 *         i -> (i + p -1) % P.
 *
 * Author: Ahmad Faraj
 ****************************************************************************/
int
smpi_coll_tuned_allgather_spreading_simple(void *send_buff, int send_count,
                                           MPI_Datatype send_type,
                                           void *recv_buff, int recv_count,
                                           MPI_Datatype recv_type,
                                           MPI_Comm comm)
{
  MPI_Request *reqs, *req_ptr;
  MPI_Aint extent;
  int i, src, dst, rank, num_procs, num_reqs;
  int tag = COLL_TAG_ALLGATHER;
  MPI_Status status;
  char *recv_ptr = (char *) recv_buff;

  rank = smpi_comm_rank(comm);
  num_procs = smpi_comm_size(comm);
  extent = smpi_datatype_get_extent(send_type);

  num_reqs = (2 * num_procs) - 2;
  reqs = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
  if (!reqs) {
    printf("allgather-spreading-simple.c:40: cannot allocate memory\n");
    MPI_Finalize();
    exit(0);
  }

  req_ptr = reqs;
  smpi_mpi_sendrecv(send_buff, send_count, send_type, rank, tag,
               (char *) recv_buff + rank * recv_count * extent, recv_count,
               recv_type, rank, tag, comm, &status);

  for (i = 0; i < num_procs; i++) {
    src = (rank + i) % num_procs;
    if (src == rank)
      continue;
    *(req_ptr++) = smpi_mpi_irecv(recv_ptr + src * recv_count * extent, recv_count, recv_type,
              src, tag, comm);
  }

  for (i = 0; i < num_procs; i++) {
    dst = (rank + i) % num_procs;
    if (dst == rank)
      continue;
    *(req_ptr++) = smpi_mpi_isend(send_buff, send_count, send_type, dst, tag, comm);
  }

  smpi_mpi_waitall(num_reqs, reqs, MPI_STATUSES_IGNORE);
  free(reqs);

  return MPI_SUCCESS;
}
Example #15
/**
 * \brief Opens the simgrid Lua module.
 *
 * This function is called automatically by the Lua interpreter when some
 * Lua code requires the "simgrid" module.
 *
 * \param L the Lua state
 */
int luaopen_simgrid(lua_State *L)
{
  XBT_DEBUG("luaopen_simgrid *****");

  /* Get the command line arguments from the lua interpreter */
  char **argv = xbt_malloc(sizeof(char *) * LUA_MAX_ARGS_COUNT);
  int argc = 1;
  argv[0] = (char *) "/usr/bin/lua";    /* Lie on the argv[0] so that the stack dumping facilities find the right binary. FIXME: what if lua is not in that location? */

  lua_getglobal(L, "arg");
  /* if arg is nil, we use Lua only as a script to initialize the platform;
   * otherwise it should be a table, and we then take arg into consideration
   */
  if (lua_istable(L, -1)) {
    int done = 0;
    while (!done) {
      argc++;
      lua_pushinteger(L, argc - 2);
      lua_gettable(L, -2);
      if (lua_isnil(L, -1)) {
        done = 1;
      } else {
        xbt_assert(lua_isstring(L, -1),
                    "argv[%d] got from lua is no string", argc - 1);
        xbt_assert(argc < LUA_MAX_ARGS_COUNT,
                    "Too many arguments, please increase LUA_MAX_ARGS_COUNT in %s before recompiling SimGrid if you insist on having more than %d args on command line",
                    __FILE__, LUA_MAX_ARGS_COUNT - 1);
        argv[argc - 1] = (char *) luaL_checkstring(L, -1);
        lua_pop(L, 1);
        XBT_DEBUG("Got command line argument %s from lua", argv[argc - 1]);
      }
    }
    argv[argc--] = NULL;

    /* Initialize the MSG core */
    MSG_init(&argc, argv);
    MSG_process_set_data_cleanup((void_f_pvoid_t) lua_close);
    XBT_DEBUG("Still %d arguments on command line", argc); // FIXME: update the lua's arg table to reflect the changes from SimGrid
  }

  /* Keep the context mechanism informed of our lua world today */
  sglua_maestro_state = L;

  /* initialize access to my tables by children Lua states */
  lua_newtable(L);
  lua_setfield(L, LUA_REGISTRYINDEX, "simgrid.maestro_tables");

  sglua_register_c_functions(L);

  return 1;
}
Example #16
int PMPI_Type_create_hindexed_block(int count, int blocklength, const MPI_Aint* indices, MPI_Datatype old_type,
                                    MPI_Datatype* new_type) {
  if (old_type == MPI_DATATYPE_NULL) {
    return MPI_ERR_TYPE;
  } else if (count<0){
    return MPI_ERR_COUNT;
  } else {
    int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int));
    for (int i     = 0; i < count; i++)
      blocklens[i] = blocklength;
    int retval     = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
    xbt_free(blocklens);
    return retval;
  }
}
Example #17
int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
  int retval = 0;
  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval= MPI_ERR_COMM;
  }else if (disp_unit <= 0 || size < 0 ){
    retval= MPI_ERR_OTHER;
  }else{
    void* ptr = xbt_malloc(size);
    if(ptr==nullptr)
      return MPI_ERR_NO_MEM;
    *static_cast<void**>(base) = ptr;
    *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm,1);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Example #18
RngStream RngStream_CopyStream (const RngStream src)
{
   RngStream g;

   if(src == NULL) {
     printf ("RngStream_CopyStream: 'src' not initialized\n\n");
     exit (EXIT_FAILURE);
   }

   g = (RngStream) xbt_malloc (sizeof (struct RngStream_InfoState));
   if (g == NULL) {
      printf ("RngStream_CopyStream: No more memory\n\n");
      exit (EXIT_FAILURE);
   }
   memcpy((void*) g, (void*) src, sizeof (struct RngStream_InfoState));

   return g;
}
Example #19
int smpi_coll_tuned_scatter_ompi(void *sbuf, int scount, 
                                            MPI_Datatype sdtype,
                                            void* rbuf, int rcount, 
                                            MPI_Datatype rdtype, 
                                            int root, MPI_Comm  comm
                                            )
{
    const size_t small_block_size = 300;
    const int small_comm_size = 10;
    int communicator_size, rank;
    size_t dsize, block_size;

    XBT_DEBUG("smpi_coll_tuned_scatter_ompi");

    communicator_size = smpi_comm_size(comm);
    rank = smpi_comm_rank(comm);
    // Determine block size 
    if (root == rank) {
        dsize=smpi_datatype_size(sdtype);
        block_size = dsize * scount;
    } else {
        dsize=smpi_datatype_size(rdtype);
        block_size = dsize * rcount;
    } 

    if ((communicator_size > small_comm_size) &&
        (block_size < small_block_size)) {
        if(rank!=root){
            sbuf=xbt_malloc(rcount*smpi_datatype_get_extent(rdtype));
            scount=rcount;
            sdtype=rdtype;
        }
        int ret=smpi_coll_tuned_scatter_ompi_binomial (sbuf, scount, sdtype,
            rbuf, rcount, rdtype,
            root, comm);
        if(rank!=root){
            xbt_free(sbuf);
        }
        return ret;
    }
    return smpi_coll_tuned_scatter_ompi_basic_linear (sbuf, scount, sdtype, 
                                                       rbuf, rcount, rdtype, 
                                                       root, comm);
}
Example #20
xbt_dynar_t xbt_str_split(const char *s, const char *sep)
{
  xbt_dynar_t res = xbt_dynar_new(sizeof(char *), &xbt_free_ref);
  const char *p, *q;
  int done;
  const char *sep_dflt = " \t\n\r\x0B";
  char is_sep[256] = { 1, 0 };

  /* check what are the separators */
  memset(is_sep, 0, sizeof(is_sep));
  if (!sep) {
    while (*sep_dflt)
      is_sep[(unsigned char) *sep_dflt++] = 1;
  } else {
    while (*sep)
      is_sep[(unsigned char) *sep++] = 1;
  }
  is_sep[0] = 1;                /* End of string is also separator */

  /* Do the job */
  p = q = s;
  done = 0;

  if (s[0] == '\0')
    return res;

  while (!done) {
    char *topush;
    while (!is_sep[(unsigned char) *q]) {
      q++;
    }
    if (*q == '\0')
      done = 1;

    topush = xbt_malloc(q - p + 1);
    memcpy(topush, p, q - p);
    topush[q - p] = '\0';
    xbt_dynar_push(res, &topush);
    p = ++q;
  }

  return res;
}
Example #21
int vasprintf(char **ptr, const char *fmt, va_list ap)
{
  size_t str_m;
  int str_l;

  *ptr = NULL;
  {
    va_list ap2;
    va_copy(ap2, ap);           /* don't consume the original ap, we'll need it again */
    str_l = vsnprintf(NULL, (size_t) 0, fmt, ap2);     /*get required size */
    va_end(ap2);
  }
  xbt_assert(str_l >= 0);           /* possible integer overflow if str_m > INT_MAX */
  *ptr = (char *) xbt_malloc(str_m = (size_t) str_l + 1);

  int str_l2 = vsnprintf(*ptr, str_m, fmt, ap);
  assert(str_l2 == str_l);

  return str_l;
}
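The vasprintf above sizes the output with a first vsnprintf(NULL, 0, ...) pass, allocates exactly str_l + 1 bytes with xbt_malloc, then formats for real. A variadic front end built on it (a sketch; my_asprintf is an illustrative name, not an existing API):

#include <stdarg.h>

static int my_asprintf(char **ptr, const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  int len = vasprintf(ptr, fmt, ap);   /* the vasprintf() above allocates *ptr with xbt_malloc */
  va_end(ap);
  return len;                          /* formatted length, excluding the terminating '\0' */
}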
Example #22
void smpi_comm_copy_buffer_callback(smx_synchro_t comm,
                                           void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff=buff;

  if((smpi_privatize_global_variables)
      && ((char*)buff >= smpi_start_data_exe)
      && ((char*)buff < smpi_start_data_exe + smpi_size_data_exe )
    ){
       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
       smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.src_proc))->data))->index);
       tmpbuff = (void*)xbt_malloc(buff_size);
       memcpy(tmpbuff, buff, buff_size);
  }


  if((smpi_privatize_global_variables)
      && ((char*)comm->comm.dst_buff >= smpi_start_data_exe)
      && ((char*)comm->comm.dst_buff < smpi_start_data_exe + smpi_size_data_exe )
    ){
       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
       smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.dst_proc))->data))->index);
  }


  memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
  if (comm->comm.detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    //The request seems to be used after this call, so it should be freed
    //somewhere else, but where?
    //xbt_free(comm->comm.src_data);// inside SMPI the request is kept
    //inside the user data and should be freed there
    comm->comm.src_buff = NULL;
  }

  if(tmpbuff!=buff)xbt_free(tmpbuff);

}
Example #23
static int test_parmap_basic(e_xbt_parmap_mode_t mode)
{
  int ret = 0;
  unsigned num_workers;
  for (num_workers = 1 ; num_workers <= 16 ; num_workers *= 2) {
    const unsigned len = 1033;
    const unsigned num = 5;
    unsigned *a;
    xbt_dynar_t data;
    xbt_parmap_t parmap;
    unsigned i;

    parmap = xbt_parmap_new(num_workers, mode);

    a = xbt_malloc(len * sizeof *a);
    data = xbt_dynar_new(sizeof a, NULL);
    for (i = 0; i < len; i++) {
      a[i] = i;
      xbt_dynar_push_as(data, void *, &a[i]);
    }

    for (i = 0; i < num; i++)
      xbt_parmap_apply(parmap, fun_double, data);

    for (i = 0; i < len; i++) {
      unsigned expected = (1U << num) * (i + 1) - 1;
      if (a[i] != expected) {
        XBT_CRITICAL("with %u threads, a[%u]: expected %u, got %u",
                     num_workers, i, expected, a[i]);
        ret = 1;
        break;
      }
    }

    xbt_dynar_free(&data);
    xbt_free(a);
    xbt_parmap_destroy(parmap);
  }
  return ret;
}
Example #24
/** \ingroup smpi_simulation
 * \brief Registers a running instance of a MPI program.
 *
 * FIXME : remove MSG from the loop at some point.
 * \param name the reference name of the function.
 * \param code the main MPI function (must have an int(int argc, char *argv[]) prototype)
 * \param num_processes the size of the instance we want to deploy
 */
void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_processes)
{
  SIMIX_function_register(name, code);

  s_smpi_mpi_instance_t* instance = (s_smpi_mpi_instance_t*)xbt_malloc(sizeof(s_smpi_mpi_instance_t));

  instance->name = name;
  instance->size = num_processes;
  instance->present_processes = 0;
  instance->index = process_count;
  instance->comm_world = MPI_COMM_NULL;
  instance->finalization_barrier=xbt_barrier_init(num_processes);

  process_count+=num_processes;

  if(smpi_instances==nullptr){
    smpi_instances = xbt_dict_new_homogeneous(xbt_free_f);
  }

  xbt_dict_set(smpi_instances, name, (void*)instance, nullptr);
  return;
}
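Per the doc comment, this registers an MPI main function under a name together with the number of processes the instance will span, so a deployment can later start it. A registration sketch assuming the usual SMPI/MPI headers (my_mpi_main and the instance name are illustrative):

static int my_mpi_main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  /* ... the MPI code of this instance ... */
  MPI_Finalize();
  return 0;
}

/* in the simulator's setup code, before the simulation starts: */
SMPI_app_instance_register("my_instance", my_mpi_main, 4);  /* 4 processes */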
Example #25
xbt_edge_t new_xbt_graph_edge(xbt_graph_t graph, xbt_node_t s, xbt_node_t d, xbt_dict_t edges)
{
  const char *sn = instr_node_name(s);
  const char *dn = instr_node_name(d);
  int len = strlen(sn) + strlen(dn) + 1;
  char *name = (char *) xbt_malloc(len * sizeof(char));


  snprintf(name, len, "%s%s", sn, dn);
  xbt_edge_t ret = (xbt_edge_t) xbt_dict_get_or_null(edges, name);
  if (ret == nullptr) {
    snprintf(name, len, "%s%s", dn, sn);
    ret = (xbt_edge_t) xbt_dict_get_or_null(edges, name);
  }

  if (ret == nullptr) {
    ret = xbt_graph_new_edge(graph, s, d, nullptr);
    xbt_dict_set(edges, name, ret, nullptr);
  }
  free(name);
  return ret;
}
Example #26
static int test_parmap_extended(e_xbt_parmap_mode_t mode)
{
  int ret = 0;
  unsigned num_workers;

  for (num_workers = 1 ; num_workers <= 16 ; num_workers *= 2) {
    const unsigned len = 2 * num_workers;
    uintptr_t *a;
    xbt_parmap_t parmap;
    xbt_dynar_t data;
    unsigned i;
    unsigned count;

    parmap = xbt_parmap_new(num_workers, mode);

    a = xbt_malloc(len * sizeof *a);
    data = xbt_dynar_new(sizeof a, NULL);
    for (i = 0; i < len; i++)
      xbt_dynar_push_as(data, void *, &a[i]);

    xbt_parmap_apply(parmap, fun_get_id, data);

    qsort(a, len, sizeof a[0], fun_compare);
    count = 1;
    for (i = 1; i < len; i++)
      if (a[i] != a[i - 1])
        count++;
    if (count != num_workers) {
      XBT_CRITICAL("only %u/%u threads did some work", count, num_workers);
      ret = 1;
    }

    xbt_dynar_free(&data);
    xbt_free(a);
    xbt_parmap_destroy(parmap);
  }
  return ret;
}
Example #27
int smpi_coll_tuned_gather_mvapich2_two_level(void *sendbuf,
                                            int sendcnt,
                                            MPI_Datatype sendtype,
                                            void *recvbuf,
                                            int recvcnt,
                                            MPI_Datatype recvtype,
                                            int root,
                                            MPI_Comm comm)
{
    void *leader_gather_buf = NULL;
    int comm_size, rank;
    int local_rank, local_size;
    int leader_comm_rank = -1, leader_comm_size = 0;
    int mpi_errno = MPI_SUCCESS;
    int recvtype_size = 0, sendtype_size = 0, nbytes=0;
    int leader_root, leader_of_root;
    MPI_Status status;
    MPI_Aint sendtype_extent = 0, recvtype_extent = 0;  /* Datatype extent */
    MPI_Aint true_lb, sendtype_true_extent, recvtype_true_extent;
    MPI_Comm shmem_comm, leader_comm;
    void* tmp_buf = NULL;
    

    //if not set (use of the algo directly, without mvapich2 selector)
    if(MV2_Gather_intra_node_function==NULL)
      MV2_Gather_intra_node_function=smpi_coll_tuned_gather_mpich;
    
    if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
      smpi_comm_init_smp(comm);
    }
    comm_size = smpi_comm_size(comm);
    rank = smpi_comm_rank(comm);

    if (((rank == root) && (recvcnt == 0)) ||
        ((rank != root) && (sendcnt == 0))) {
        return MPI_SUCCESS;
    }

    if (sendtype != MPI_DATATYPE_NULL) {
        sendtype_extent=smpi_datatype_get_extent(sendtype);
        sendtype_size=smpi_datatype_size(sendtype);
        smpi_datatype_extent(sendtype, &true_lb,
                                       &sendtype_true_extent);
    }
    if (recvtype != MPI_DATATYPE_NULL) {
        recvtype_extent=smpi_datatype_get_extent(recvtype);
        recvtype_size=smpi_datatype_size(recvtype);
        smpi_datatype_extent(recvtype, &true_lb,
                                       &recvtype_true_extent);
    }

    /* extract the rank,size information for the intra-node
     * communicator */
    shmem_comm = smpi_comm_get_intra_comm(comm);
    local_rank = smpi_comm_rank(shmem_comm);
    local_size = smpi_comm_size(shmem_comm);
    
    if (local_rank == 0) {
        /* Node leader. Extract the rank, size information for the leader
         * communicator */
        leader_comm = smpi_comm_get_leaders_comm(comm);
        if(leader_comm==MPI_COMM_NULL){
          leader_comm = MPI_COMM_WORLD;
        }
        leader_comm_size = smpi_comm_size(leader_comm);
        leader_comm_rank = smpi_comm_rank(leader_comm);
    }

    if (rank == root) {
        nbytes = recvcnt * recvtype_size;

    } else {
        nbytes = sendcnt * sendtype_size;
    }

#if defined(_SMP_LIMIC_)
     if((g_use_limic2_coll) && (shmem_commptr->ch.use_intra_sock_comm == 1) 
         && (use_limic_gather)
         &&((num_scheme == USE_GATHER_PT_PT_BINOMIAL) 
            || (num_scheme == USE_GATHER_PT_PT_DIRECT)
            ||(num_scheme == USE_GATHER_PT_LINEAR_BINOMIAL) 
            || (num_scheme == USE_GATHER_PT_LINEAR_DIRECT)
            || (num_scheme == USE_GATHER_LINEAR_PT_BINOMIAL)
            || (num_scheme == USE_GATHER_LINEAR_PT_DIRECT)
            || (num_scheme == USE_GATHER_LINEAR_LINEAR)
            || (num_scheme == USE_GATHER_SINGLE_LEADER))) {
            
            mpi_errno = MV2_Gather_intra_node_function(sendbuf, sendcnt, sendtype,
                                                    recvbuf, recvcnt,recvtype, 
                                                    root, comm);
     } else

#endif/*#if defined(_SMP_LIMIC_)*/    
    {
        if (local_rank == 0) {
            /* Node leader, allocate tmp_buffer */
            if (rank == root) {
                tmp_buf = smpi_get_tmp_recvbuffer(recvcnt * MAX(recvtype_extent,
                            recvtype_true_extent) * local_size);
            } else {
                tmp_buf = smpi_get_tmp_sendbuffer(sendcnt * MAX(sendtype_extent,
                            sendtype_true_extent) *
                        local_size);
            }
            if (tmp_buf == NULL) {
                mpi_errno = MPI_ERR_OTHER;
                return mpi_errno;
            }
        }
         /* While running the mpich2 gather tests, which basically split the
         * communicator, we can reach a point where use_intra_sock_comm == 0
         * while the intra-node function is MPIR_Intra_node_LIMIC_Gather_MV2,
         * which would still use the intra-socket communicator. In such cases
         * we fall back to binomial as the default case.*/
#if defined(_SMP_LIMIC_)         
        if(*MV2_Gather_intra_node_function == MPIR_Intra_node_LIMIC_Gather_MV2) {

            mpi_errno  = MPIR_pt_pt_intra_gather(sendbuf,sendcnt, sendtype,
                                                 recvbuf, recvcnt, recvtype,
                                                 root, rank, 
                                                 tmp_buf, nbytes, 
                                                 TEMP_BUF_HAS_NO_DATA,
                                                 shmem_commptr,
                                                 MPIR_Gather_intra);
        } else
#endif
        {
            /*We are gathering the data into tmp_buf and the output
             * will be of MPI_BYTE datatype. Since the tmp_buf has no
             * local data, we pass is_data_avail = TEMP_BUF_HAS_NO_DATA*/
            mpi_errno  = MPIR_pt_pt_intra_gather(sendbuf,sendcnt, sendtype,
                                                 recvbuf, recvcnt, recvtype,
                                                 root, rank, 
                                                 tmp_buf, nbytes, 
                                                 TEMP_BUF_HAS_NO_DATA,
                                                 shmem_comm,
                                                 MV2_Gather_intra_node_function
                                                 );
        }
    }
    leader_comm = smpi_comm_get_leaders_comm(comm);
    int* leaders_map = smpi_comm_get_leaders_map(comm);
    leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]);
    leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]);
    /* leader_root is the rank of the leader of the root in leader_comm. 
     * leader_root is to be used as the root of the inter-leader gather ops 
     */
    if (!smpi_comm_is_uniform(comm)) {
        if (local_rank == 0) {
            int *displs = NULL;
            int *recvcnts = NULL;
            int *node_sizes;
            int i = 0;
            /* Node leaders have all the data. But, different nodes can have
             * different number of processes. Do a Gather first to get the 
             * buffer lengths at each leader, followed by a Gatherv to move
             * the actual data */

            if (leader_comm_rank == leader_root && root != leader_of_root) {
                /* The root of the Gather operation is not a node-level 
                 * leader and this process's rank in the leader_comm 
                 * is the same as leader_root */
                if(rank == root) { 
                    leader_gather_buf = smpi_get_tmp_recvbuffer(recvcnt *
                                                MAX(recvtype_extent,
                                                recvtype_true_extent) *
                                                comm_size);
                } else { 
                    leader_gather_buf = smpi_get_tmp_sendbuffer(sendcnt *
                                                MAX(sendtype_extent,
                                                sendtype_true_extent) *
                                                comm_size);
                } 
                if (leader_gather_buf == NULL) {
                    mpi_errno =  MPI_ERR_OTHER;
                    return mpi_errno;
                }
            }

            node_sizes = smpi_comm_get_non_uniform_map(comm);

            if (leader_comm_rank == leader_root) {
                displs = xbt_malloc(sizeof (int) * leader_comm_size);
                recvcnts = xbt_malloc(sizeof (int) * leader_comm_size);
                if (!displs || !recvcnts) {
                    mpi_errno = MPI_ERR_OTHER;
                    return mpi_errno;
                }
            }

            if (root == leader_of_root) {
                /* The root of the gather operation is also the node 
                 * leader. Receive into recvbuf and we are done */
                if (leader_comm_rank == leader_root) {
                    recvcnts[0] = node_sizes[0] * recvcnt;
                    displs[0] = 0;

                    for (i = 1; i < leader_comm_size; i++) {
                        displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
                        recvcnts[i] = node_sizes[i] * recvcnt;
                    }
                } 
                smpi_mpi_gatherv(tmp_buf,
                                         local_size * nbytes,
                                         MPI_BYTE, recvbuf, recvcnts,
                                         displs, recvtype,
                                         leader_root, leader_comm);
            } else {
                /* The root of the gather operation is not the node leader. 
                 * Receive into leader_gather_buf and then send 
                 * to the root */
                if (leader_comm_rank == leader_root) {
                    recvcnts[0] = node_sizes[0] * nbytes;
                    displs[0] = 0;

                    for (i = 1; i < leader_comm_size; i++) {
                        displs[i] = displs[i - 1] + node_sizes[i - 1] * nbytes;
                        recvcnts[i] = node_sizes[i] * nbytes;
                    }
                } 
                smpi_mpi_gatherv(tmp_buf, local_size * nbytes,
                                         MPI_BYTE, leader_gather_buf,
                                         recvcnts, displs, MPI_BYTE,
                                         leader_root, leader_comm);
            }
            if (leader_comm_rank == leader_root) {
                xbt_free(displs);
                xbt_free(recvcnts);
            }
        }
    } else {
        /* All nodes have the same number of processes. 
         * Just do one Gather to get all 
         * the data at the leader of the root process */
        if (local_rank == 0) {
            if (leader_comm_rank == leader_root && root != leader_of_root) {
                /* The root of the Gather operation is not a node-level leader
                 */
                leader_gather_buf = smpi_get_tmp_sendbuffer(nbytes * comm_size);
                if (leader_gather_buf == NULL) {
                    mpi_errno = MPI_ERR_OTHER;
                    return mpi_errno;
                }
            }
            if (root == leader_of_root) {
                mpi_errno = MPIR_Gather_MV2_Direct(tmp_buf,
                                                   nbytes * local_size,
                                                   MPI_BYTE, recvbuf,
                                                   recvcnt * local_size,
                                                   recvtype, leader_root,
                                                   leader_comm);
                 
            } else {
                mpi_errno = MPIR_Gather_MV2_Direct(tmp_buf, nbytes * local_size,
                                                   MPI_BYTE, leader_gather_buf,
                                                   nbytes * local_size,
                                                   MPI_BYTE, leader_root,
                                                   leader_comm);
            }
        }
    }
    if ((local_rank == 0) && (root != rank)
        && (leader_of_root == rank)) {
        smpi_mpi_send(leader_gather_buf,
                                 nbytes * comm_size, MPI_BYTE,
                                 root, COLL_TAG_GATHER, comm);
    }

    if (rank == root && local_rank != 0) {
        /* The root of the gather operation is not the node leader. Receive
         * data from the node leader */
        smpi_mpi_recv(recvbuf, recvcnt * comm_size, recvtype,
                                 leader_of_root, COLL_TAG_GATHER, comm,
                                 &status);
    }

    /* check if multiple threads are calling this collective function */
    if (local_rank == 0 ) {
        if (tmp_buf != NULL) {
            smpi_free_tmp_buffer(tmp_buf);
        }
        if (leader_gather_buf != NULL) {
            smpi_free_tmp_buffer(leader_gather_buf);
        }
    }

    return (mpi_errno);
}
Example #28
/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware_wait(void *buf, int count,
                                                     MPI_Datatype datatype,
                                                     int root, MPI_Comm comm)
{
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;


  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int rank, size;
  int i, j, k;
  int tag = -COLL_TAG_BCAST;
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int sent_count;
  int header_index;
  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;

  MPI_Aint extent;
  extent = smpi_datatype_get_extent(datatype);

  /* source and destination */
  int to, from;



  rank = smpi_comm_rank(MPI_COMM_WORLD);
  size = smpi_comm_size(MPI_COMM_WORLD);


  /* segment is segment size in number of elements (not bytes) */
  int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;

  /* pipeline length */
  int pipe_length = count / segment;

  /* use for buffer offset for sending and receiving data = segment size in byte */
  int increment = segment * extent;

  /* if the input size is not divisible by segment size => 
     the small remainder will be done with native implementation */
  int remainder = count % segment;

  /* if root is not zero send to rank zero first
     this can be modified to make it faster by using logical src, dst.
   */
  if (root != 0) {
    if (rank == root) {
      smpi_mpi_send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      smpi_mpi_recv(buf, count, datatype, root, tag, comm, &status);
    }
  }


  /* value == 0 means root has not send data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }

  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {
    segment = count;
    pipe_length = 1;
  }

  /* start pipeline bcast */

  send_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  recv_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  send_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
  recv_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));

  /* root */
  if (rank == 0) {
    sent_count = 0;
    int iteration = 0;

    for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
      will_send[i] = 0;
    while (sent_count < (size - 1)) {
      iteration++;

      /* loop k times to let more processes arrive before start sending data */
      for (k = 0; k < 3; k++) {
        for (i = 1; i < size; i++) {
          if ((already_sent[i] == 0) && (will_send[i] == 0)) {
            smpi_mpi_iprobe(i, MPI_ANY_TAG, MPI_COMM_WORLD, &flag_array[i],
                       &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              smpi_mpi_recv(&temp_buf[i], 1, MPI_CHAR, i, tag, MPI_COMM_WORLD,
                       &status);
              i = 0;
            }
          }
        }
      }

      header_index = 0;

      /* recv 1-byte message */
      for (i = 1; i < size; i++) {
        /* message arrive */
        if ((will_send[i] == 1) && (already_sent[i] == 0)) {
          header_buf[header_index] = i;
          header_index++;
          sent_count++;

          /* will send in the next step */
          already_sent[i] = 1;
        }
      }

      /* send header followed by data */
      if (header_index != 0) {
        header_buf[header_index] = -1;
        to = header_buf[0];

        /* send header */
        smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);

        /* send data - pipeline */
        for (i = 0; i < pipe_length; i++) {
          send_request_array[i] = smpi_mpi_isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
        }
        smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
      }


      /* end - send header followed by data */
      /* randomly MPI_Send to one node */
      /* this part has been commented out - performance-wise */
      else if (2 == 3) {
        /* search for the first node that never received data before */
        for (i = 0; i < size; i++) {
          if (i == root)
            continue;
          if (already_sent[i] == 0) {
            header_buf[0] = i;
            header_buf[1] = -1;
            to = i;

            smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);

            /* still need to chop data so that we can use the same non-root code */
            for (j = 0; j < pipe_length; j++) {
              smpi_mpi_send((char *)buf + (j * increment), segment, datatype, to, tag, comm);
            }
          }
        }
      }
    }                           /* end - while (send_count < size-1) loop */
  }

  /* end - root */
  /* none root */
  else {

    /* send 1-byte message to root */
    smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

    /* wait for header forward when required */
    request = smpi_mpi_irecv(header_buf, header_size, MPI_INT, MPI_ANY_SOURCE, tag, comm);
    smpi_mpi_wait(&request, MPI_STATUS_IGNORE);

    /* search for where it is */
    int myordering = 0;
    while (rank != header_buf[myordering]) {
      myordering++;
    }

    to = header_buf[myordering + 1];
    if (myordering == 0) {
      from = 0;
    } else {
      from = header_buf[myordering - 1];
    }

    /* send header when required */
    if (to != -1) {
      smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);
    }

    /* receive data */

    for (i = 0; i < pipe_length; i++) {
      recv_request_array[i] = smpi_mpi_irecv((char *)buf + (i * increment), segment, datatype, from, tag, comm);
    }

    /* forward data */
    if (to != -1) {
      for (i = 0; i < pipe_length; i++) {
        smpi_mpi_wait(&recv_request_array[i], MPI_STATUS_IGNORE);
        send_request_array[i] = smpi_mpi_isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
      }
      smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
    }

    /* recv only */
    else {
      smpi_mpi_waitall((pipe_length), recv_request_array, recv_status_array);
    }
  }

  free(send_request_array);
  free(recv_request_array);
  free(send_status_array);
  free(recv_status_array);
  /* end pipeline */

  /* when count is not divisible by block size, use default BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");	  	  
    smpi_mpi_bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}
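The pipeline bookkeeping at the top of this function works in elements: segment = segment_size_in_bytes / extent, pipe_length = count / segment full chunks, and remainder = count % segment elements that fall back to the plain bcast at the end. A worked instance (the 8192-byte segment size is an assumed example value, not the library default):

/* extent = 4 bytes (e.g. MPI_INT), count = 10000 elements, segment size = 8192 bytes */
int segment     = 8192 / 4;        /* 2048 elements per pipelined chunk            */
int pipe_length = 10000 / segment; /* 4 full chunks go through the pipeline        */
int remainder   = 10000 % segment; /* 1808 elements left for the final plain bcast */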
Example #29
int Coll_allgather_mvapich2_smp::allgather(void *sendbuf,int sendcnt, MPI_Datatype sendtype,
                            void *recvbuf, int recvcnt,MPI_Datatype recvtype,
                            MPI_Comm  comm)
{
    int rank, size;
    int local_rank, local_size;
    int leader_comm_size = 0;
    int mpi_errno = MPI_SUCCESS;
    MPI_Aint recvtype_extent = 0;  /* Datatype extent */
    MPI_Comm shmem_comm, leader_comm;

  if(comm->get_leaders_comm()==MPI_COMM_NULL){
    comm->init_smp();
  }

  if (not comm->is_uniform() || not comm->is_blocked())
    THROWF(arg_error,0, "allgather MVAPICH2 smp algorithm can't be used with irregular deployment. Please insure that processes deployed on the same node are contiguous and that each node has the same number of processes");

    if (recvcnt == 0) {
        return MPI_SUCCESS;
    }

    rank = comm->rank();
    size = comm->size();

    /* extract the rank,size information for the intra-node communicator */
    recvtype_extent=recvtype->get_extent();

    shmem_comm = comm->get_intra_comm();
    local_rank = shmem_comm->rank();
    local_size = shmem_comm->size();

    if (local_rank == 0) {
        /* Node leader. Extract the rank, size information for the leader communicator */
        leader_comm = comm->get_leaders_comm();
        if(leader_comm==MPI_COMM_NULL){
          leader_comm = MPI_COMM_WORLD;
        }
        leader_comm_size = leader_comm->size();
    }

    /*If there is just one node, after gather itself,
     * root has all the data and it can do bcast*/
    if(local_rank == 0) {
        mpi_errno = Colls::gather(sendbuf, sendcnt,sendtype,
                                    (void*)((char*)recvbuf + (rank * recvcnt * recvtype_extent)),
                                     recvcnt, recvtype,
                                     0, shmem_comm);
    } else {
        /*Since in allgather all the processes could have
         * its own data in place*/
        if(sendbuf == MPI_IN_PLACE) {
            mpi_errno = Colls::gather((void*)((char*)recvbuf + (rank * recvcnt * recvtype_extent)),
                                         recvcnt , recvtype,
                                         recvbuf, recvcnt, recvtype,
                                         0, shmem_comm);
        } else {
            mpi_errno = Colls::gather(sendbuf, sendcnt,sendtype,
                                         recvbuf, recvcnt, recvtype,
                                         0, shmem_comm);
        }
    }
    /* Exchange the data between the node leaders*/
    if (local_rank == 0 && (leader_comm_size > 1)) {
        /*When data in each socket is different*/
        if (comm->is_uniform() != 1) {

            int *displs = NULL;
            int *recvcnts = NULL;
            int *node_sizes = NULL;
            int i = 0;

            node_sizes = comm->get_non_uniform_map();

            displs =  static_cast<int *>(xbt_malloc(sizeof (int) * leader_comm_size));
            recvcnts =  static_cast<int *>(xbt_malloc(sizeof (int) * leader_comm_size));
            if (not displs || not recvcnts) {
              return MPI_ERR_OTHER;
            }
            recvcnts[0] = node_sizes[0] * recvcnt;
            displs[0] = 0;

            for (i = 1; i < leader_comm_size; i++) {
                displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
                recvcnts[i] = node_sizes[i] * recvcnt;
            }


            void* sendbuf=((char*)recvbuf)+recvtype->get_extent()*displs[leader_comm->rank()];

            mpi_errno = Colls::allgatherv(sendbuf,
                                       (recvcnt*local_size),
                                       recvtype,
                                       recvbuf, recvcnts,
                                       displs, recvtype,
                                       leader_comm);
            xbt_free(displs);
            xbt_free(recvcnts);
        } else {
        void* sendtmpbuf=((char*)recvbuf)+recvtype->get_extent()*(recvcnt*local_size)*leader_comm->rank();



            mpi_errno = Coll_allgather_mpich::allgather(sendtmpbuf,
                                               (recvcnt*local_size),
                                               recvtype,
                                               recvbuf, (recvcnt*local_size), recvtype,
                                             leader_comm);

        }
    }

    /*Bcast the entire data from node leaders to all other cores*/
    mpi_errno = Colls::bcast (recvbuf, recvcnt * size, recvtype, 0, shmem_comm);
    return mpi_errno;
}
Example #30
int Coll_scatter_mvapich2_two_level_direct::scatter(void *sendbuf,
                                      int sendcnt,
                                      MPI_Datatype sendtype,
                                      void *recvbuf,
                                      int recvcnt,
                                      MPI_Datatype recvtype,
                                      int root, MPI_Comm  comm)
{
    int comm_size, rank;
    int local_rank, local_size;
    int leader_comm_rank = -1, leader_comm_size = -1;
    int mpi_errno = MPI_SUCCESS;
    int recvtype_size, sendtype_size, nbytes;
    void *tmp_buf = NULL;
    void *leader_scatter_buf = NULL;
    MPI_Status status;
    int leader_root, leader_of_root = -1;
    MPI_Comm shmem_comm, leader_comm;
    //if not set (use of the algo directly, without mvapich2 selector)
    if(MV2_Scatter_intra_function==NULL)
      MV2_Scatter_intra_function=Coll_scatter_mpich::scatter;

    if(comm->get_leaders_comm()==MPI_COMM_NULL){
      comm->init_smp();
    }
    comm_size = comm->size();
    rank = comm->rank();

    if (((rank == root) && (recvcnt == 0))
        || ((rank != root) && (sendcnt == 0))) {
        return MPI_SUCCESS;
    }

    /* extract the rank,size information for the intra-node
     * communicator */
    shmem_comm = comm->get_intra_comm();
    local_rank = shmem_comm->rank();
    local_size = shmem_comm->size();

    if (local_rank == 0) {
        /* Node leader. Extract the rank, size information for the leader
         * communicator */
        leader_comm = comm->get_leaders_comm();
        leader_comm_size = leader_comm->size();
        leader_comm_rank = leader_comm->rank();
    }

    if (local_size == comm_size) {
        /* purely intra-node scatter. Just use the direct algorithm and we are done */
        mpi_errno = MPIR_Scatter_MV2_Direct(sendbuf, sendcnt, sendtype,
                                            recvbuf, recvcnt, recvtype,
                                            root, comm);

    } else {
        recvtype_size=recvtype->size();
        sendtype_size=sendtype->size();

        if (rank == root) {
            nbytes = sendcnt * sendtype_size;
        } else {
            nbytes = recvcnt * recvtype_size;
        }

        if (local_rank == 0) {
            /* Node leader, allocate tmp_buffer */
            tmp_buf = smpi_get_tmp_sendbuffer(nbytes * local_size);
        }

        leader_comm = comm->get_leaders_comm();
        int* leaders_map = comm->get_leaders_map();
        leader_of_root = comm->group()->rank(leaders_map[root]);
        leader_root = leader_comm->group()->rank(leaders_map[root]);
        /* leader_root is the rank of the leader of the root in leader_comm.
         * leader_root is to be used as the root of the inter-leader gather ops
         */

        if ((local_rank == 0) && (root != rank)
            && (leader_of_root == rank)) {
            /* The root of the scatter operation is not the node leader. Recv
             * data from the node leader */
            leader_scatter_buf = smpi_get_tmp_sendbuffer(nbytes * comm_size);
            Request::recv(leader_scatter_buf, nbytes * comm_size, MPI_BYTE,
                             root, COLL_TAG_SCATTER, comm, &status);

        }

        if (rank == root && local_rank != 0) {
            /* The root of the scatter operation is not the node leader. Send
             * data to the node leader */
            Request::send(sendbuf, sendcnt * comm_size, sendtype,
                                     leader_of_root, COLL_TAG_SCATTER, comm
                                     );
        }

        if (leader_comm_size > 1 && local_rank == 0) {
          if (not comm->is_uniform()) {
            int* displs   = NULL;
            int* sendcnts = NULL;
            int* node_sizes;
            int i      = 0;
            node_sizes = comm->get_non_uniform_map();

            if (root != leader_of_root) {
              if (leader_comm_rank == leader_root) {
                displs      = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
                sendcnts    = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
                sendcnts[0] = node_sizes[0] * nbytes;
                displs[0]   = 0;

                for (i = 1; i < leader_comm_size; i++) {
                  displs[i]   = displs[i - 1] + node_sizes[i - 1] * nbytes;
                  sendcnts[i] = node_sizes[i] * nbytes;
                }
              }
              Colls::scatterv(leader_scatter_buf, sendcnts, displs, MPI_BYTE, tmp_buf, nbytes * local_size, MPI_BYTE,
                              leader_root, leader_comm);
            } else {
              if (leader_comm_rank == leader_root) {
                displs      = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
                sendcnts    = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
                sendcnts[0] = node_sizes[0] * sendcnt;
                displs[0]   = 0;

                for (i = 1; i < leader_comm_size; i++) {
                  displs[i]   = displs[i - 1] + node_sizes[i - 1] * sendcnt;
                  sendcnts[i] = node_sizes[i] * sendcnt;
                }
              }
              Colls::scatterv(sendbuf, sendcnts, displs, sendtype, tmp_buf, nbytes * local_size, MPI_BYTE, leader_root,
                              leader_comm);
            }
            if (leader_comm_rank == leader_root) {
              xbt_free(displs);
              xbt_free(sendcnts);
            }
            } else {
                if (leader_of_root != root) {
                    mpi_errno =
                        MPIR_Scatter_MV2_Direct(leader_scatter_buf,
                                                nbytes * local_size, MPI_BYTE,
                                                tmp_buf, nbytes * local_size,
                                                MPI_BYTE, leader_root,
                                                leader_comm);
                } else {
                    mpi_errno =
                        MPIR_Scatter_MV2_Direct(sendbuf, sendcnt * local_size,
                                                sendtype, tmp_buf,
                                                nbytes * local_size, MPI_BYTE,
                                                leader_root, leader_comm);

                }
            }
        }
        /* The leaders are now done with the inter-leader part. Scatter the data within the nodes */

        if (rank == root && recvbuf == MPI_IN_PLACE) {
            mpi_errno = MV2_Scatter_intra_function(tmp_buf, nbytes, MPI_BYTE,
                                                (void *)sendbuf, sendcnt, sendtype,
                                                0, shmem_comm);
        } else {
            mpi_errno = MV2_Scatter_intra_function(tmp_buf, nbytes, MPI_BYTE,
                                                recvbuf, recvcnt, recvtype,
                                                0, shmem_comm);
        }
    }

    /* check if multiple threads are calling this collective function */
    if (comm_size != local_size && local_rank == 0) {
        smpi_free_tmp_buffer(tmp_buf);
        if (leader_of_root == rank && root != rank) {
            smpi_free_tmp_buffer(leader_scatter_buf);
        }
    }
    return (mpi_errno);
}