Example #1
/* Translate the ranks of the post group into ranks in the user window
 * group and store them in uh_win->post_ranks_in_win_group. */
static int fill_ranks_in_win_grp(MTCORE_Win * uh_win)
{
    int mpi_errno = MPI_SUCCESS;
    int *ranks_in_post_grp = NULL;
    int i, post_grp_size;

    mpi_errno = PMPI_Group_size(uh_win->post_group, &post_grp_size);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    ranks_in_post_grp = calloc(post_grp_size, sizeof(int));
    if (ranks_in_post_grp == NULL) {
        mpi_errno = MPI_ERR_NO_MEM;
        goto fn_fail;
    }
    for (i = 0; i < post_grp_size; i++) {
        ranks_in_post_grp[i] = i;
    }

    mpi_errno = PMPI_Group_translate_ranks(uh_win->post_group, post_grp_size,
                                           ranks_in_post_grp, uh_win->user_group,
                                           uh_win->post_ranks_in_win_group);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

  fn_exit:
    if (ranks_in_post_grp)
        free(ranks_in_post_grp);
    return mpi_errno;

  fn_fail:
    goto fn_exit;
}
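For reference, here is a minimal self-contained sketch of the same enumerate-and-translate pattern used by fill_ranks_in_win_grp above: build an identity rank array for one group, then map it into another group with MPI_Group_translate_ranks. The two-member subgroup and the printf output are illustrative assumptions only; the sketch assumes at least two processes.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int i, sub_size;
    int *in_ranks, *out_ranks;
    int members[2] = { 0, 1 };
    MPI_Group world_grp, sub_grp;

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Group_incl(world_grp, 2, members, &sub_grp);

    MPI_Group_size(sub_grp, &sub_size);
    in_ranks = calloc(sub_size, sizeof(int));
    out_ranks = calloc(sub_size, sizeof(int));
    for (i = 0; i < sub_size; i++)
        in_ranks[i] = i;

    /* Map each rank in sub_grp to its rank in world_grp. */
    MPI_Group_translate_ranks(sub_grp, sub_size, in_ranks, world_grp, out_ranks);
    for (i = 0; i < sub_size; i++)
        printf("sub rank %d -> world rank %d\n", i, out_ranks[i]);

    free(in_ranks);
    free(out_ranks);
    MPI_Group_free(&sub_grp);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}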
Example #2
int MPI_Win_wait(MPI_Win win)
{
    MTCORE_Win *uh_win;
    int mpi_errno = MPI_SUCCESS;
    int post_grp_size = 0;

    MTCORE_DBG_PRINT_FCNAME();

    MTCORE_Fetch_uh_win_from_cache(win, uh_win);

    if (uh_win == NULL) {
        /* normal window */
        return PMPI_Win_wait(win);
    }

    MTCORE_Assert((uh_win->info_args.epoch_type & MTCORE_EPOCH_PSCW));

    if (uh_win->post_group == MPI_GROUP_NULL) {
        /* standard says do nothing for empty group */
        MTCORE_DBG_PRINT("Wait empty group\n");
        return mpi_errno;
    }

    mpi_errno = PMPI_Group_size(uh_win->post_group, &post_grp_size);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    MTCORE_Assert(post_grp_size > 0);

    MTCORE_DBG_PRINT("Wait group 0x%x, size %d\n", uh_win->post_group, post_grp_size);

    /* Wait for the completion on all origin processes */
    mpi_errno = MTCORE_Wait_pscw_complete_msg(post_grp_size, uh_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* TODO: MPI implementation should do memory barrier in flush handler. */
    mpi_errno = PMPI_Win_sync(uh_win->active_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    MTCORE_DBG_PRINT("Wait done\n");

  fn_exit:
    if (uh_win->post_ranks_in_win_group)
        free(uh_win->post_ranks_in_win_group);
    uh_win->post_group = MPI_GROUP_NULL;
    uh_win->post_ranks_in_win_group = NULL;

    return mpi_errno;

  fn_fail:
    goto fn_exit;
}
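MPI_Win_wait above is the target-side end of a post-start-complete-wait (PSCW) epoch. As a hedged, minimal sketch of how the four standard PSCW calls fit together (a generic two-process example, not MTCORE-specific): rank 0 opens an access epoch with MPI_Win_start and closes it with MPI_Win_complete, while rank 1 exposes its window with MPI_Win_post and blocks in MPI_Win_wait until rank 0 has completed.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, nprocs, buf = 0;
    MPI_Win win;
    MPI_Group world_grp, peer_grp;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL,
                   MPI_COMM_WORLD, &win);

    if (rank == 0 && nprocs >= 2) {
        int peer = 1, val = 42;
        MPI_Group_incl(world_grp, 1, &peer, &peer_grp);
        MPI_Win_start(peer_grp, 0, win);   /* access epoch targeting rank 1 */
        MPI_Put(&val, 1, MPI_INT, 1, 0, 1, MPI_INT, win);
        MPI_Win_complete(win);             /* origin side done */
        MPI_Group_free(&peer_grp);
    }
    else if (rank == 1) {
        int peer = 0;
        MPI_Group_incl(world_grp, 1, &peer, &peer_grp);
        MPI_Win_post(peer_grp, 0, win);    /* exposure epoch for rank 0 */
        MPI_Win_wait(win);                 /* blocks until rank 0 completes */
        printf("buf = %d\n", buf);
        MPI_Group_free(&peer_grp);
    }

    MPI_Group_free(&world_grp);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}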
Example #3
/**
 * Measurement wrapper for MPI_Group_size
 * @note Auto-generated by wrapgen from template: std.w
 * @note C interface
 * @note Introduced with MPI 1.0
 * @ingroup cg
 */
int MPI_Group_size(MPI_Group group,
                   int*      size)
{
  int return_val;

  if (IS_EVENT_GEN_ON_FOR(CG))
  {
    EVENT_GEN_OFF();
    esd_enter(epk_mpi_regid[EPK__MPI_GROUP_SIZE]);

    return_val = PMPI_Group_size(group, size);

    esd_exit(epk_mpi_regid[EPK__MPI_GROUP_SIZE]);
    EVENT_GEN_ON();
  }
  else
  {
    return_val = PMPI_Group_size(group, size);
  }

  return return_val;
}
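The wrapper above uses the PMPI name-shifted profiling interface: a tool defines the MPI_* symbol, records its own events, and forwards to the matching PMPI_* entry point. A minimal sketch of the same idiom, with plain timing standing in for the EPIK-specific esd_enter/esd_exit machinery (the fprintf output format is an illustrative assumption):

#include <mpi.h>
#include <stdio.h>

int MPI_Barrier(MPI_Comm comm)
{
  double t0 = PMPI_Wtime();
  int return_val = PMPI_Barrier(comm);   /* forward to the real implementation */
  double t1 = PMPI_Wtime();
  fprintf(stderr, "MPI_Barrier: %.6f s\n", t1 - t0);
  return return_val;
}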
Example #4
static void group_to_bitvector(MPI_Group group)
{
  int i;
  int size;

  /* Determine the world rank of each process in the group.
     (Parameter #3 is world.ranks here because we need an array of integers
      initialized with 0 to n-1, which world.ranks happens to be.) */
  PMPI_Group_size(group, &size);
  PMPI_Group_translate_ranks(group, size, world.ranks, world.group, ranks);

  /* initialize grpv */
  memset(grpv, 0, world.size_grpv);

  /* set corresponding bit for each process in group */
  for (i = 0; i < size; i++)
    grpv[ranks[i] / 8] |= (1 << (ranks[i] % 8));
}
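grpv encodes group membership as one bit per world rank: rank r maps to bit r % 8 of byte r / 8. A hypothetical companion helper (not in the original source) would invert the same indexing to test membership:

#include <stdint.h>

/* Hypothetical helper: nonzero iff world rank r is in the group
   last encoded by group_to_bitvector(). */
static int grpv_contains(const uint8_t *grpv, int r)
{
  return (grpv[r / 8] >> (r % 8)) & 1;
}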
Example #5
/* One-time initialization of the group/communicator/window tracking
 * structures and the cached MPI_COMM_WORLD group and rank arrays. */
void vt_comm_init()
{
  VT_MPI_INT i;

  if ( !comm_initialized )
  {
    comm_initialized = 1;

    groups = (struct VTGroup*)calloc(max_groups, sizeof(struct VTGroup));
    if ( !groups )
      vt_error();

    comms = (struct VTComm*)calloc(max_comms, sizeof(struct VTComm));
    if ( !comms )
      vt_error();

#if defined(HAVE_MPI2_1SIDED) && HAVE_MPI2_1SIDED
    wins = (struct VTWin*)calloc(max_wins, sizeof(struct VTWin));
    if ( !wins )
      vt_error();
#endif /* HAVE_MPI2_1SIDED */

    PMPI_Comm_group(MPI_COMM_WORLD, &vt_mpi_comm_world_group);
    PMPI_Comm_group(MPI_COMM_SELF, &vt_mpi_comm_self_group);

    world.group = vt_mpi_comm_world_group;
    PMPI_Group_size(world.group, &world.size);
    world.size_grpv = world.size / 8 + (world.size % 8 ? 1 : 0);

    world.ranks  = (VT_MPI_INT*)calloc(world.size, sizeof(VT_MPI_INT));
    if ( !world.ranks )
      vt_error();

    for (i = 0; i < world.size; i++)
      world.ranks[i] = i;

    ranks  = (VT_MPI_INT*)calloc(world.size, sizeof(VT_MPI_INT));
    if ( !ranks )
      vt_error();

    grpv = (uint8_t*)calloc(world.size_grpv, sizeof(uint8_t));
    if ( !grpv )
      vt_error();

    vt_comm_create(MPI_COMM_WORLD);
    vt_comm_create(MPI_COMM_SELF);
  }
}
Example #6
int MPI_Group_size ( MPI_Group group, int *size )
{
  _MPI_COVERAGE();
  return PMPI_Group_size (group, size);
}
Example #7
int MPI_Win_post(MPI_Group group, int assert, MPI_Win win)
{
    MTCORE_Win *uh_win;
    int mpi_errno = MPI_SUCCESS;
    int post_grp_size = 0;

    MTCORE_Fetch_uh_win_from_cache(win, uh_win);

    if (uh_win == NULL) {
        /* normal window */
        return PMPI_Win_post(group, assert, win);
    }

    MTCORE_Assert((uh_win->info_args.epoch_type & MTCORE_EPOCH_PSCW));

    if (group == MPI_GROUP_NULL) {
        /* standard says do nothing for empty group */
        MTCORE_DBG_PRINT("Post empty group\n");
        return mpi_errno;
    }

    mpi_errno = PMPI_Group_size(group, &post_grp_size);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    if (post_grp_size <= 0) {
        /* standard says do nothing for empty group */
        MTCORE_DBG_PRINT("Post empty group\n");
        return mpi_errno;
    }

    uh_win->post_group = group;
    uh_win->post_ranks_in_win_group = calloc(post_grp_size, sizeof(int));
    MTCORE_DBG_PRINT("post group 0x%x, size %d\n", uh_win->post_group, post_grp_size);

    /* Both lock and start allow only the MPI_MODE_NOCHECK assert. */
    assert = (assert == MPI_MODE_NOCHECK) ? MPI_MODE_NOCHECK : 0;

    mpi_errno = fill_ranks_in_win_grp(uh_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* Synchronize start-post if user does not specify nocheck */
    if ((assert & MPI_MODE_NOCHECK) == 0) {
        mpi_errno = MTCORE_Send_pscw_post_msg(post_grp_size, uh_win);
        if (mpi_errno != MPI_SUCCESS)
            goto fn_fail;
    }

    /* A win_sync is needed to synchronize local window updates.
     * It is still required to prevent instruction reordering of a preceding
     * load, even if the user asserts there is no preceding store. */
    mpi_errno = PMPI_Win_sync(uh_win->active_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    MTCORE_DBG_PRINT("Post done\n");

  fn_exit:
    return mpi_errno;

  fn_fail:
    if (uh_win->post_ranks_in_win_group)
        free(uh_win->post_ranks_in_win_group);
    uh_win->post_group = MPI_GROUP_NULL;
    uh_win->post_ranks_in_win_group = NULL;

    return mpi_errno;
}
Example #8
int MPI_Win_complete(MPI_Win win)
{
    MTCORE_Win *uh_win;
    int mpi_errno = MPI_SUCCESS;
    int start_grp_size = 0;

    MTCORE_DBG_PRINT_FCNAME();

    MTCORE_Fetch_uh_win_from_cache(win, uh_win);

    if (uh_win == NULL) {
        /* normal window */
        return PMPI_Win_complete(win);
    }

    MTCORE_Assert((uh_win->info_args.epoch_type & MTCORE_EPOCH_PSCW));

    if (uh_win->start_group == MPI_GROUP_NULL) {
        /* standard says do nothing for empty group */
        MTCORE_DBG_PRINT("Complete empty group\n");
        return mpi_errno;
    }

    mpi_errno = PMPI_Group_size(uh_win->start_group, &start_grp_size);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    MTCORE_Assert(start_grp_size > 0);

    MTCORE_DBG_PRINT("Complete group 0x%x, size %d\n", uh_win->start_group, start_grp_size);

    mpi_errno = MTCORE_Complete_flush(start_grp_size, uh_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    uh_win->is_self_locked = 0;

    mpi_errno = MTCORE_Send_pscw_complete_msg(start_grp_size, uh_win);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* Update the epoch status; once the start counter drops to 0, later
     * operations should no longer be redirected to active_win. */
    uh_win->start_counter--;
    if (uh_win->start_counter == 0) {
        uh_win->epoch_stat = MTCORE_WIN_NO_EPOCH;
    }

    MTCORE_DBG_PRINT("Complete done\n");

  fn_exit:
    if (uh_win->start_ranks_in_win_group)
        free(uh_win->start_ranks_in_win_group);
    uh_win->start_group = MPI_GROUP_NULL;
    uh_win->start_ranks_in_win_group = NULL;

    return mpi_errno;

  fn_fail:
    goto fn_exit;
}
Example #9
int MPI_Group_size(MPI_Group group, int *size)
{
  return PMPI_Group_size(group, size);
}
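Finally, a minimal self-contained usage sketch of MPI_Group_size itself (any of the wrappers above would intercept this call if linked in): the size of a communicator's group equals the size of the communicator.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int grp_size, comm_size;
    MPI_Group grp;

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &grp);
    MPI_Group_size(grp, &grp_size);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    printf("group size %d, comm size %d\n", grp_size, comm_size);
    MPI_Group_free(&grp);
    MPI_Finalize();
    return 0;
}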