Example #1
/** Initialize an ARMCI group's remaining fields using the communicator field.
  */
void ARMCII_Group_init_from_comm(ARMCI_Group *group) {
  if (group->comm != MPI_COMM_NULL) {
    MPI_Comm_size(group->comm, &group->size);
    MPI_Comm_rank(group->comm, &group->rank);

  } else {
    group->rank = -1;
    group->size =  0;
  }

  /* If noncollective groups are in use, create a separate communicator that
    can be used for noncollective group creation with this group as the parent.
    This ensures that calls to MPI_Intercomm_create can't clash with any user
    communication. */

  if (ARMCII_GLOBAL_STATE.noncollective_groups && group->comm != MPI_COMM_NULL)
    MPI_Comm_dup(group->comm, &group->noncoll_pgroup_comm);
  else
    group->noncoll_pgroup_comm = MPI_COMM_NULL;

  /* Check if translation caching is enabled */
  if (ARMCII_GLOBAL_STATE.cache_rank_translation) {
    if (group->comm != MPI_COMM_NULL) {
      int      *ranks, i;
      MPI_Group world_group, sub_group;

      group->abs_to_grp = malloc(sizeof(int)*ARMCI_GROUP_WORLD.size);
      group->grp_to_abs = malloc(sizeof(int)*group->size);
      ranks = malloc(sizeof(int)*ARMCI_GROUP_WORLD.size);

      ARMCII_Assert(group->abs_to_grp != NULL && group->grp_to_abs != NULL && ranks != NULL);

      for (i = 0; i < ARMCI_GROUP_WORLD.size; i++)
        ranks[i] = i;

      MPI_Comm_group(ARMCI_GROUP_WORLD.comm, &world_group);
      MPI_Comm_group(group->comm, &sub_group);

      MPI_Group_translate_ranks(sub_group, group->size, ranks, world_group, group->grp_to_abs);
      MPI_Group_translate_ranks(world_group, ARMCI_GROUP_WORLD.size, ranks, sub_group, group->abs_to_grp);

      MPI_Group_free(&world_group);
      MPI_Group_free(&sub_group);

      free(ranks);
    }
  }
  
  /* Translation caching is disabled */
  else {
    group->abs_to_grp = NULL;
    group->grp_to_abs = NULL;
  }
}
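Note that the reverse table built above (abs_to_grp) covers every world rank, and MPI_Group_translate_ranks stores MPI_UNDEFINED for world ranks that are not members of the sub-group. A minimal lookup sketch follows; it is not part of the original file, the helper name is hypothetical, and it assumes translation caching was enabled:

/* Sketch (hypothetical helper): map an ARMCI world rank back to a rank in
 * this group using the cached reverse table built above.  Entries for world
 * ranks that are not group members are MPI_UNDEFINED. */
static int ARMCII_sketch_world_to_group(ARMCI_Group *group, int world_rank) {
  int grp_rank = group->abs_to_grp[world_rank];   /* assumes cache was built */
  return (grp_rank == MPI_UNDEFINED) ? -1 : grp_rank;
}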
Example #2
File: groups.c Project: arnolda/scafacos
static void get_group_clus_id(ARMCI_iGroup *igroup, int grp_nproc, 
                              int *grp_clus_id) {
    int i, *ranks1, *ranks2;
#ifdef ARMCI_GROUP
    assert(grp_nproc<=igroup->grp_attr.nproc);
    for(i=0; i<grp_nproc; i++) {
      grp_clus_id[i] = armci_clus_id(igroup->grp_attr.proc_list[i]);
    }
#else
    MPI_Group group2;
    
    /* Takes the list of processes from one group and attempts to determine
     * the corresponding ranks in a second group (here, MPI_COMM_WORLD) */

    ranks1 = (int *)malloc(2*grp_nproc*sizeof(int));
    ranks2 = ranks1 + grp_nproc;
    for(i=0; i<grp_nproc; i++) ranks1[i] = i;
    MPI_Comm_group(MPI_COMM_WORLD, &group2);
    MPI_Group_translate_ranks(igroup->igroup, grp_nproc, ranks1, group2, ranks2);
    
    /* get the clus_id of processes */
    for(i=0; i<grp_nproc; i++) grp_clus_id[i] = armci_clus_id(ranks2[i]);
    free(ranks1);
#endif
}
Example #3
  int translateMpiRank(const int& rankA, 
		       const Teuchos::Comm<int>& commA,
		       const Teuchos::Comm<int>& commB)
  {
    MPI_Group groupA;
    {
      const Teuchos::MpiComm<int>* teuchosMpiCommA = dynamic_cast<const Teuchos::MpiComm<int>* >(&commA);
      TEUCHOS_ASSERT(teuchosMpiCommA != 0);
      MPI_Comm rawMpiCommA = (*teuchosMpiCommA->getRawMpiComm())();
      MPI_Comm_group(rawMpiCommA,&groupA);
    }
    
    MPI_Group groupB;
    {
      const Teuchos::MpiComm<int>* teuchosMpiCommB = dynamic_cast<const Teuchos::MpiComm<int>* >(&commB);
      TEUCHOS_ASSERT(teuchosMpiCommB != 0);
      MPI_Comm rawMpiCommB = (*teuchosMpiCommB->getRawMpiComm())();
      MPI_Comm_group(rawMpiCommB,&groupB);
    }
    
    int rankB = -1;
    MPI_Group_translate_ranks(groupA,1,const_cast<int*>(&rankA),groupB,&rankB);
    
    return rankB;
  }
Example #4
File: parallel.c Project: sleitner/cart
/*
//  This function must be called by all members of communicator com.
*/
void print_comm_contents(MPI_Comm com, const char *name)
{
  MPI_Group world, local;
  int i, n, *ranks_local, *ranks_world;

  MPI_Comm_group(mpi.comm.world,&world);
  MPI_Comm_group(com,&local);

  MPI_Group_size(local,&n);
  MPI_Group_rank(local,&i);

  if(i == 0)
    {
      ranks_local = cart_alloc(int,n);
      ranks_world = cart_alloc(int,n);
  
      for(i=0; i<n; i++) ranks_local[i] = i;

      MPI_Group_translate_ranks(local,n,ranks_local,world,ranks_world);

      cart_debug("Communicator %s (%p), size = %d:",name,com,n);
      for(i=0; i<n; i++) cart_debug("id = %d -> world id = %d",i,ranks_world[i]);

      cart_free(ranks_local);
      cart_free(ranks_world);
    }
}
Example #5
int print_comm(int testid, int rank, MPI_Comm comm)
{
  int i;

  if (comm != MPI_COMM_NULL) {
    int ranks;
    MPI_Comm_size(comm, &ranks);

    int* members_comm  = (int*) malloc(ranks * sizeof(int));
    int* members_world = (int*) malloc(ranks * sizeof(int));

    for (i = 0; i < ranks; i++) {
      members_comm[i] = i;
    }

    MPI_Group group_world, group_comm;
    MPI_Comm_group(MPI_COMM_WORLD, &group_world);
    MPI_Comm_group(comm, &group_comm);
    MPI_Group_translate_ranks(group_comm, ranks, members_comm, group_world, members_world);
    MPI_Group_free(&group_comm);
    MPI_Group_free(&group_world);

    print_members(testid, rank, ranks, members_world);

    free(members_world);
    free(members_comm);
  }

  return 0;
}
Example #6
static VALUE group_translate_ranks(VALUE self, VALUE ary, VALUE rgrp2)
{
    VALUE outary;
    MPI_Group *grp1, *grp2;
    int rv, i, len, *ranks1, *ranks2;

    Data_Get_Struct(self, MPI_Group, grp1);
    Data_Get_Struct(rgrp2, MPI_Group, grp2);

    len = RARRAY(ary)->len;
    ranks1 = ALLOCA_N(int, len);
    ranks2 = ALLOCA_N(int, len);

    for (i = 0; i < len; i++)
        ranks1[i] = FIX2INT(rb_ary_entry(ary, i));

    rv = MPI_Group_translate_ranks(*grp1, len, ranks1, *grp2, ranks2);
    mpi_exception(rv);

    outary = rb_ary_new2(len);
    for (i = 0; i < len; i++) {
        if (ranks2[i] == MPI_UNDEFINED)
            rb_ary_store(outary, i, UNDEFINED);
        else
            rb_ary_store(outary, i, rb_fix_new(ranks2[i]));
    }

    return outary;
}
Example #7
void ompi_group_translate_ranks_f(MPI_Fint *group1, MPI_Fint *n,
				  MPI_Fint *ranks1, MPI_Fint *group2,
				  MPI_Fint *ranks2, MPI_Fint *ierr)
{
  int c_ierr;
  ompi_group_t *c_group1, *c_group2;
  OMPI_ARRAY_NAME_DECL(ranks1);
  OMPI_ARRAY_NAME_DECL(ranks2);

  /* Make the fortran to c representation conversion */
  c_group1 = MPI_Group_f2c(*group1);
  c_group2 = MPI_Group_f2c(*group2);

  OMPI_ARRAY_FINT_2_INT(ranks1, *n);
  OMPI_ARRAY_FINT_2_INT_ALLOC(ranks2, *n);

  c_ierr = MPI_Group_translate_ranks(c_group1,
                                     OMPI_FINT_2_INT(*n),
                                     OMPI_ARRAY_NAME_CONVERT(ranks1),
                                     c_group2,
                                     OMPI_ARRAY_NAME_CONVERT(ranks2)
                                     );
  if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

  if (MPI_SUCCESS == c_ierr) {
      OMPI_ARRAY_INT_2_FINT(ranks2, *n);
  } else {
      OMPI_ARRAY_FINT_2_INT_CLEANUP(ranks2);
  }
  OMPI_ARRAY_FINT_2_INT_CLEANUP(ranks1);
}
Example #8
/** Translate a group process rank to the corresponding process rank in the
  * ARMCI world group.
  *
  * @param[in] group      Group to translate from.
  * @param[in] group_rank Rank of the process in group.
  */
int ARMCI_Absolute_id(ARMCI_Group *group, int group_rank) {
  int       world_rank;
  MPI_Group world_group, sub_group;

  ARMCII_Assert(group_rank >= 0 && group_rank < group->size);

  /* Check if group is the world group */
  if (group->comm == ARMCI_GROUP_WORLD.comm)
    world_rank = group_rank;

  /* Check for translation cache */
  else if (group->grp_to_abs != NULL)
    world_rank = group->grp_to_abs[group_rank];

  else {
    /* Translate the rank */
    MPI_Comm_group(ARMCI_GROUP_WORLD.comm, &world_group);
    MPI_Comm_group(group->comm, &sub_group);

    MPI_Group_translate_ranks(sub_group, 1, &group_rank, world_group, &world_rank);

    MPI_Group_free(&world_group);
    MPI_Group_free(&sub_group);
  }

  /* Check if translation failed */
  if (world_rank == MPI_UNDEFINED)
    return -1;
  else
    return world_rank;
}
Example #9
char * get_str_failed_procs(MPI_Comm comm, MPI_Group f_group)
{
    int f_size, i, c_size;
    MPI_Group c_group;
    int *failed_ranks = NULL;
    int *comm_ranks   = NULL;
    int rank_len = 7;
    char * ranks_failed = NULL;

    MPI_Group_size(f_group, &f_size);

    if( f_size <= 0 ) {
        ranks_failed = strdup("None");
    } else {
        MPI_Comm_group(comm, &c_group);
        MPI_Comm_size( comm, &c_size);

        failed_ranks = (int *)malloc(f_size * sizeof(int));
        comm_ranks   = (int *)malloc(f_size * sizeof(int));
        for( i = 0; i < f_size; ++i) {
            failed_ranks[i] = i;
        }

        MPI_Group_translate_ranks(f_group, f_size, failed_ranks,
                                  c_group, comm_ranks);

        ranks_failed = (char *)malloc(sizeof(char) * (rank_len) * f_size + 1);
        for( i = 0; i < f_size; ++i) {
            /*
            printf("%2d of %2d) Error Handler: %2d / %2d Failed Rank %3d\n",
                   mpi_mcw_rank, mpi_mcw_size, i, f_size, comm_ranks[i]);
            */
            if( i+1 == f_size ) {
                sprintf((ranks_failed+(i*rank_len)), "%c%5d.%c",
                        (0 == i ? ' ' : ','), comm_ranks[i], '\0');
            }
            else if( 0 == i ) {
                sprintf(ranks_failed, "  %5d", comm_ranks[i]);
            }
            else {
                sprintf((ranks_failed+(i*rank_len)), ", %5d", comm_ranks[i]);
            }
        }

        MPI_Group_free(&c_group);
    }

    if( NULL != failed_ranks ) {
        free(failed_ranks);
    }
    if( NULL != comm_ranks ) {
        free(comm_ranks);
    }

    return ranks_failed;
}
Example #10
  /* defines global rank  -> shmcomm rank mapping;
     output: partners_map is array of ranks in shmcomm  */
  void translate_ranks(MPI_Comm shmcomm, int partners[], int partners_map[])
  {
    MPI_Group world_group, shared_group;
    
    /* create MPI groups for global communicator and shm communicator */
    MPI_Comm_group (MPI_COMM_WORLD, &world_group); 
    MPI_Comm_group (shmcomm, &shared_group);

    MPI_Group_translate_ranks (world_group, n_partners, partners, shared_group, partners_map); 
  }
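A hedged usage sketch for the helper above (the partner ranks are illustrative, and n_partners is assumed to be a global equal to 2 here, as in the snippet): world ranks that are not members of shmcomm, i.e. partners on other nodes, come back as MPI_UNDEFINED and should be filtered before being used as shared-memory neighbors.

  /* Sketch (illustrative, not from the original project): translate two
     neighbour world ranks into shmcomm ranks and detect off-node partners,
     which MPI_Group_translate_ranks reports as MPI_UNDEFINED. */
  void map_neighbours_sketch(MPI_Comm shmcomm, int left_world, int right_world)
  {
    int partners[2]    = { left_world, right_world };
    int partners_map[2];

    translate_ranks(shmcomm, partners, partners_map);  /* helper defined above */

    for (int i = 0; i < 2; i++) {
      if (partners_map[i] == MPI_UNDEFINED) {
        /* partner i lives on another node: use ordinary MPI communication */
      } else {
        /* partner i is on-node: its rank inside shmcomm is partners_map[i] */
      }
    }
  }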
Example #11
dart_ret_t dart_team_unit_l2g(
  dart_team_t teamid,
  dart_unit_t localid,
  dart_unit_t *globalid)
{
#if 0
  dart_unit_t *unitids;
  int size;
  int i = 0;
  dart_group_t group;
  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);
  if (localid >= size)
  {
    DART_LOG_ERROR ("Invalid localid input");
    return DART_ERR_INVAL;
  }
  unitids = (dart_unit_t*)malloc (sizeof(dart_unit_t) * size);
  dart_group_getmembers (&group, unitids);

  /* The unitids array is arranged in ascending order. */
  *globalid = unitids[localid];
//  printf ("globalid is %d\n", *globalid);
  return DART_OK;
#endif
  int size;
  dart_group_t group;

  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);

  if (localid >= size) {
    DART_LOG_ERROR ("Invalid localid input: %d", localid);
    return DART_ERR_INVAL;
  }
  if (teamid == DART_TEAM_ALL) {
    *globalid = localid;
  }
  else {
    MPI_Group group_all;
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_translate_ranks(
      group.mpi_group,
      1,
      &localid,
      group_all,
      globalid);
  }

  return DART_OK;
}
Example #12
int ARMCI_Absolute_id(ARMCI_Group *group,int group_rank)
{
    int abs_rank,status;
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group;
#ifdef ARMCI_GROUP
    assert(group_rank < igroup->grp_attr.nproc);
    return igroup->grp_attr.proc_list[group_rank];
#else
    MPI_Group grp;
    status = MPI_Comm_group(MPI_COMM_WORLD,&grp);
    MPI_Group_translate_ranks(igroup->igroup,1,&group_rank,grp,&abs_rank);
    return(abs_rank);
#endif
}
Example #13
File: common.hpp Project: mrzv/henson
int translate_rank(MPI_Comm comm1, int rank1, MPI_Comm comm2)
{
    MPI_Group group1, group2;
    MPI_Comm_group(comm1, &group1);
    MPI_Comm_group(comm2, &group2);

    int rank2;
    MPI_Group_translate_ranks(group1, 1, &rank1, group2, &rank2);

    MPI_Group_free(&group2);
    MPI_Group_free(&group1);

    return rank2;
}
Example #14
File: groups.c Project: DMClambo/pfff
value caml_mpi_group_translate_ranks(value group1, value ranks, value group2)
{
  int n = Wosize_val(ranks);
  int * ranks1 = stat_alloc(n * sizeof(int));
  int * ranks2 = stat_alloc(n * sizeof(int));
  int i;
  value res;

  for (i = 0; i < n; i++) ranks1[i] = Int_val(Field(ranks, i));
  MPI_Group_translate_ranks(Group_val(group1), n, ranks1,
                            Group_val(group2), ranks2);
  res = alloc(n, 0);
  for (i = 0; i < n; i++) Field(res, i) = Val_int(ranks2[i]);
  stat_free(ranks1);
  stat_free(ranks2);
  return res;
}
Example #15
File: groups.c Project: jeffhammond/ga
int comex_group_translate_world(comex_group_t group, int group_rank, int *world_rank)
{
    if (COMEX_GROUP_WORLD == group) {
        *world_rank = group_rank;
    }
    else {
        comex_igroup_t *igroup = comex_get_igroup_from_group(group);
        comex_igroup_t *world_igroup = comex_get_igroup_from_group(COMEX_GROUP_WORLD);
        int status = MPI_Group_translate_ranks(
                igroup->group, 1, &group_rank, world_igroup->group, world_rank);
        if (status != MPI_SUCCESS) {
            comex_error("MPI_Group_translate_ranks: Failed ", status);
        }
    }

    return COMEX_SUCCESS;
}
Example #16
dart_ret_t dart_team_unit_g2l(
  dart_team_t teamid,
  dart_unit_t globalid,
  dart_unit_t *localid)
{
#if 0
  dart_unit_t *unitids;
  int size;
  int i;
  dart_group_t group;
  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);
  unitids = (dart_unit_t *)malloc (sizeof (dart_unit_t) * size);

  dart_group_getmembers (&group, unitids);


  for (i = 0; (i < size) && (unitids[i] < globalid); i++);

  if ((i == size) || (unitids[i] > globalid))
  {
    *localid = -1;
    return DART_OK;
  }

  *localid = i;
  return DART_OK;
#endif
  if(teamid == DART_TEAM_ALL) {
    *localid = globalid;
  }
  else {
    dart_group_t group;
    MPI_Group group_all;
    dart_team_get_group(teamid, &group);
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_translate_ranks(
      group_all,
      1,
      &globalid,
      group.mpi_group,
      localid);
  }
  return DART_OK;
}
Example #17
int unit_g2l(
  uint16_t      index,
  dart_unit_t   abs_id,
  dart_unit_t * rel_id)
{
  if (index == 0) {
    *rel_id = abs_id;
  }
  else {
    MPI_Comm comm;
    MPI_Group group, group_all;
    comm = dart_teams[index];
    MPI_Comm_group(comm, &group);
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_translate_ranks (group_all, 1, &abs_id, group, rel_id);
  }
  return 0;
}
Example #18
File: groups.c Project: jeffhammond/ga
/**
 * Translates the given rank from the given group into that of the world group.
 */
int comex_group_translate_world(comex_group_t id, int group_rank, int *world_rank)
{
    int status;
    MPI_Group world_group;
    comex_igroup_t *igroup = comex_get_igroup_from_group(id);

    status = MPI_Comm_group(l_state.world_comm, &world_group);
    if (status != MPI_SUCCESS) {
        comex_error("MPI_Comm_group: Failed ", status);
    }
    status = MPI_Group_translate_ranks(
            igroup->group, 1, &group_rank, world_group, world_rank);
    if (status != MPI_SUCCESS) {
        comex_error("MPI_Group_translate_ranks: Failed ", status);
    }

    return COMEX_SUCCESS;
}
Example #19
File: mpi_Group.c Project: 00datman/ompi
JNIEXPORT jintArray JNICALL Java_mpi_Group_translateRanks(
        JNIEnv *env, jclass jthis, jlong group1,
        jintArray ranks1, jlong group2)
{
    jsize n = (*env)->GetArrayLength(env, ranks1);
    jintArray ranks2 = (*env)->NewIntArray(env,n);
    jint *jRanks1, *jRanks2;
    int  *cRanks1, *cRanks2;
    ompi_java_getIntArray(env, ranks1, &jRanks1, &cRanks1);
    ompi_java_getIntArray(env, ranks2, &jRanks2, &cRanks2);

    int rc = MPI_Group_translate_ranks((MPI_Group)group1, n, cRanks1,
                                       (MPI_Group)group2, cRanks2);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_forgetIntArray(env, ranks1, jRanks1, cRanks1);
    ompi_java_releaseIntArray(env, ranks2, jRanks2, cRanks2);
    return ranks2;
}
Example #20
  int MultiphysicsDistributor::mapRankToCommWorldRank(int myRank, const Teuchos::Comm<int>& myTeuchosComm)
  {
    int worldRank = -1;
    
    const Teuchos::MpiComm<int>* myTeuchosMpiComm = dynamic_cast<const Teuchos::MpiComm<int>* >(&myTeuchosComm);
    TEUCHOS_ASSERT(myTeuchosMpiComm != 0);

    MPI_Comm myRawMpiComm = (*myTeuchosMpiComm->getRawMpiComm())();
    MPI_Group myGroup;
    MPI_Comm_group(myRawMpiComm,&myGroup);

    MPI_Group worldGroup;
    MPI_Comm_group(MPI_COMM_WORLD,&worldGroup);

    MPI_Group_translate_ranks(myGroup,1,&myRank,worldGroup,&worldRank);
   
    return worldRank;
  }
Example #21
File: vsgpackedmsg.c Project: pigay/vsg
static MPI_Fint _trace_write_comm (MPI_Comm comm)
{
  MPI_Fint ret = MPI_Comm_c2f (comm);
  MPI_Group world_group, group;
  gint rk, sz, i;
  gint world_rk, world_sz;
  gint *world_rks, *comm_rks;

  MPI_Comm_group (MPI_COMM_WORLD, &world_group);
  MPI_Comm_group (comm, &group);

  MPI_Comm_rank (MPI_COMM_WORLD, &world_rk);
  MPI_Comm_size (MPI_COMM_WORLD, &world_sz);

  MPI_Comm_rank (comm, &rk);
  MPI_Comm_size (comm, &sz);

  world_rks = g_malloc (world_sz * sizeof (int));
  comm_rks = g_malloc (world_sz * sizeof (int));

  for (i=0; i<world_sz; i++)
    world_rks[i] = i;

  MPI_Group_translate_ranks (world_group, world_sz, world_rks, group, comm_rks);

  g_fprintf (_trace_file, "comm=%d rk=%d sz=%d translate={",
	     ret, rk, sz);

  for (i=0; i<world_sz; i++)
    {
      g_fprintf (_trace_file, "%d ", comm_rks[i]);
    }

  g_fprintf (_trace_file, "}\n");

  MPI_Group_free (&world_group);
  MPI_Group_free (&group);

  g_free (world_rks);
  g_free (comm_rks);

  return ret;
}
Example #22
File: groups.c Project: bcernohous/ga
/**
 * Translates the given rank from the given group into that of the world group.
 */
int ARMCI_Absolute_id(ARMCI_Group *id, int group_rank)
{
    int world_rank;
    int status;
    MPI_Group world_group;
    ARMCI_iGroup *igroup = armci_get_igroup_from_group(id);

    status = MPI_Comm_group(ARMCI_COMM_WORLD, &world_group);
    if (status != MPI_SUCCESS) {
        armci_die("MPI_Comm_group: Failed ", status);
    }
    status = MPI_Group_translate_ranks(
            igroup->group, 1, &group_rank, world_group, &world_rank);
    if (status != MPI_SUCCESS) {
        armci_die("MPI_Group_translate_ranks: Failed ", status);
    }

    return world_rank;
}
Example #23
/*
 * Determine which processes in my communicator are in which communicators in
 * the next level down. (Communicators are equivalent to topological entities.)
 */
void level_down(int level, int *classes, int *me, int *nprocs)
{
int i;

  MPI_Group current, next;

  MPI_Comm_group(comm[level], &current);
  MPI_Comm_group(comm[level+1], &next);

  /* Rank zero in each sub communicator tells Rank zero at the current level
   * which processes are in its communicator.
   */

  if (me[level+1] > 0){
    for (i=0; i < nprocs[level]; i++){
      buf[i] = -1;
    }
  }
  else{
    for (i=0; i < nprocs[level+1]; i++){
      buf[i] = i;
    }
    MPI_Group_translate_ranks(next, nprocs[level+1], buf, current, classes);

#ifdef DEBUG_ME
    int j;
    for (j=0; j < nprocs[level+1]; j++){
      printf("(%d) Next level's (%s) rank %d is current level's (%s) rank %d\n",me[0],commName[level],buf[j],commName[level+1],classes[j]);
    }
#endif

    for (i=0; i < nprocs[level]; i++){
      buf[i] = -1;
    }

    for (i=0; i < nprocs[level+1]; i++){
      buf[classes[i]] = me[level];   /* mark procs in my sub communicator */
    }
  }

  MPI_Allreduce(buf, classes, nprocs[level], MPI_INT, MPI_MAX, comm[level]);
}
Example #24
static
int _transRank_withComm(MPI_Comm comm1, int rank1, MPI_Comm comm2)
{
  int rank2;
  MPI_Group group1, group2;
  int stat1, stat2, stat3;

  stat1 = MPI_Comm_group(comm1, &group1);
  stat2 = MPI_Comm_group(comm2, &group2);

  stat3 = MPI_Group_translate_ranks(group1, 1, &rank1, group2, &rank2);
  //(in:Group1, n, rank1[n], Group2, out:rank2[n])
  if (rank2 == MPI_UNDEFINED){
    rank2 = -1;
  }

  if (stat1 != 0 || stat2 != 0 || stat3 != 0)
    fprintf(stderr, "INTERNAL: _transRank_withComm failed with stat1=%d, stat2=%d, stat3=%d",
	    stat1, stat2, stat3);

  return rank2;
}
Example #25
File: util.c Project: Egor-Skv/mpiperf
/*
 * getworldrank: Translate rank of process from communicator comm to
 *              communicator MPI_COMM_WORLD.
 */
int getworldrank(MPI_Comm comm, int rank)
{
    static MPI_Group worldgroup;
    static int isfirstcall = 1;
    int worldrank, isintercomm = 0;
    MPI_Group group;

    MPI_Comm_test_inter(comm, &isintercomm);
    if (isintercomm) {
        MPI_Comm_remote_group(comm, &group);
    } else {
        MPI_Comm_group(comm, &group);
    }

    if (isfirstcall) {
        MPI_Comm_group(MPI_COMM_WORLD, &worldgroup);
        isfirstcall = 0;
    }
    MPI_Group_translate_ranks(group, 1, &rank, worldgroup, &worldrank);
    MPI_Group_free(&group);
    return worldrank;
}
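As a usage sketch (illustrative only; the sub-communicator is assumed to have been created by the caller), getworldrank lets diagnostics report ranks in MPI_COMM_WORLD terms no matter which communicator the operation actually ran on:

/* Sketch (illustrative): log an event using the world rank rather than the
 * sub-communicator rank. */
void log_world_rank_sketch(MPI_Comm subcomm)
{
    int rank;
    MPI_Comm_rank(subcomm, &rank);
    fprintf(stderr, "rank %d in subcomm is world rank %d\n",
            rank, getworldrank(subcomm, rank));
}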
Example #26
dart_ret_t dart_group_getmembers(
  const dart_group_t *g,
  dart_unit_t *unitids)
{
  int size, i;
  int *array;
  MPI_Group group_all;
  MPI_Group_size(g -> mpi_group, &size);
  MPI_Comm_group(MPI_COMM_WORLD, &group_all);
  array = (int*) malloc (sizeof (int) * size);
  for (i = 0; i < size; i++) {
    array[i] = i;
  }
  MPI_Group_translate_ranks(
    g->mpi_group,
    size,
    array,
    group_all,
    unitids);
  free (array);
  return DART_OK;
}
Example #27
/*
 * Class:     mpi_Group
 * Method:    Translate_ranks
 * Signature: (Lmpi/Group;[ILmpi/Group;)[I
 */
JNIEXPORT jintArray JNICALL Java_mpi_Group_Translate_1ranks(JNIEnv *env, jclass jthis,
                                                            jobject group1, jintArray ranks1,
                                                            jobject group2)
{
    jboolean isCopy=JNI_TRUE;
    int n=(*env)->GetArrayLength(env,ranks1);
    jint *rks1,*rks2;
    jintArray jranks2;

    ompi_java_clearFreeList(env) ;

    rks1=(*env)->GetIntArrayElements(env,ranks1,&isCopy);
    jranks2=(*env)->NewIntArray(env,n);
    rks2=(*env)->GetIntArrayElements(env,jranks2,&isCopy);
    MPI_Group_translate_ranks((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
                              n, (int*)rks1,
                              (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
                              (int*)rks2);
    (*env)->ReleaseIntArrayElements(env,ranks1,rks1,0);
    (*env)->ReleaseIntArrayElements(env,jranks2,rks2,0);
    return jranks2;
}
Example #28
int foMPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, foMPI_Win *win) {

  int i;
  int* temp;
  MPI_Group group_comm_world, group;
  foMPI_Win_struct_t win_info;

  assert( size >= 0 );
  assert( disp_unit > 0 );

  /* allocate the window */
  void * memptr;
  _foMPI_ALIGNED_ALLOC(&memptr,  sizeof(foMPI_Win_desc_t) )
  *win = memptr;
  assert(*win != NULL);

  /**transition info. As soon as an foMPI Communicator is implemented update this UGNI use this*/
  (*win)->fompi_comm = glob_info.comm_world;
  /* the window communicator specific informations */
  (*win)->comm = comm;
  MPI_Comm_size( comm, &((*win)->commsize) );
  MPI_Comm_rank( comm, &((*win)->commrank) );

  /* get all ranks from the members of the group */
  (*win)->group_ranks = _foMPI_ALLOC((*win)->commsize * sizeof(int32_t));
  assert((*win)->group_ranks != NULL);
  
  temp = _foMPI_ALLOC((*win)->commsize * sizeof(int));
  assert( temp != NULL );
  for( i=0 ; i<(*win)->commsize ; i++) {
    temp[i] = i;
  }
  MPI_Comm_group(comm, &group);
  MPI_Comm_group(MPI_COMM_WORLD, &group_comm_world);
  MPI_Group_translate_ranks(group, (*win)->commsize, &temp[0], group_comm_world, &((*win)->group_ranks[0]));

  _foMPI_FREE(temp);
  MPI_Group_free(&group_comm_world);
#ifdef UGNI
  gni_return_t status_gni;

  #ifdef _foMPI_UGNI_WIN_RELATED_SRC_CQ
  /*
  	 * Create the source completion queue.
  	 *     nic_handle is the NIC handle that this completion queue will be
  	 *          associated with.
  	 *     number_of_cq_entries is the size of the completion queue.
  	 *     zero is the delay count is the number of allowed events before an
  	 *          interrupt is generated.
  	 *     GNI_CQ_NOBLOCK states that the operation mode is non-blocking.
  	 *     NULL states that no user supplied callback function is defined.
  	 *     NULL states that no user supplied pointer is passed to the callback
  	 *          function.
  	 *     cq_handle is the handle that is returned pointing to this newly
  	 *          created completion queue.
  	 */
  	(*win)->number_of_source_cq_entries = _foMPI_NUM_SRC_CQ_ENTRIES;
  	status_gni = GNI_CqCreate((*win)->fompi_comm->nic_handle, (*win)->number_of_source_cq_entries , 0,
  	_foMPI_SRC_CQ_MODE, NULL, NULL, &((*win)->source_cq_handle));
  	_check_gni_status(status_gni, GNI_RC_SUCCESS, (char*) __FILE__, __LINE__);
  	_foMPI_TRACE_LOG(3, "GNI_CqCreate      source with %i entries\n", (*win)->number_of_source_cq_entries);
#endif
  	(*win)->counter_ugni_nbi = 0;

  	  /*
  		 * Create the destination_completion queue.
  		 *     nic_handle is the NIC handle that this completion queue will be
  		 *          associated with.
  		 *     number_of_dest_cq_entries is the size of the completion queue.
  		 *     zero is the delay count is the number of allowed events before
  		 *          an interrupt is generated.
  		 *     GNI_CQ_NOBLOCK states that the operation mode is non-blocking.
  		 *     NULL states that no user supplied callback function is defined.
  		 *     NULL states that no user supplied pointer is passed to the
  		 *          callback function.
  		 *     destination_cq_handle is the handle that is returned pointing to
  		 *          this newly created completion queue.
  		 */
  		(*win)->number_of_dest_cq_entries = _foMPI_NUM_DST_CQ_ENTRIES;
  	//we try to use the handler instead of only the dispatcher trying to decrease the latency of the notification
  	#ifdef NOTIFICATION_SOFTWARE_AGENT
  	//	status_gni = GNI_CqCreate(glob_info.comm_world->nic_handle, (*win)->number_of_dest_cq_entries, 0,
  	//			foMPI_DST_CQ_MODE, &foMPI_NotificationHandler, (*win), &((*win)->destination_cq_handle));
  	#else
  		//TODO: substitute comme world with foMPI_Comm
  		status_gni = GNI_CqCreate((*win)->fompi_comm->nic_handle, (*win)->number_of_dest_cq_entries, 0,
  			_foMPI_DST_CQ_MODE, NULL, NULL, &((*win)->destination_cq_handle));
  	#endif
  		_check_gni_status(status_gni, GNI_RC_SUCCESS, (char*) __FILE__, __LINE__);
  		_foMPI_TRACE_LOG(3 , "GNI_CqCreate      destination with %i entries\n",(*win)->number_of_dest_cq_entries);

  		/*init backup_queue*/
  		(*win)->destination_cq_discarded = _fompi_notif_uq_init();
  		_foMPI_TRACE_LOG(3, "fompi_oset    Created \n");
  #endif
#ifdef XPMEM
  /* get communicator for all onnode processes that are part of the window */
  MPI_Group_intersection( glob_info.onnode_group, group /* window group */, &((*win)->win_onnode_group) );
  MPI_Comm_create( comm, (*win)->win_onnode_group, &((*win)->win_onnode_comm) );

  /* mapping of the global ranks (of the window communicator */
  MPI_Group_size( (*win)->win_onnode_group, &((*win)->onnode_size) );

  temp = _foMPI_ALLOC( (*win)->onnode_size * sizeof(int));
  assert( temp != NULL );

  (*win)->onnode_ranks = _foMPI_ALLOC( (*win)->onnode_size * sizeof(int));
  assert( (*win)->onnode_ranks != NULL );

  for( i=0 ; i<(*win)->onnode_size ; i++) {
    temp[i] = i;
  }
  MPI_Group_translate_ranks((*win)->win_onnode_group, (*win)->onnode_size, &temp[0], group, &((*win)->onnode_ranks[0]) );

  for( i=1 ; i<(*win)->onnode_size ; i++ ) {
    if( (*win)->onnode_ranks[i] != ( (*win)->onnode_ranks[i-1]+1 ) ) {
      break; 
    }
  }
  if (i == (*win)->onnode_size) {
    (*win)->onnode_lower_bound = (*win)->onnode_ranks[0];
    (*win)->onnode_upper_bound = (*win)->onnode_ranks[(*win)->onnode_size-1];
    _foMPI_FREE( (*win)->onnode_ranks );
  } else {
    (*win)->onnode_lower_bound = -1;
    (*win)->onnode_upper_bound = -1;
  }

  //NOTIFICATION QUEUE
  /*init data structure and export*/
  int exp_size;
  xpmem_notif_init_queue(*win,(*win)->onnode_size);
  /*export memory and save into the segment descriptor to send to others on-node PEs*/
  	(*win)->xpmem_segdesc.notif_queue = foMPI_export_memory_xpmem((*win)->xpmem_notif_queue, sizeof(fompi_xpmem_notif_queue_t));
  	(*win)->xpmem_segdesc.notif_queue_state = foMPI_export_memory_xpmem((void*)((*win)->xpmem_notif_state_lock),sizeof(fompi_xpmem_notif_state_t) + (*win)->onnode_size * sizeof(lock_flags_t));

  _foMPI_FREE( temp );
#endif
  MPI_Group_free(&group);

  /* allocate the memory for the remote window information */

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(foMPI_Win_struct_t) )
  (*win)->win_array =  memptr ;
  assert((*win)->win_array != NULL);

  /* set the information for the remote processes */
  if ( (base != NULL) && (size > 0) ) {
    win_info.base = base;
    _foMPI_mem_register( base, (uint64_t) size, &(win_info.seg), *win );

#ifdef XPMEM
    (*win)->xpmem_segdesc.base = foMPI_export_memory_xpmem(base, size);
#endif
  } else {
    win_info.base = NULL;
#ifdef XPMEM
    (*win)->xpmem_segdesc.base.seg = -1;
#endif
  }

  win_info.size = size;
  win_info.disp_unit = disp_unit;

  win_info.win_ptr = *win;
  _foMPI_mem_register( *win, (uint64_t) sizeof(foMPI_Win_desc_t), &(win_info.win_ptr_seg), *win );

#ifdef XPMEM
  (*win)->xpmem_segdesc.win_ptr = foMPI_export_memory_xpmem(*win, sizeof(foMPI_Win_desc_t));
#endif

  /* PCSW matching */

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(uint64_t) )
  win_info.pscw_matching_exposure = memptr;
  assert( win_info.pscw_matching_exposure != NULL );

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(uint32_t))
  (*win)->pscw_matching_access = memptr ;
  assert( (*win)->pscw_matching_access != NULL );

  for( i=0 ; i<(*win)->commsize ; i++ ){
    win_info.pscw_matching_exposure[i] = 0;
    (*win)->pscw_matching_access[i] = 0;
  }

  _foMPI_mem_register( win_info.pscw_matching_exposure, (uint64_t) (*win)->commsize * sizeof(uint64_t), &(win_info.pscw_matching_exposure_seg), *win );

#ifdef XPMEM
  (*win)->xpmem_segdesc.pscw_matching_exposure = foMPI_export_memory_xpmem( win_info.pscw_matching_exposure, (*win)->commsize * sizeof(uint64_t) );
#endif

  /* lock synchronisation */
  (*win)->mutex = foMPI_MUTEX_NONE;
  (*win)->lock_mutex = 0; /* no current access */
  if ( (*win)->commrank == MASTER ) {
    (*win)->lock_all_mutex = 0; /* no current access */
  }
  (*win)->local_exclusive_count = 0;
  (*win)->excl_locks = NULL;

  /* management of rma operations */
  (*win)->nbi_counter = 0;

  (*win)->name = NULL;
 
  (*win)->create_flavor = foMPI_WIN_FLAVOR_CREATE;

  MPI_Allgather( &win_info, sizeof(foMPI_Win_struct_t), MPI_BYTE, &((*win)->win_array[0]), sizeof(foMPI_Win_struct_t), MPI_BYTE, comm );

#ifdef XPMEM
  /* exchange the exposure infos with the onnode processes */
  (*win)->xpmem_array = _foMPI_ALLOC( (*win)->onnode_size * sizeof(fompi_xpmem_addr_t) );
  assert( (*win)->xpmem_array != NULL );
  fompi_xpmem_info_t* xpmem_temp = _foMPI_ALLOC( (*win)->onnode_size * sizeof(fompi_xpmem_info_t) );
 
  MPI_Allgather( &((*win)->xpmem_segdesc), sizeof(fompi_xpmem_info_t), MPI_BYTE, &(xpmem_temp[0]), sizeof(fompi_xpmem_info_t), MPI_BYTE, (*win)->win_onnode_comm );
  
  /* map the onnode memory */
  for( i=0 ; i<(*win)->onnode_size ; i++ ) {
    if (xpmem_temp[i].base.seg != -1) {
      (*win)->xpmem_array[i].base = foMPI_map_memory_xpmem( xpmem_temp[i].base, (*win)->win_array[foMPI_onnode_rank_local_to_global( i, (*win) )].size,
        &((*win)->xpmem_array[i].base_apid), &((*win)->xpmem_array[i].base_offset) );
    } else {
      (*win)->xpmem_array[i].base_apid = -1;
    }
    (*win)->xpmem_array[i].win_ptr = foMPI_map_memory_xpmem( xpmem_temp[i].win_ptr, sizeof(foMPI_Win_desc_t), &((*win)->xpmem_array[i].win_ptr_apid), &((*win)->xpmem_array[i].win_ptr_offset) );
    (*win)->xpmem_array[i].pscw_matching_exposure = foMPI_map_memory_xpmem( xpmem_temp[i].pscw_matching_exposure, (*win)->commsize * sizeof(uint64_t),
      &((*win)->xpmem_array[i].pscw_matching_exposure_apid), &((*win)->xpmem_array[i].pscw_matching_exposure_offset) );
    //notifications
    (*win)->xpmem_array[i].notif_queue = foMPI_map_memory_xpmem( xpmem_temp[i].notif_queue, sizeof(fompi_xpmem_notif_queue_t), &((*win)->xpmem_array[i].notif_queue_apid), &((*win)->xpmem_array[i].notif_queue_offset) );
    (*win)->xpmem_array[i].notif_queue_state = foMPI_map_memory_xpmem( xpmem_temp[i].notif_queue_state, (*win)->onnode_size * sizeof(lock_flags_t), &((*win)->xpmem_array[i].notif_queue_state_apid), &((*win)->xpmem_array[i].notif_queue_state_offset) );

  }

  _foMPI_FREE( xpmem_temp );
#endif

  return MPI_SUCCESS;
}
Example #29
int main(int argc, char *argv[])
{
    int rank, nproc, i, x;
    int errors = 0, all_errors = 0;
    MPI_Win win = MPI_WIN_NULL;

    MPI_Comm shm_comm = MPI_COMM_NULL;
    int shm_nproc, shm_rank;
    double **shm_bases = NULL, *my_base;
    MPI_Win shm_win = MPI_WIN_NULL;
    MPI_Group shm_group = MPI_GROUP_NULL, world_group = MPI_GROUP_NULL;
    int *shm_ranks = NULL, *shm_ranks_in_world = NULL;
    MPI_Aint get_target_base_offsets = 0;

    int win_size = sizeof(double) * BUF_CNT;
    int new_win_size = win_size;
    int win_unit = sizeof(double);
    int shm_root_rank_in_world;
    int origin = -1, put_target, get_target;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (nproc != 4) {
        if (rank == 0)
            printf("Error: must be run with four processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &shm_comm);
    MPI_Comm_rank(shm_comm, &shm_rank);
    MPI_Comm_size(shm_comm, &shm_nproc);
    MPI_Comm_group(shm_comm, &shm_group);

    /* Platform does not support shared memory or wrong host file, just return. */
    if (shm_nproc != 2) {
        goto exit;
    }

    shm_bases = (double **) calloc(shm_nproc, sizeof(double *));
    shm_ranks = (int *) calloc(shm_nproc, sizeof(int));
    shm_ranks_in_world = (int *) calloc(shm_nproc, sizeof(int));

    if (shm_rank == 0)
        shm_root_rank_in_world = rank;
    MPI_Bcast(&shm_root_rank_in_world, 1, MPI_INT, 0, shm_comm);

    /* Identify ranks of target processes which are located on node 0 */
    if (rank == 0) {
        for (i = 0; i < shm_nproc; i++) {
            shm_ranks[i] = i;
        }
        MPI_Group_translate_ranks(shm_group, shm_nproc, shm_ranks, world_group, shm_ranks_in_world);
    }
    MPI_Bcast(shm_ranks_in_world, shm_nproc, MPI_INT, 0, MPI_COMM_WORLD);

    put_target = shm_ranks_in_world[shm_nproc - 1];
    get_target = shm_ranks_in_world[0];

    /* Identify the rank of origin process which are located on node 1 */
    if (shm_root_rank_in_world == 1 && shm_rank == 0) {
        origin = rank;
        if (verbose) {
            printf("----   I am origin = %d, get_target = %d, put_target = %d\n",
                   origin, get_target, put_target);
        }
    }

    /* Allocate shared memory among local processes */
    MPI_Win_allocate_shared(win_size, win_unit, MPI_INFO_NULL, shm_comm, &my_base, &shm_win);

    if (shm_root_rank_in_world == 0 && verbose) {
        MPI_Aint size;
        int disp_unit;
        for (i = 0; i < shm_nproc; i++) {
            MPI_Win_shared_query(shm_win, i, &size, &disp_unit, &shm_bases[i]);
            printf("%d --    shared query: base[%d]=%p, size %zd, "
                   "unit %d\n", rank, i, shm_bases[i], size, disp_unit);
        }
    }

    /* Get offset of put target(1) on get target(0) */
    get_target_base_offsets = (shm_nproc - 1) * win_size / win_unit;

    if (origin == rank && verbose)
        printf("%d --    base_offset of put_target %d on get_target %d: %zd\n",
               rank, put_target, get_target, get_target_base_offsets);

    /* Create using MPI_Win_create(). Note that new window size of get_target(0)
     * is equal to the total size of shm segments on this node, thus get_target
     * process can read the byte located on put_target process.*/
    for (i = 0; i < BUF_CNT; i++) {
        local_buf[i] = (i + 1) * 1.0;
        my_base[i] = 0.0;
    }

    if (get_target == rank)
        new_win_size = win_size * shm_nproc;

    MPI_Win_create(my_base, new_win_size, win_unit, MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if (verbose)
        printf("%d --    new window my_base %p, size %d\n", rank, my_base, new_win_size);

    MPI_Barrier(MPI_COMM_WORLD);

    /* Check if flush guarantees the completion of put operations on target side.
     *
     * P exclusively locks 2 processes whose windows are shared with each other.
     * P first put and flush to a process, then get the updated data from another process.
     * If flush returns before operations are done on the target side, the data may be
     * incorrect.*/
    for (x = 0; x < ITER; x++) {
        for (i = 0; i < BUF_CNT; i++) {
            local_buf[i] += x;
            check_buf[i] = 0;
        }

        if (rank == origin) {
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, put_target, 0, win);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, get_target, 0, win);

            for (i = 0; i < BUF_CNT; i++) {
                MPI_Put(&local_buf[i], 1, MPI_DOUBLE, put_target, i, 1, MPI_DOUBLE, win);
            }
            MPI_Win_flush(put_target, win);

            MPI_Get(check_buf, BUF_CNT, MPI_DOUBLE, get_target,
                    get_target_base_offsets, BUF_CNT, MPI_DOUBLE, win);
            MPI_Win_flush(get_target, win);

            for (i = 0; i < BUF_CNT; i++) {
                if (check_buf[i] != local_buf[i]) {
                    printf("%d(iter %d) - Got check_buf[%d] = %.1lf, expected %.1lf\n",
                           rank, x, i, check_buf[i], local_buf[i]);
                    errors++;
                }
            }

            MPI_Win_unlock(put_target, win);
            MPI_Win_unlock(get_target, win);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

  exit:

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    if (shm_bases)
        free(shm_bases);
    if (shm_ranks)
        free(shm_ranks);
    if (shm_ranks_in_world)
        free(shm_ranks_in_world);

    if (shm_win != MPI_WIN_NULL)
        MPI_Win_free(&shm_win);

    if (win != MPI_WIN_NULL)
        MPI_Win_free(&win);

    if (shm_comm != MPI_COMM_NULL)
        MPI_Comm_free(&shm_comm);

    if (shm_group != MPI_GROUP_NULL)
        MPI_Group_free(&shm_group);

    if (world_group != MPI_GROUP_NULL)
        MPI_Group_free(&world_group);

    MPI_Finalize();

    return 0;
}
Example #30
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init(&argc,&argv);

	MPI_Comm_group( MPI_COMM_WORLD, &g1 );
	MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );
	if (size < 8) {
	    fprintf( stderr, 
		  "Test requires 8 processes (16 prefered) only %d provided\n",
		     size );
	    errs++;
	}

	/* 16 members, this process is rank 0, return in group 1 */
	ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
	if (myrank == 2) ranks[1] = 3;
	if (myrank == 7) ranks[2] = 6;
	MPI_Group_incl( g1, 3, ranks, &g2 );
	
	/* Check the resulting group */
	MPI_Group_size( g2, &size );
	MPI_Group_rank( g2, &rank );
	
	if (size != 3) {
	    fprintf( stderr, "Size should be %d, is %d\n", 3, size );
	    errs++;
	}
	if (rank != 0) {
	    fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
	    errs++;
	}

	rin[0] = 0; rin[1] = 1; rin[2] = 2;
	MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
	for (i=0; i<3; i++) {
	    if (rout[i] != ranks[i]) {
		fprintf( stderr, "translated rank[%d] %d should be %d\n", 
			 i, rout[i], ranks[i] );
		errs++;
	    }
	}
	
	/* Translate the process of the self group against another group */
	MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
	rin[0] = 0;
	MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
	if (rout[0] != myrank) {
	    fprintf( stderr, "translated of self is %d should be %d\n", 
			 rout[0], myrank );
	    errs++;
	}

	for (i=0; i<size; i++) 
	    rin[i] = i;
	MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
	for (i=0; i<size; i++) {
	    if (i == myrank && rout[i] != 0) {
		fprintf( stderr, "translated world to self of %d is %d\n",
			 i, rout[i] );
		errs++;
	    }
	    else if (i != myrank && rout[i] != MPI_UNDEFINED) {
		fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
			 i, rout[i] );
		errs++;
	    }
	}
	MPI_Group_free( &selfgroup );

	/* Exclude everyone in our group */
	{
	    int ii, *lranks, g1size;

	    MPI_Group_size( g1, &g1size );
	    
	    lranks = (int *)malloc( g1size * sizeof(int) );
	    for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
	    MPI_Group_excl( g1, g1size, lranks, &g6 );
	    if (g6 != MPI_GROUP_EMPTY) {
		fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
		errs++;
		MPI_Group_free( &g6 );
	    }
	    free( lranks );
	}
	
	/* Add tests for additional group operations */
	/* 
	   g2 = incl 1,3,7
	   g3 = excl 1,3,7
	   intersect ( w, g2 ) => g2
	   intersect ( w, g3 ) => g3
	   intersect ( g2, g3 ) => empty
	   
	   g4 = rincl 1:n-1:2
	   g5 = rexcl 1:n-1:2
	   union( g4, g5 ) => world
	   g6 = rincl n-1:1:-1 
	   g7 = rexcl n-1:1:-1
	   union( g6, g7 ) => concat of entries, similar to world
	   diff( w, g2 ) => g3
	*/
	MPI_Group_free( &g2 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_incl( g1, 1, range, &g4 );
	MPI_Group_union( g4, g5, &g45 );
	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );

	/* Now, duplicate the test, but using negative strides */
	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_incl( g1, 1, range, &g4 );

	MPI_Group_union( g4, g5, &g45 );

	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );
        MPI_Group_free( &g1 );

    if (myrank == 0) 
    {
	if (errs == 0) {
	    printf( " No Errors\n" );
	}
	else {
	    printf( "Found %d errors\n", errs );
	}
    }

    MPI_Finalize();
    return 0;
}