Example #1
/* to check if a process belongs to this group */
int chk_grp_membership(int rank, ARMCI_Group *grp, int *memberlist) 
{
    int i,grp_size;
    ARMCI_Group_size(grp, &grp_size);
    for(i=0; i<grp_size; i++) if(rank==memberlist[i]) return 1;
    return 0;
}
Example #2
/**
 * Construct the armci_clus_t arrays of structs for a given group. 
 *
 * @param grp_nclus_nodes OUT #clus_info objects returned
 * @param igroup IN Group whose clus_info needs to be constructed
 * @return array of armci_clus_t objects
 */
static armci_clus_t* group_construct_clusinfo(int *grp_nclus_nodes, ARMCI_iGroup *igroup) {
  armci_clus_t *grp_clus_info=NULL;
  int i, *grp_clus_id, cluster, clus_id, grp_nproc, grp_nclus;

  ARMCI_Group_size((ARMCI_Group *)igroup, &grp_nproc);

  /* get the cluster_id of processes in the group */
  grp_clus_id = (int *)malloc(grp_nproc*sizeof(int));
  get_group_clus_id(igroup, grp_nproc, grp_clus_id);
       
  /* first find out how many cluster nodes we got for this group */
  grp_nclus=1;
  for(i=0; i<grp_nproc-1; i++) {
    if(grp_clus_id[i] != grp_clus_id[i+1]) ++grp_nclus;
  }
  *grp_nclus_nodes = grp_nclus;
  grp_clus_info = (armci_clus_t*)malloc(grp_nclus*sizeof(armci_clus_t));
  if(!grp_clus_info)armci_die("malloc failed for grp_clusinfo",grp_nclus);

  cluster = 1;
  clus_id = grp_clus_id[0];
  grp_clus_info[0].nslave = 1;
  grp_clus_info[0].master = 0;
  strcpy(grp_clus_info[0].hostname, armci_clus_info[clus_id].hostname);

  for(i=1; i<grp_nproc; i++) {
    if(grp_clus_id[i-1] == grp_clus_id[i]) 
      ++grp_clus_info[cluster-1].nslave;
    else {
      clus_id = grp_clus_id[i];
      grp_clus_info[cluster].nslave = 1;
      grp_clus_info[cluster].master = i;
      strcpy(grp_clus_info[cluster].hostname, 
	     armci_clus_info[clus_id].hostname);
      ++cluster;
    }
  }

  free(grp_clus_id);
  if(grp_nclus != cluster)
    armci_die("inconsistency processing group clusterinfo", grp_nclus);

#   if DEBUG_
  {
    int i,j;
    for(i=0; i<cluster;i++) {
      printf("%d: Cluster %d: Master=%d, #Slaves=%d, HostName=%s\n", 
	     grp_nclus, i, grp_clus_info[i].master, 
	     grp_clus_info[i].nslave, grp_clus_info[i].hostname);
      fflush(stdout);
    }
  }
#   endif  
  return grp_clus_info;
}
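For illustration, suppose a five-member group whose ranks map to host clusters {0, 0, 1, 1, 1}. The counting loop above then yields grp_nclus = 2, and the array is filled as follows (hypothetical values; the hostname field is copied from armci_clus_info in each case):

/* hypothetical result for grp_clus_id = {0, 0, 1, 1, 1} */
grp_clus_info[0].master = 0;  grp_clus_info[0].nslave = 2;  /* group ranks 0-1 */
grp_clus_info[1].master = 2;  grp_clus_info[1].nslave = 3;  /* group ranks 2-4 */

Note that master holds the group rank of the first member on each node, not a world rank.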
Example #3
/*\ ************** Begin Group Collective Memory Allocation ******************
 *  returns array of pointers to blocks of memory allocated by everybody
 *  Note: as the same shared memory region can be mapped at different locations
 *        in each process address space, the array might hold different values
 *        on every process. However, the addresses are legitimate
 *        and can be used in the ARMCI data transfer operations.
 *        ptr_arr[nproc]
\*/
int ARMCI_Malloc_group(void *ptr_arr[], armci_size_t bytes,
                       ARMCI_Group *group)
{
    void *ptr;
    int grp_me, grp_nproc;
    ARMCI_PR_DBG("enter",0);
    ARMCI_Group_size(group, &grp_nproc);
    ARMCI_Group_rank(group, &grp_me);
    if(DEBUG_)fprintf(stderr,"%d (grp_id=%d) bytes in armci_malloc_group %d\n",
                      armci_me, grp_me, (int)bytes);
    
    ARMCI_PR_DBG("exit",0);
    return(0);
}
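To make the comment above concrete, here is a minimal usage sketch (not from the library; use_group_block, the peer argument, and the MAXPROC bound are illustrative, and bytes is assumed to be at least sizeof(double)). Example #4 below shows a fuller test along the same lines.

/* Illustrative only: each member allocates a block and then targets member
 * "peer" through its entry in the returned pointer array. */
void use_group_block(ARMCI_Group *group, armci_size_t bytes, int peer)
{
    void *ptr_arr[MAXPROC];     /* one slot per group member */
    int grp_me;
    double value = 3.14;

    ARMCI_Group_rank(group, &grp_me);
    ARMCI_Malloc_group(ptr_arr, bytes, group);   /* collective over the group */

    /* ptr_arr[grp_me] can be dereferenced locally; ptr_arr[peer] is only a
     * remote address and must be paired with the peer's absolute id. */
    if (grp_me != peer) {
        ARMCI_Put(&value, ptr_arr[peer], sizeof(value),
                  ARMCI_Absolute_id(group, peer));
        ARMCI_Fence(ARMCI_Absolute_id(group, peer)); /* complete the put remotely */
    }

    armci_msg_group_barrier(group);     /* all members done before freeing */
    ARMCI_Free_group(ptr_arr[grp_me], group);
}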
Example #4
void test_one_group(ARMCI_Group *group, int *pid_list) {
  int grp_me, grp_size;
  int i,j,src_proc,dst_proc;
  double *ddst_put[MAXPROC];
  double dsrc[ELEMS];
  int elems[2] = {MAXPROC,ELEMS};
  int value = -1, bytes, world_me;
  
  MP_MYID(&world_me);
  ARMCI_Group_rank(group, &grp_me);
  ARMCI_Group_size(group, &grp_size);
  if(grp_me==0) printf("GROUP SIZE = %d\n", grp_size);
  printf("%d:group rank = %d\n", me, grp_me);

  src_proc = 0; dst_proc = grp_size-1;
       
  bytes = ELEMS*sizeof(double);       
  ARMCI_Malloc_group((void **)ddst_put, bytes, group);
       
  for(i=0; i<ELEMS; i++) dsrc[i]=i*1.001*(grp_me+1); 
  for(i=0; i<ELEMS; i++) ddst_put[grp_me][i]=-1.0;
       
  armci_msg_group_barrier(group);
       
  if(grp_me==src_proc) {
    /* NOTE: make sure to specify absolute ids in ARMCI calls */
    ARMCI_Put(dsrc, &ddst_put[dst_proc][0], bytes,
	      ARMCI_Absolute_id(group,dst_proc));
  }
       
  armci_msg_group_barrier(group);
  /* NOTE: make sure to specify absolute ids in ARMCI calls */
  ARMCI_Fence(ARMCI_Absolute_id(group,dst_proc));
  sleep(1);
       
       
  /* Verify*/
  if(grp_me==dst_proc) {
    for(j=0; j<ELEMS; j++) {
      if(ARMCI_ABS(ddst_put[grp_me][j]-j*1.001*(src_proc+1)) > 0.1) {
	printf("\t%d: ddst_put[%d][%d] = %lf and expected value is %lf\n",
	       me, grp_me, j, ddst_put[grp_me][j], j*1.001*(src_proc+1));
	ARMCI_Error("groups: armci put failed...1", 0);
      }
    }
    printf("\n%d(%d): Test O.K. Verified\n", dst_proc, world_me);
  }
  armci_msg_group_barrier(group);
  ARMCI_Free_group(ddst_put[grp_me], group);
}
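For context, here is a hedged sketch of a driver that creates the group handed to test_one_group above. It reuses chk_grp_membership from Example #1 and the MAXPROC bound; test_even_group and the MP_PROCS macro (assumed to be the world-size counterpart of MP_MYID in the test harness) are illustrative assumptions.

/* Hypothetical driver: build a group of the even-numbered world ranks and
 * run test_one_group on it. */
void test_even_group(void)
{
    int world_me, world_nproc, i, nmembers = 0, is_member;
    int pid_list[MAXPROC];
    ARMCI_Group group;

    MP_MYID(&world_me);
    MP_PROCS(&world_nproc);
    for (i = 0; i < world_nproc; i += 2) pid_list[nmembers++] = i;

    /* every world process takes part in creating the group */
    ARMCI_Group_create(nmembers, pid_list, &group);

    /* only members enter the group-collective test (see Example #1) */
    is_member = chk_grp_membership(world_me, &group, pid_list);
    if (is_member) test_one_group(&group, pid_list);

    ARMCI_Barrier();                    /* world-level sync before teardown */
    if (is_member) ARMCI_Group_free(&group);
}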
Example #5
/* group based exchange address */
void armci_shmalloc_exchange_address_grp(void **ptr_arr, ARMCI_Group *group) 
{
    int i, world_rank;
    int grp_nproc;

    ARMCI_Group_size(group, &grp_nproc);
    
    /* now combine individual addresses into a single array */
    armci_exchange_address_grp(ptr_arr, grp_nproc, group);

    /* since shmalloc may not give symmetric addresses (especially on Linux),
     * adjust addresses based on offset calculated during initialization */
    for (i=0; i<grp_nproc; i++) 
    {
       world_rank = ARMCI_Absolute_id(group,i);
       ptr_arr[i] = (char*)ptr_arr[i] + offset_arr[world_rank];
    }
}
Example #6
void armci_altix_shm_malloc_group(void *ptr_arr[], armci_size_t bytes,
                                  ARMCI_Group *group) {
    long size=bytes;
    void *ptr;
    int i,grp_me, grp_nproc;
    armci_grp_attr_t *grp_attr=ARMCI_Group_getattr(group);
    ARMCI_PR_DBG("enter",0);

    ARMCI_Group_size(group, &grp_nproc);
    ARMCI_Group_rank(group, &grp_me);
    armci_msg_group_lgop(&size,1,"max",group);
    ptr=kr_malloc((size_t)size, &altix_ctx_shmem_grp);
    if(size!=0 && ptr==NULL)
       armci_die("armci_altix_shm_malloc_group(): malloc failed for groups. Increase _SHMMAX_ALTIX_GRP", armci_me);
    bzero(ptr_arr,(grp_nproc)*sizeof(void*));
    ptr_arr[grp_me] = ptr;
    for(i=0; i< grp_nproc; i++) if(i!=grp_me) ptr_arr[i]=shmem_ptr(ptr,ARMCI_Absolute_id(group, i));
    ARMCI_PR_DBG("exit",0);
}
Example #7
int ARMCI_Free_group(void *ptr, ARMCI_Group *group)
{
    int grp_me, grp_nproc, grp_master, grp_clus_me;
    armci_grp_attr_t *grp_attr=ARMCI_Group_getattr(group);
    ARMCI_PR_DBG("enter",0);
    
    if(!ptr)return 1;

    ARMCI_Group_size(group, &grp_nproc);
    ARMCI_Group_rank(group, &grp_me);
    if(grp_me == MPI_UNDEFINED) { /* check if the process is in this group */
       armci_die("armci_malloc_group: process is not a member in this group",
                 armci_me);
    }
    /* get the group cluster info */
    grp_clus_me    = grp_attr->grp_clus_me;
    grp_master     = grp_attr->grp_clus_info[grp_clus_me].master;

    ARMCI_PR_DBG("exit",0);
    return 0;
}
Example #8
int ARMCI_Uses_shm_grp(ARMCI_Group *group) 
{    
    int uses=0, grp_me, grp_nproc, grp_nclus;
    armci_grp_attr_t *grp_attr=ARMCI_Group_getattr(group);
    ARMCI_PR_DBG("enter",0);

    ARMCI_Group_size(group, &grp_nproc);
    ARMCI_Group_rank(group, &grp_me);
    grp_nclus = grp_attr->grp_nclus;
    
#if (defined(SYSV) || defined(WIN32) || defined(MMAP) ||defined(HITACHI)) \
    && !defined(NO_SHM)
#   ifdef RMA_NEEDS_SHMEM
      if(grp_nproc >1) uses= 1; /* always unless serial mode */
#   else
      if(grp_nproc != grp_nclus)uses= 1; /* only when > 1 node used */
#   endif
#endif
    if(DEBUG_) fprintf(stderr,"%d (grp_id=%d):uses shmem %d\n",armci_me, grp_me, uses);
    ARMCI_PR_DBG("exit",0);
    return uses;
}
Example #9
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <armci.h>

int main(int argc, char **argv) {
  int                      me, nproc;
  int                      i, *procs;
  ARMCI_Group              g_world, g_odd, g_even;

  MPI_Init(&argc, &argv);
  ARMCI_Init();

  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);

  procs = malloc(sizeof(int) * ( nproc/2 + (nproc % 2 ? 1 : 0 )));

  if (me == 0) printf("ARMCI Group test starting on %d procs\n", nproc);

  ARMCI_Group_get_world(&g_world);
  
  if (me == 0) printf(" + Creating odd group\n");

  for (i = 1; i < nproc; i += 2) {
    procs[i/2] = i;
  }

  ARMCI_Group_create_child(i/2, procs, &g_odd, &g_world);

  if (me == 0) printf(" + Creating even group\n");

  for (i = 0; i < nproc; i += 2) {
    procs[i/2] = i;
  }

  ARMCI_Group_create_child(i/2, procs, &g_even, &g_world);

  /***********************************************************************/
  {
    int    grp_me, grp_nproc;
    double t_abs_to_grp, t_grp_to_abs;
    const int iter = 1000000;

    if (me == 0) {
      ARMCI_Group_rank(&g_even, &grp_me);
      ARMCI_Group_size(&g_even, &grp_nproc);

      t_abs_to_grp = MPI_Wtime();

      for (i = 0; i < iter; i++)
        ARMCII_Translate_absolute_to_group(&g_even, (grp_me+1) % grp_nproc);

      t_abs_to_grp = MPI_Wtime() - t_abs_to_grp;

      t_grp_to_abs = MPI_Wtime();

      for (i = 0; i < iter; i++)
        ARMCI_Absolute_id(&g_even, (grp_me+1) % grp_nproc);

      t_grp_to_abs = MPI_Wtime() - t_grp_to_abs;

      printf("t_abs_to_grp = %f us, t_grp_to_abs = %f us\n", t_abs_to_grp/iter * 1.0e6, t_grp_to_abs/iter * 1.0e6);
    }

    ARMCI_Barrier();
  }
  /***********************************************************************/

  if (me == 0) printf(" + Freeing groups\n");

  if (me % 2 > 0)
    ARMCI_Group_free(&g_odd);
  else
    ARMCI_Group_free(&g_even);

  free(procs);

  ARMCI_Finalize();
  MPI_Finalize();

  return 0;
}
Example #10
/**
 * Group cluster information "grp_clus_info" (similar to armci_clus_info)
 */
static void group_process_list(ARMCI_Group *group, 
                               armci_grp_attr_t *grp_attr) {
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group;
#ifndef ARMCI_GROUP
    ARMCI_Comm comm = igroup->icomm;
#endif

    int grp_me, grp_nproc, grp_nclus, grp_clus_me;
    armci_clus_t *grp_clus_info=NULL;
#ifdef CLUSTER
    int i, len, root=0;
#endif
    
#ifndef ARMCI_GROUP
    if(comm==MPI_COMM_NULL || igroup->igroup==MPI_GROUP_NULL) 
       armci_die("group_process_list: NULL COMMUNICATOR",0);
#endif
    
    ARMCI_Group_rank(group, &grp_me);
    ARMCI_Group_size(group, &grp_nproc);
    
#ifdef CLUSTER
#ifdef ARMCI_GROUP
    /*all processes construct the clus_info structure in parallel*/
    grp_clus_info = group_construct_clusinfo(&grp_nclus, igroup);
#else
    /* process 0 gets group cluster information: grp_nclus, grp_clus_info */
    if(grp_me == 0) {
      grp_clus_info = group_construct_clusinfo(&grp_nclus, igroup);
    }

    /* process 0 broadcasts group cluster information */
    len = sizeof(int);
    ARMCI_Bcast_(&grp_nclus, len, root, comm);
    
    if(grp_me){
       /* allocate memory */
       grp_clus_info = (armci_clus_t*)malloc(grp_nclus*sizeof(armci_clus_t));
       if(!grp_clus_info)armci_die("malloc failed for grp_clusinfo",grp_nclus);
    }
    
    len = sizeof(armci_clus_t)*grp_nclus;
    ARMCI_Bcast_(grp_clus_info, len, root, comm);
#endif
    /* determine current group cluster node id by comparing me to master */
    grp_clus_me =  grp_nclus-1;
    for(i =0; i< grp_nclus-1; i++) {
       if(grp_me < grp_clus_info[i+1].master){
          grp_clus_me=i;
          break;
       }
    }

#else
    grp_clus_me = 0;
    grp_nclus = 1;
    grp_clus_info = (armci_clus_t*)malloc(grp_nclus*sizeof(armci_clus_t));
    if(!grp_clus_info)armci_die("malloc failed for clusinfo",grp_nclus);
    strcpy(grp_clus_info[0].hostname, armci_clus_info[0].hostname);
    grp_clus_info[0].master=0;
    grp_clus_info[0].nslave=grp_nproc;
#endif
#ifdef ARMCI_GROUP
    /* grp_me is set in ARMCI_Group_create: ARMCI_Group_rank is needed before
       this field is initialized, so the assignment lives there in the generic
       implementation. */
#else
    grp_attr->grp_me        = grp_me;
#endif
    grp_attr->grp_clus_info = grp_clus_info;
    grp_attr->grp_nclus     = grp_nclus;
    grp_attr->grp_clus_me   = grp_clus_me;
}