Example #1: splitting MPI_COMM_WORLD ranks into even and odd GA process groups
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "ga.h"
#include "macdecls.h"

int main(int argc, char **argv)
{
  int rank, nprocs, i;
  int p_Geven, p_Godd, p_size, mod, p_size_mod;
  int *list_even=NULL, *list_odd=NULL;
  
  MPI_Init(&argc, &argv);
  
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  MA_init(C_INT, 1000, 1000);

  GA_Initialize();
  
  /* Even ranks get the extra process when nprocs is odd. */
  p_size = nprocs/2;           /* number of odd ranks */
  mod = nprocs%2;
  p_size_mod = p_size+mod;     /* number of even ranks */
  list_even = (int*)malloc(p_size_mod*sizeof(int));
  list_odd = (int*)malloc(p_size*sizeof(int));
  
  /* Partition the world ranks into even and odd lists. */
  for(i=0; i<nprocs; i++)
    {
      if(i%2==0)
	list_even[i/2] = i;
      else
	list_odd[i/2] = i;
    }
  /* Collective over the world group: every process passes both full lists. */
  p_Geven=GA_Pgroup_create(list_even, p_size_mod);
  p_Godd=GA_Pgroup_create(list_odd, p_size);

  if(rank%2==0)
    printf("%d: group rank %d of %d -- even\n", rank,
           GA_Pgroup_nodeid(p_Geven), GA_Pgroup_nnodes(p_Geven));
  else
    printf("%d: group rank %d of %d -- odd\n", rank,
           GA_Pgroup_nodeid(p_Godd), GA_Pgroup_nnodes(p_Godd));
  
  GA_Sync();
  if(rank==0)
    printf("all process groups created successfully\n"); /* the original used a GA_PRINT_MSG() test-harness macro here */

  free(list_even);
  free(list_odd);

  GA_Terminate();
  MPI_Finalize();
  return 0;
}
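
When the partition is regular, the explicit list-building above can be avoided: GA also provides GA_Pgroup_split, which divides an existing group into a given number of equally sized subgroups. A minimal sketch (called after GA_Initialize, and assuming nprocs is even so the split is exact):

  int world = GA_Pgroup_get_world();
  int half  = GA_Pgroup_split(world, 2);  /* two subgroups; returns this process's subgroup handle */
  printf("%d: subgroup rank %d of %d\n", GA_Nodeid(),
         GA_Pgroup_nodeid(half), GA_Pgroup_nnodes(half));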
Example #2: building a GA process group from an arbitrary MPI communicator (PETSc helper)
// -------------------------------------------------------------
// MPIComm2GApgroup
// -------------------------------------------------------------
static
PetscErrorCode
MPIComm2GApgroup(MPI_Comm comm, int *pGrpHandle)
{
  PetscErrorCode ierr = 0;
  int nproc;
  int me, myGlobalRank;
  int *proclist;
  int p;

  ierr = MPI_Comm_size(comm, &nproc); CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &me); CHKERRQ(ierr);
  myGlobalRank = GA_Nodeid();  /* this process's rank in the GA world group */
  ierr = PetscMalloc(nproc*sizeof(int), &proclist); CHKERRQ(ierr);
  /* Each rank drops its world rank into its own slot of a zeroed array;
     summing across the communicator assembles the full list everywhere. */
  for (p = 0; p < nproc; ++p) {
    proclist[p] = 0;
  }
  proclist[me] = myGlobalRank;
  ierr = MPI_Allreduce(MPI_IN_PLACE, &proclist[0], nproc, MPI_INT,
                       MPI_SUM, comm); CHKERRQ(ierr);
  /* GA_Pgroup_create expects world-group ranks, which is what we built. */
  *pGrpHandle = GA_Pgroup_create(&proclist[0], nproc);
  ierr = PetscFree(proclist); CHKERRQ(ierr);
  return ierr;
}
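
A hypothetical call site, assuming a PETSc application that wants subsequent GA operations scoped to PETSC_COMM_WORLD (the variable names are illustrative):

  int gaGroup;
  PetscErrorCode ierr = MPIComm2GApgroup(PETSC_COMM_WORLD, &gaGroup); CHKERRQ(ierr);
  GA_Pgroup_set_default(gaGroup);  /* GA collectives now run over this group */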
Example #3: one process group per SMP node via the GA cluster API
#include <stdio.h>
#include <mpi.h>
#include "ga.h"
#include "macdecls.h"

/* The original test harness supplies these through shared headers;
   the definitions below are reasonable stand-ins. */
#define PROC_LIST_SIZE 100
#define MP_INIT(argc,argv) MPI_Init(&(argc), &(argv))
#define MP_FINALIZE()      MPI_Finalize()
static const int n = 4;  /* global array dimension (assumed) */

int main(int argc, char **argv) {
    int me;
    int g_a;
    int status;
    int i,j;
    int dims[] = {n,n};
    int proc_group[PROC_LIST_SIZE],proclist[PROC_LIST_SIZE],inode;
    int sbuf[1],rbuf[1];
    MPI_Comm comm;

    MP_INIT(argc,argv);
    GA_Initialize();
    me = GA_Nodeid();

    /* Initialize MA and configure it to fail loudly on errors. */
    status = MA_init(MT_DBL, 100000, 100000);
    if (!status) GA_Error("ma_init failed",-1);
    status = MA_set_auto_verify(1);
    status = MA_set_hard_fail(1);
    status = MA_set_error_print(1);

    inode = GA_Cluster_nodeid();  /* which SMP node this process is on */
    if (me == 0) {
        printf("there are %d nodes, node 0 has %d procs\n",
                GA_Cluster_nnodes(), GA_Cluster_nprocs(0));
        fflush(stdout);
    }
    GA_Sync();
    /* Create one process group per SMP node from the cluster topology. */
    for (i=0; i<GA_Cluster_nnodes(); ++i) {
        for (j=0; j<GA_Cluster_nprocs(i); ++j) {
            proclist[j]=GA_Cluster_procid(i,j);
        }
        proc_group[i]=GA_Pgroup_create(proclist,GA_Cluster_nprocs(i));
    }
    GA_Sync();
    for (i=0; i<GA_Cluster_nnodes(); ++i) {
        if (i == inode) {
            printf("%d joining group %d\n", me, proc_group[inode]);
            /* While this default is set, new arrays are distributed
               over the node-local group only. */
            GA_Pgroup_set_default(proc_group[inode]);
            g_a = NGA_Create(C_DBL, 2, dims, "a", NULL);
            if (!g_a) GA_Error("NGA_Create failed",-1);
            printf("%d Created array of  group %d as proc no. %d\n",
                    me, proc_group[inode], GA_Nodeid());
            GA_Print_distribution(g_a);
            /* Cross-check the group's MPI communicator against the
               cluster topology. */
            comm = GA_MPI_Comm_pgroup_default();
            if (comm != MPI_COMM_NULL) {
                sbuf[0] = GA_Nodeid();
                status = MPI_Allreduce(sbuf, rbuf, 1, MPI_INT, MPI_MAX, comm);
                printf("%d max nodeid is %d\n", me, rbuf[0]);
                if ((rbuf[0]+1) != GA_Cluster_nprocs(i)) {
                    GA_Error("MPI_Allreduce failed",1);
                }
            }
            else {
                printf("MPI_Comm was null!\n");
            }
            GA_Pgroup_set_default(GA_Pgroup_get_world());
        }
        GA_Sync();
    }

    GA_Terminate();
    MP_FINALIZE();

    return 0;
}
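
Note that the example leaves both the per-node groups and g_a alive until GA_Terminate reclaims them. An explicit cleanup could call GA_Destroy(g_a) inside the block where the node group is still the default, then mirror the creation loop, e.g.:

    for (i=0; i<GA_Cluster_nnodes(); ++i) {
        GA_Pgroup_destroy(proc_group[i]);
    }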
Example #4: task-dispatch client creating a transient process group per task
/** Client code. Receives signals from the server either to process a
    task or to terminate processing and return. */
void client_code() {
  int *buf = NULL, buf_size;
  int flag;
  MPI_Status status;
  Integer p_handle;
  int ntsks=0, src;
  const char *pname = "client_code";
  double e1, e2, e3, f1, f2, f3, f4, f5, f6, f7, f8;
  double t_prepar=0, t_wait_start=0, t_grp=0, t_sync=0, t_compl=0, t_dest=0;
  const int server = GA_Pgroup_absolute_id(ga_pgroup_get_default_(), SVR);
  const int default_grp = ga_pgroup_get_default_(); /* default GA group for this dispatcher instance */
  const int world_me = GA_Nodeid();
  const int nproc = GA_Nnodes();

  t_ptask = 0.0;  /* cumulative task time; assumed declared at file scope */

  e1 = util_wallsec_();

  buf_size = 1+ /*action to perform*/
    1+ /*task id - if TASK_SIGNAL*/
    nproc /*process group info*/
    ;

  buf = (int *)alloca(buf_size*sizeof(int));  /* stack allocation; released on return */
  assert(buf != NULL);


  e2 = util_wallsec_();
  while(1) {
    int nelem, grp_me;
    Integer tskid;

    f1 = util_wallsec_();
    /* Block until the server (or a group leader) sends a signal. */
    MPI_Recv(buf, buf_size, MPI_INT, MPI_ANY_SOURCE, SIGNAL_TAG,
             MPI_COMM_WORLD, &status);
    f2 = util_wallsec_();
    t_wait_start += (f2-f1);

    MPI_Get_elements(&status, MPI_INT, &nelem);
    assert(nelem >= 1);
      
    if(buf[0] == TERM_CLIENT) {
      /* Termination signal: optionally propagate it to peers, then return. */
#ifdef LEADER_BCAST
      signal_termination(SVR, status.MPI_SOURCE);
#endif
      break;
    }
    /* Otherwise this is the start of a task. */
    assert(buf[0] == TASK_START);
    ntsks += 1;

    if(status.MPI_SOURCE == server) {
      /* Sort the rank list so every group member builds the same
         ordered list for group creation. */
      qsort(buf+2, nelem-2, sizeof(int), int_compare);
    }
    f3  = util_wallsec_();
    t_prepar += (f3-f2);

#ifdef LEADER_BCAST
    src = (server==status.MPI_SOURCE)?buf[2]:status.MPI_SOURCE;
    broadcast(nelem-2,buf+2,buf[2],src,buf,nelem*sizeof(int));
#endif

    /* The incoming proc ids are world-group ranks, so build the
       subgroup relative to the world group. */
    GA_Pgroup_set_default(GA_Pgroup_get_world());
    p_handle = GA_Pgroup_create(&buf[2], nelem-2);
    GA_Pgroup_set_default(p_handle);
    f4 = util_wallsec_();  /* same clock as f3, so the difference is meaningful */
    t_grp += (f4-f3);

    tskid = buf[1];
    process_task_(&tskid, &p_handle);
    f5 = util_wallsec_();
    t_ptask += (f5-f4);
    
    GA_Pgroup_sync(p_handle);
    grp_me = GA_Nodeid();
    f6 = util_wallsec_();
    t_sync += (f6-f5);

    if(grp_me == 0) {
      /* Group leader acknowledges task completion to the server. */
      int v[2] = {TASK_DONE, tskid};
      MPI_Send(v, 2, MPI_INT, server, SIGNAL_TAG, MPI_COMM_WORLD);
    }
    f7 = util_wallsec_();
    t_compl += (f7-f6);
    GA_Pgroup_destroy(p_handle);
    GA_Pgroup_set_default(default_grp);
    f8 = util_wallsec_();
    t_dest += (f8-f7);
  }
  e3 = util_wallsec_();  /* end of client loop; the t_* totals above are available for profiling output */
}
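
The qsort call above relies on an int_compare helper that is not part of this excerpt; a minimal version consistent with its use (ascending order over ints) might look like:

/* Comparator for qsort over int arrays, ascending. */
static int int_compare(const void *a, const void *b)
{
  const int ia = *(const int *)a;
  const int ib = *(const int *)b;
  return (ia > ib) - (ia < ib);
}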