Example #1
void init_comms(void){ 
    extern int numtasks, rank, 
    myfieldrank, myenglandrank, mybrazilrank, 
    field_ranks[12], eng_ranks[11], bra_ranks[11];
    
    extern MPI_Group world, england, brazil, engfield, brafield, field;
    extern MPI_Comm eng_comm, bra_comm, engfield_comm, brafield_comm, field_comm; 
    
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	if (numtasks != NPROCS) 
	{
		printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
		MPI_Finalize();
		exit(EXIT_FAILURE);
	}
    
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    
    MPI_Group_incl(world, 12, field_ranks, &field);
    MPI_Group_incl(world, 11, eng_ranks, &england);
    MPI_Group_incl(world, 11, bra_ranks, &brazil);
    MPI_Group_union(field, england, &engfield);
    MPI_Group_union(field, brazil, &brafield);
    
	MPI_Comm_create(MPI_COMM_WORLD, field, &field_comm);
    MPI_Comm_create(MPI_COMM_WORLD, england, &eng_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brazil, &bra_comm);
    MPI_Comm_create(MPI_COMM_WORLD, engfield, &engfield_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brafield, &brafield_comm);
    
    MPI_Group_rank (field, &myfieldrank);
    MPI_Group_rank (england, &myenglandrank);    
    MPI_Group_rank (brazil, &mybrazilrank); 
}
Example #2
bool SplitMPI_Communicator::CreateCommunicator(MPI_Comm comm_world, int np, int nb_ddc)
{
	int n_DDC;
	bool splitcomm;

	if ((nb_ddc > 0) && (nb_ddc < np))
	{ // if the total number of cores is larger than the number of DDCs, two new MPI groups will be
	  // generated
#ifdef OGS_FEM_IPQC
		splitcomm = true;
		n_DDC = nb_ddc; // number of ddc

		int DDC_ranks[n_DDC];
		for (int k = 0; k < n_DDC; k++)
		{
			DDC_ranks[k] = k;
		}

		MPI_Comm comm_IPQC;
		MPI_Group group_base, group_DDC, group_IPQC;

		// define MPI group and communicator for DDC related processes WH
		MPI_Comm_group(comm_world, &group_base);
		MPI_Group_incl(group_base, n_DDC, DDC_ranks, &group_DDC); // define group flow and mass transport
		MPI_Comm_create(comm_world, group_DDC, &comm_DDC);

		// define MPI group and communicator for IPQC WH
		MPI_Group_difference(group_base, group_DDC, &group_IPQC);
		MPI_Comm_create(comm_world, group_IPQC, &comm_IPQC);

		int myrank_IPQC, mysize_IPQC;
		MPI_Group_size(group_DDC, &mysize); // WH
		MPI_Group_rank(group_DDC, &myrank); // WH
		MPI_Group_rank(group_IPQC, &myrank_IPQC);
		MPI_Group_size(group_IPQC, &mysize_IPQC);
		if (myrank_IPQC != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_IPQC = " << myrank_IPQC << '\n';
		if (myrank != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';

		if (myrank_IPQC != MPI_UNDEFINED) // ranks of group_IPQC will call to IPhreeqc
			Call_IPhreeqc();
#endif
	}
	else
	{ // if no -ddc is specified or the number of DDCs is incorrect, make ddc = np; no new MPI groups will be
		// generated
		splitcomm = false;
		n_DDC = np;
		comm_DDC = comm_world;
		MPI_Comm_size(comm_DDC, &mysize);
		MPI_Comm_rank(comm_DDC, &myrank);
		std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';
	}

	return splitcomm;
}
Example #3
/*
//  This function must be called by all members of communicator com.
*/
void print_comm_contents(MPI_Comm com, const char *name)
{
  MPI_Group world, local;
  int i, n, *ranks_local, *ranks_world;

  MPI_Comm_group(mpi.comm.world,&world);
  MPI_Comm_group(com,&local);

  MPI_Group_size(local,&n);
  MPI_Group_rank(local,&i);

  if(i == 0)
    {
      ranks_local = cart_alloc(int,n);
      ranks_world = cart_alloc(int,n);
  
      for(i=0; i<n; i++) ranks_local[i] = i;

      MPI_Group_translate_ranks(local,n,ranks_local,world,ranks_world);

      cart_debug("Communicator %s (%p), size = %d:",name,com,n);
      for(i=0; i<n; i++) cart_debug("id = %d -> world id = %d",i,ranks_world[i]);

      cart_free(ranks_local);
      cart_free(ranks_world);
    }
}
Example #4
int main(int argc, char *argv[])  {
	int rank, new_rank, sendbuf, recvbuf, numtasks;
	int P[4][4]={{0,1,2,3}, {4,5,6,7}, {8,9,10,11}, {12,13,14,15} };
	MPI_Group orig_group, new_group;
	MPI_Comm new_comm;

	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

	if (numtasks != NPROCS) {
		printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
		MPI_Finalize();
		exit(0);
	}

	sendbuf = rank;
	MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
	MPI_Group_incl(orig_group, NPROCS/4, P[rank/4], &new_group);

	MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
	MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);

	MPI_Group_rank (new_group, &new_rank);
	printf("rank= %2d newgroup= %2d newrank= %2d recvbuf= %2d\n",rank,rank/4,new_rank,recvbuf);

	MPI_Finalize();
	return 0;
}
Example #5
JNIEXPORT jint JNICALL Java_mpi_Group_getRank(
        JNIEnv *env, jobject jthis, jlong group)
{
    int rank, rc;
    rc = MPI_Group_rank((MPI_Group)group, &rank);
    ompi_java_exceptionCheck(env, rc);
    return rank;
}
Example #6
int ARMCI_Group_rank(ARMCI_Group *group, int *rank) {
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group;
#ifdef ARMCI_GROUP
    if(!igroup) return MPI_ERR_GROUP;
    *rank = igroup->grp_attr.grp_me;
    return MPI_SUCCESS;
#else
    return MPI_Group_rank((MPI_Group)(igroup->igroup), rank);
#endif
}
Example #7
/*
 * Class:     mpi_Group
 * Method:    Rank
 * Signature: ()I
 */
JNIEXPORT jint JNICALL Java_mpi_Group_Rank(JNIEnv *env, jobject jthis)
{
    int rank;

    ompi_java_clearFreeList(env) ;

    MPI_Group_rank((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
                   &rank);
    return rank;
}
Example #8
int main (int argc, char **argv)
{
  int num, i, rank, localRank;
  MPI_Group all, odd, even;
  MPI_Comm oddComm, evenComm;
  char mess[11];

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[num / 2];

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract the odd group from all to get the even ones
  MPI_Group_difference (all, odd, &even);

  MPI_Comm_create (MPI_COMM_WORLD, odd, &oddComm);
  MPI_Comm_create (MPI_COMM_WORLD, even, &evenComm);
  
  // check group membership
  MPI_Group_rank (odd, &localRank);
  if (localRank != MPI_UNDEFINED)
    {
      if (localRank == 0)       // local group root, sets up message
        strcpy (mess, "ODD GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, oddComm);
      MPI_Comm_free (&oddComm);  // free communicator in processes where it is valid
    }
  else
    {
      MPI_Comm_rank (evenComm, &localRank);
      if (localRank == 0)       // local group root, sets up message
        strcpy (mess, "EVEN GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, evenComm);
      MPI_Comm_free (&evenComm);
    }

  printf ("Process %i with local rank %i received %s\n", rank, localRank, mess);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #9
/**
 * Returns the rank of this process within the given group.
 */
int comex_group_rank(comex_group_t id, int *rank)
{
    int status;

    comex_igroup_t *igroup = comex_get_igroup_from_group(id);
    status = MPI_Group_rank(igroup->group, rank);
    if (status != MPI_SUCCESS) {
        comex_error("MPI_Group_rank: Failed ", status);
    }

    return COMEX_SUCCESS;
}
Example #10
/**
 * Returns the rank of this process within the given group.
 */
int ARMCI_Group_rank(ARMCI_Group *id, int *rank)
{
    int status;

    ARMCI_iGroup *igroup = armci_get_igroup_from_group(id);
    status = MPI_Group_rank(igroup->group, rank);
    if (status != MPI_SUCCESS) {
        armci_die("MPI_Group_rank: Failed ", status);
    }

    return 0; /* TODO what should this return? an error code? */
}
Example #11
static VALUE group_rank(VALUE self)
{
    int rv, rank;
    MPI_Group *grp;

    Data_Get_Struct(self, MPI_Group, grp);

    rv = MPI_Group_rank(*grp, &rank);
    mpi_exception(rv);

    if (rank == MPI_UNDEFINED)
        return UNDEFINED;

    return rb_fix_new(rank);
}
Example #12
void ompi_group_rank_f(MPI_Fint *group, MPI_Fint *rank, MPI_Fint *ierr)
{
  int c_ierr;
  ompi_group_t *c_group;
  OMPI_SINGLE_NAME_DECL(rank);

  /* Make the fortran to c representation conversion */
  c_group = MPI_Group_f2c(*group);
  
  c_ierr = MPI_Group_rank(c_group, OMPI_SINGLE_NAME_CONVERT(rank));
  if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

  if (MPI_SUCCESS == c_ierr) {
      OMPI_SINGLE_INT_2_FINT(rank);
  }
}
Example #13
int main (int argc, char **argv)
{
  int num, i, rank;
  MPI_Group all, odd, even;

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[num / 2];

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract the odd group from all to get the even ones
  MPI_Group_difference (all, odd, &even);

  // print group sizes
  if (rank == 0)
    {
      MPI_Group_size (odd, &i);
      printf ("Odd group has %i processes\n", i);
      MPI_Group_size (even, &i);
      printf ("Even group has %i processes\n", i);
    }

  // check group membership
  MPI_Group_rank (odd, &i);
  if (i == MPI_UNDEFINED)
    printf ("Process %i belongs to even group\n", rank);
  else
    printf ("Process %i belongs to odd group\n", rank);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #14
MPI_Fint c2fgroup_ (MPI_Fint *group)
{
    MPI_Group cGroup = MPI_Group_f2c(*group);
    int cSize, wSize, cRank, wRank;

    /* We pass in the group of comm world */
    MPI_Comm_size( MPI_COMM_WORLD, &wSize );
    MPI_Comm_rank( MPI_COMM_WORLD, &wRank );
    MPI_Group_size( cGroup, &cSize );
    MPI_Group_rank( cGroup, &cRank );

    if (wSize != cSize || wRank != cRank) {
	fprintf( stderr, "Group: Did not get expected size,rank (got %d,%d)",
		 cSize, cRank );
	return 1;
    }
    return 0;
}
Example #15
void armci_group_init() 
{
    int grp_me;
#ifdef ARMCI_GROUP
    int i;
#endif
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)&ARMCI_World_Proc_Group;

#ifdef ARMCI_GROUP
    /*setup the world proc group*/

    /*
    MPI_Comm_size(MPI_COMM_WORLD, &igroup->grp_attr.nproc); 
    MPI_Comm_rank(MPI_COMM_WORLD, &igroup->grp_attr.grp_me); 
    */

    igroup->grp_attr.nproc = armci_msg_nproc();
    igroup->grp_attr.grp_me = armci_msg_me();

    igroup->grp_attr.proc_list = (int *)malloc(igroup->grp_attr.nproc*sizeof(int));
    assert(igroup->grp_attr.proc_list != NULL);
    for(i=0; i<igroup->grp_attr.nproc; i++) {
      igroup->grp_attr.proc_list[i] = i;
    } 
    igroup->grp_attr.grp_clus_info = NULL;
    armci_cache_attr((ARMCI_Group*)&ARMCI_World_Proc_Group);
#else
    /* save MPI world group and communicator in ARMCI_World_Proc_Group */
    igroup->icomm = MPI_COMM_WORLD;
    MPI_Comm_group(MPI_COMM_WORLD, &(igroup->igroup));

    /* processes belonging to this group should cache attributes */
    MPI_Group_rank((MPI_Group)(igroup->igroup), &grp_me);
    if(grp_me != MPI_UNDEFINED) 
    {
       armci_cache_attr((ARMCI_Group*)&ARMCI_World_Proc_Group);
    }
#endif    

    /* Initially, World group is the default group */
    ARMCI_Default_Proc_Group = ARMCI_World_Proc_Group;
}
Example #16
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, grank, gsize;
    int minsize = 2, isleft; 
    MPI_Comm      comm;
    MPI_Group     group;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntercomm( &comm, &isleft, minsize )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	MPI_Comm_group( comm, &group );
	MPI_Group_rank( group, &grank );
	MPI_Group_size( group, &gsize );
	if (rank != grank) {
	    errs++;
	    fprintf( stderr, "Ranks of groups do not match %d != %d\n",
		     rank, grank );
	}
	if (size != gsize) {
	    errs++;
	    fprintf( stderr, "Sizes of groups do not match %d != %d\n",
		     size, gsize );
	}
	MPI_Group_free( &group );
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #17
int main(int argc,char** argv)
{
    int p,n,i, rank, g_rank, new_rank,a=0;
    int* rank_new;
    MPI_Group g, g_new;
    MPI_Comm c_new;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD,&p);
    MPI_Comm_rank(MPI_COMM_WORLD, & rank);
    MPI_Comm_group(MPI_COMM_WORLD, &g);
    n = p / 2;
    rank_new = malloc(n * sizeof(int));
    if (rank %2==0) for(i=0; i <n ; i++) rank_new[i] = 2 * i;
    else for(i=0; i <n; i++) rank_new[i] = 2 * i + 1;
    MPI_Group_incl(g,n,rank_new, &g_new);
    MPI_Comm_create(MPI_COMM_WORLD, g_new, &c_new);
    MPI_Group_rank(g_new, &g_rank);
    MPI_Comm_rank(c_new, &new_rank);
    if(rank == 0) a = 10;
    if(rank == 1) a = 20;
    MPI_Bcast(&a, 1, MPI_INT, 0, c_new);
        printf("rank : %d, g_rank: %d, new rank : %d, a = %d\n", rank, g_rank, new_rank,a);
    return 0;
}
Example #18
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init(&argc,&argv);

	MPI_Comm_group( MPI_COMM_WORLD, &g1 );
	MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );
	if (size < 8) {
	    fprintf( stderr, 
		  "Test requires 8 processes (16 prefered) only %d provided\n",
		     size );
	    errs++;
	}

	/* 16 members, this process is rank 0, return in group 1 */
	ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
	if (myrank == 2) ranks[1] = 3;
	if (myrank == 7) ranks[2] = 6;
	MPI_Group_incl( g1, 3, ranks, &g2 );
	
	/* Check the resulting group */
	MPI_Group_size( g2, &size );
	MPI_Group_rank( g2, &rank );
	
	if (size != 3) {
	    fprintf( stderr, "Size should be %d, is %d\n", 3, size );
	    errs++;
	}
	if (rank != 0) {
	    fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
	    errs++;
	}

	rin[0] = 0; rin[1] = 1; rin[2] = 2;
	MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
	for (i=0; i<3; i++) {
	    if (rout[i] != ranks[i]) {
		fprintf( stderr, "translated rank[%d] %d should be %d\n", 
			 i, rout[i], ranks[i] );
		errs++;
	    }
	}
	
	/* Translate the process of the self group against another group */
	MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
	rin[0] = 0;
	MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
	if (rout[0] != myrank) {
	    fprintf( stderr, "translated of self is %d should be %d\n", 
			 rout[0], myrank );
	    errs++;
	}

	for (i=0; i<size; i++) 
	    rin[i] = i;
	MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
	for (i=0; i<size; i++) {
	    if (i == myrank && rout[i] != 0) {
		fprintf( stderr, "translated world to self of %d is %d\n",
			 i, rout[i] );
		errs++;
	    }
	    else if (i != myrank && rout[i] != MPI_UNDEFINED) {
		fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
			 i, rout[i] );
		errs++;
	    }
	}
	MPI_Group_free( &selfgroup );

	/* Exclude everyone in our group */
	{
	    int ii, *lranks, g1size;

	    MPI_Group_size( g1, &g1size );
	    
	    lranks = (int *)malloc( g1size * sizeof(int) );
	    for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
	    MPI_Group_excl( g1, g1size, lranks, &g6 );
	    if (g6 != MPI_GROUP_EMPTY) {
		fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
		errs++;
		MPI_Group_free( &g6 );
	    }
	    free( lranks );
	}
	
	/* Add tests for additional group operations */
	/* 
	   g2 = incl 1,3,7
	   g3 = excl 1,3,7
	   intersect ( w, g2 ) => g2
	   intersect ( w, g3 ) => g3
	   intersect ( g2, g3 ) => empty
	   
	   g4 = rincl 1:n-1:2
	   g5 = rexcl 1:n-1:2
	   union( g4, g5 ) => world
	   g6 = rincl n-1:1:-1 
	   g7 = rexcl n-1:1:-1
	   union( g6, g7 ) => concat of entries, similar to world
	   diff( w, g2 ) => g3
	*/
	MPI_Group_free( &g2 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_incl( g1, 1, range, &g4 );
	MPI_Group_union( g4, g5, &g45 );
	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );

	/* Now, duplicate the test, but using negative strides */
	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_incl( g1, 1, range, &g4 );

	MPI_Group_union( g4, g5, &g45 );

	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );
        MPI_Group_free( &g1 );

    if (myrank == 0) 
    {
	if (errs == 0) {
	    printf( " No Errors\n" );
	}
	else {
	    printf( "Found %d errors\n", errs );
	}
    }

    MPI_Finalize();
    return 0;
}
Example #19
FastQuery::FastQuery(const std::string& dataFileName,
                     const FQ::FileFormat ffmt,
                     const std::string& indexFileName,
                     const int v, const char *rcfile,
                     const char *logfile,
                     bool readOnly,
                     const MPI_Comm comm,
                     void *extra)
#endif
{
    ibis::util::setVerboseLevel(v);
    ibis::init(rcfile, logfile);

    dataFile = 0;
    indexFile = 0;
    metadataMgr = 0;
    // true if indexFile needs to be initiated.
    bool indexing = (indexFileName.compare("") != 0 &&
                     indexFileName.compare(dataFileName) != 0);

#ifndef FQ_NOMPI
    mpi_comm = comm;
    MPI_Group mpi_group;
    MPI_Comm_group(mpi_comm, &mpi_group);
    MPI_Group_size(mpi_group, &mpi_size);
    MPI_Group_rank(mpi_group, &mpi_rank);
#endif

    LOGGER(ibis::gVerbose > 0)
        << "FastQuery constructor invoked with datafileName=" << dataFileName
        << ", fileFormat=" << ffmt << ", readOnly=" << readOnly;

    // open the file
    std::string indexPath = "";
    switch (ffmt) {

#ifdef FQ_HAVE_HDF5
    case FQ::FQ_H5Part: indexPath = "/__H5PartIndex__";
    case FQ::FQ_HDF5: {
#ifdef FQ_NOMPI
        if (readOnly == false && indexing == true) {
            dataFile = new HDF5(dataFileName, true, indexPath);
        }
        else {
            dataFile = new HDF5(dataFileName, readOnly, indexPath);
        }
#else
        if (readOnly == false && indexing == true) {
            dataFile = new HDF5(dataFileName, mpi_comm, true, indexPath);
        }
        else {
            dataFile = new HDF5(dataFileName, mpi_comm, readOnly,
                                indexPath);
        }
#endif
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
#ifdef FQ_NOMPI
            indexFile = new HDF5(indexFileName, readOnly, indexPath);
#else
            indexFile = new HDF5(indexFileName, mpi_comm, readOnly,
                                 indexPath);
#endif
            if (indexFile->isValid() == false) {
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
        break;}
#endif


#ifdef FQ_HAVE_NETCDF
    case FQ::FQ_NetCDF: {
#ifndef FQ_NOMPI
        // netcdf is not supported in MPI mode yet
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery::FastQuery:"
            << " MPI is not supported for NetCDF yet";
#else
        if (readOnly == false && indexing == true) {
            dataFile = new NETCDF(dataFileName, true, "");
        }
        else {
            dataFile = new NETCDF(dataFileName, readOnly, "");
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new NETCDF(indexFileName, readOnly, "");
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
#endif
        break;}
#endif


#ifdef FQ_HAVE_PNETCDF
    case FQ::FQ_pnetCDF: {
        if (readOnly == false && indexing == true) {
            dataFile = new PNETCDF(dataFileName, true, "", mpi_comm);
        } else {
            dataFile = new PNETCDF(dataFileName, readOnly, "", mpi_comm);
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new PNETCDF(indexFileName, readOnly, "", mpi_comm);
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        } else {
            indexFile = dataFile;
        }
        break;}
#endif


#ifdef FQ_HAVE_BP
    case FQ::FQ_BP: {
        MPI_Comm comm = MPI_COMM_WORLD;
#ifndef FQ_NOMPI
        comm = mpi_comm;
#endif
        bool streaming = true;
        float timeout = 0.0;
        enum ADIOS_READ_METHOD read_method = ADIOS_READ_METHOD_BP;
        if (extra != 0) {
            const BPExtras &bpx = *static_cast<BPExtras*>(extra);
            streaming = bpx.streaming;
            read_method = bpx.read_method;
            if (bpx.timeout != 0.0)
                timeout = bpx.timeout;
            else if (read_method == ADIOS_READ_METHOD_DATASPACES ||
                     read_method == ADIOS_READ_METHOD_DIMES ||
                     read_method == ADIOS_READ_METHOD_FLEXIO)
                timeout = FQ_ADIOS_STREAM_TIMEOUT;
        }

        if (readOnly == false && indexing == true) {
            dataFile = new BPArrayIODriver
                (dataFileName, "", comm, read_method, timeout, streaming);
        }
        else {
            dataFile = new BPArrayIODriver
                (dataFileName, "", comm, read_method, timeout, streaming);
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new BPArrayIODriver
                (indexFileName, "", comm, read_method, timeout, streaming);
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
        break;}
#endif
    default: {
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery::FastQuery: unsupport file model";
        break;}
    }

    if (dataFile == 0 || indexFile == 0) {
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery:"
            << " failed to initialize the FastQuery object";
        return;
    }
#if defined(DEBUG) && DEBUG+0 > 0
    report_timing = true;
#else
#ifdef FQ_REPORT_STATISTIC
    if (! report_timing)
        report_timing = ibis::gParameters().isTrue(FQ_REPORT_STATISTIC);
#endif
    if (ibis::gVerbose < 0) {
        report_timing = false;
    }
    else if (! report_timing) {
        if (ibis::gVerbose > 3) {
            report_timing = true;
        }
        else {
            report_timing =
                ibis::gParameters().isTrue("FastQuery.reportTiming");
        }
    }
#endif

#ifndef FQ_NOMPI
    if (report_timing)
        report_timing = (ibis::gVerbose > 5 || mpi_rank == 0);
#endif

    ibis::horometer timer;
    if (report_timing)
        timer.start();
    // initialize information manager
    metadataMgr = new MetadataMgr(*dataFile);
    if (report_timing) {
        timer.stop();
        LOGGER(true) << "Statistic\tFQ::init\t"
                     << timer.CPUTime() << "\t" << timer.realTime();
    }
    LOGGER(ibis::gVerbose > 2)
        << "FastQuery: successfully initialized the FastQuery object";
} // FastQuery::FastQuery
Example #20
int main(int argc, char **argv) {

	int n, rank, size, new_rank;
	int count;
	unsigned char black[] = { 0, 0, 0 };

	MPI::Init(argc, argv);
	size = MPI::COMM_WORLD.Get_size();
	rank = MPI::COMM_WORLD.Get_rank();
	MPI_Status status ;
	MPI_Request request;

	int FstSkelSize;
	int* ranks1;
	int* ranks2;
	ranks1 = (int*)malloc((size/2)*sizeof(int));
	ranks2 = (int*)malloc((size/2)*sizeof(int));

	for (int i=0; i<size/2; i++){
		ranks1[i] = i;
		ranks2[i] = (size/2)+i;
	}

	MPI_Group orig_group, group_farm,group_map;
	MPI_Comm new_comm,comm_farm;

	/* Extract the original group handle */

	MPI_Comm_group(MPI_COMM_WORLD, &orig_group);


	/* Divide tasks into two distinct groups based upon rank */

	FstSkelSize = size/2;
	if (rank < size/2) { MPI_Group_incl(orig_group, size/2, ranks1, &group_map);}
	else { MPI_Group_incl(orig_group, size/2, ranks2, &group_map); }

	MPI_Comm_create(MPI_COMM_WORLD, group_map, &new_comm);

	MPI_Group_rank (group_map, &new_rank);

	cout << "myRank: " << rank <<" myNewRank: "<< new_rank << endl;

	unsigned char** MatrixR;
	unsigned char** MatrixG;
	unsigned char** MatrixB;

	int height,width =0;

	//parameters for the scatter
	int * displ;
	int *scounts;
	displ = (int*)malloc((size-FstSkelSize-1)*sizeof(int));
	scounts = (int*)malloc((size-FstSkelSize-1)*sizeof(int));


	double x,y;
	double xstart,xstep,ystart,ystep;
	double xend, yend;
	double z,zi,newz,newzi;
	double colour;
	int iter,dest,source;
	long col;
	int i,j,k;
	int inset;
	int fd;
	int neg;

	int EOS = 0;

	//stream process for Mandelbrot farm
	if (rank == 0){

		for (int m=0; m<atoi(argv[1]);m++){

			/*Images have two possible sizes*/
			if (rand() > RAND_MAX/2){
				width = 800;
				height = 600;
			}
			else{
				width = 600;
				height = 480;
			}


			/*Generate randomly the extremes*/
			xstart = RandomGen(-2,2);
			//cout << "xstart " << xstart << endl;
			xend = RandomGen(xstart,2);
			//cout << "xend " << xend << endl;
			ystart = RandomGen(-2,2);
			yend = RandomGen(ystart,2);
			//			 all processes in a communicator
			//receive the rank of the destination
			MPI_Recv(&dest,1,MPI_INT,MPI_ANY_SOURCE,5,MPI_COMM_WORLD,&status);
			MPI_Send(&EOS,1, MPI_INT, dest, 5, MPI_COMM_WORLD);
			//send width height and parameters
			MPI_Send(&width,1, MPI_INT, dest, 5, MPI_COMM_WORLD);
			MPI_Send(&height,1, MPI_INT, dest, 5, MPI_COMM_WORLD);
			MPI_Send(&xstart,1, MPI_DOUBLE, dest, 5, MPI_COMM_WORLD);
			MPI_Send(&ystart,1, MPI_DOUBLE, dest, 5, MPI_COMM_WORLD);
			MPI_Send(&xend,1, MPI_DOUBLE, dest, 5, MPI_COMM_WORLD);
			MPI_Send(&yend,1, MPI_DOUBLE, dest, 5, MPI_COMM_WORLD);

		}
		EOS++;
		for (int i=0; i<FstSkelSize-1;i++){
			MPI_Recv(&dest,1,MPI_INT,MPI_ANY_SOURCE,5,MPI_COMM_WORLD,&status);
			MPI_Send(&EOS,1, MPI_INT, dest, 5, MPI_COMM_WORLD);
		}
	}


	if (rank > 0 && rank < FstSkelSize){
		iter = 200;

		while (1){
			//Send my rank for on demand farm
			MPI_Send(&rank,1, MPI_INT, 0, 5, MPI_COMM_WORLD);
			//cout << "rank sent "<< rank<<endl;
			MPI_Recv(&EOS,1,MPI_INT,0,5,MPI_COMM_WORLD,&status);
			//cout <<"EOS: " << EOS << endl;
			if (EOS != 0) break;
			MPI_Recv(&width,1,MPI_INT,0,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&height,1,MPI_INT,0,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&xstart,1,MPI_DOUBLE,0,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&ystart,1,MPI_DOUBLE,0,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&xend,1,MPI_DOUBLE,0,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&yend,1,MPI_DOUBLE,0,5,MPI_COMM_WORLD,&status);

			//Allocate space for matrixes
			malloc2duchar(&MatrixR,width,height);
			malloc2duchar(&MatrixG,width,height);
			malloc2duchar(&MatrixB,width,height);



			/* these are used for calculating the points corresponding to the pixels */
			xstep = (xend-xstart)/width;
			ystep = (yend-ystart)/height;

			/*the main loop */
			x = xstart;
			//cout << "xstart: "<<xstart<<endl;
			y = ystart;
			//cout << "ystart: "<<ystart<<endl;
			for (i=0; i<width; i++)
			{
				//printf("Now on line: %d\n", i);
				for (j=0; j<height; j++)
				{
					z = 0;
					zi = 0;
					inset = 1;
					for (k=0; k<iter; k++)
					{
						/* z^2 = (a+bi)(a+bi) = a^2 + 2abi - b^2 */
						newz = (z*z)-(zi*zi) + x;
						newzi = 2*z*zi + y;
						z = newz;
						zi = newzi;
						if(((z*z)+(zi*zi)) > 4)
						{
							inset = 0;
							colour = k;
							k = iter;
							break;
						}
					}
					if (inset)
					{
						MatrixR[i][j] = 255;
						MatrixG[i][j] = 255;
						MatrixB[i][j] = 255;
					}
					else
					{
						MatrixR[i][j] = sin(colour)*255;
						MatrixG[i][j] = cos(colour)*255;
						MatrixB[i][j] = (colour/iter)*255;
					}
					x += xstep;

				}
				y += ystep;

				x = xstart;
			}

			//pass the matrix to the scatterer
			MPI_Send(&rank,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);
			MPI_Send(&EOS,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);
			MPI_Send(&width,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);
			MPI_Send(&height,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);
			MPI_Send(&(MatrixR[0][0]),width*height, MPI_UNSIGNED_CHAR, FstSkelSize,15, MPI_COMM_WORLD);
			MPI_Send(&(MatrixG[0][0]),width*height, MPI_UNSIGNED_CHAR, FstSkelSize,15, MPI_COMM_WORLD);
			MPI_Send(&(MatrixB[0][0]),width*height, MPI_UNSIGNED_CHAR, FstSkelSize,15, MPI_COMM_WORLD);
			//free the matrixes
			free2duchar(&MatrixB);
			free2duchar(&MatrixG);
			free2duchar(&MatrixR);

		}
		MPI_Send(&rank,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);
		MPI_Send(&EOS,1, MPI_INT, FstSkelSize, 5, MPI_COMM_WORLD);

		//	cout << "end of " << rank << endl;

	}

	double widthLocal,widthBound;
	unsigned char *BufferR;
	unsigned char *BufferG;
	unsigned char *BufferB;
	int myWidth;

	// in this case I am the scatterer
	if (rank == FstSkelSize){
		int oldwidth=width;
		int oldheight=height;
		int count =0;

		while(count < rank-1){
			//receive the end of stream
			MPI_Recv(&source,1,MPI_INT,MPI_ANY_SOURCE,5,MPI_COMM_WORLD,&status);
			MPI_Recv(&EOS,1,MPI_INT,source,5,MPI_COMM_WORLD,&status);
			//cout << rank << " " << "EOS: "<< EOS << endl;
			if (EOS == 0){
				MPI_Recv(&width,1,MPI_INT,source,5,MPI_COMM_WORLD,&status);
				MPI_Recv(&height,1,MPI_INT,source,5,MPI_COMM_WORLD,&status);

				widthBound = floor( (double)width / (double)(size-FstSkelSize-1) );
				cout << widthBound*height << endl;

				if (oldwidth != width || oldheight != height){
					if (oldwidth > 0){
						free2duchar(&MatrixB);
						free2duchar(&MatrixG);
						free2duchar(&MatrixR);}
					//Allocate space for matrixes
					malloc2duchar(&MatrixR,width,height);
					malloc2duchar(&MatrixG,width,height);
					malloc2duchar(&MatrixB,width,height);
				}


				for (int dest = 1; dest <= (size-FstSkelSize-1); dest++)
				{
					scounts[dest-1] = widthBound;
					displ[dest-1] = 0;
					if (dest == size-FstSkelSize-1 ) {
						cout <<"ciao" <<endl;
						scounts[dest-1] = scounts[dest-1] +(width % (size-FstSkelSize-1));
					}
					scounts[dest-1] = scounts [dest-1]*height;
					if (dest > 1)
						MPI_Send(&(scounts[dest-1]),1, MPI_INT, rank+dest-1, 5, MPI_COMM_WORLD);
					else
						myWidth=scounts[dest-1];
				}

				cout << "rank " << rank << " mywidth "<< scounts[0] <<endl;
				MPI_Recv(&(MatrixR[0][0]),width*height,MPI_UNSIGNED_CHAR,source,15,MPI_COMM_WORLD,&status);
				MPI_Recv(&(MatrixG[0][0]),width*height,MPI_UNSIGNED_CHAR,source,15,MPI_COMM_WORLD,&status);
				MPI_Recv(&(MatrixB[0][0]),width*height,MPI_UNSIGNED_CHAR,source,15,MPI_COMM_WORLD,&status);

			}
			else count++;
		}
	}


	if (rank > FstSkelSize && rank < size-1){
		MPI_Recv(&myWidth,1,MPI_INT,FstSkelSize,5,MPI_COMM_WORLD,&status);
		cout << rank << " " << myWidth << endl;
		/*//Do computations on sub-image
						CImg<unsigned char> oImage(widthLocal,height,1,3,0);


						for(int x = 0; x < widthLocal; x++)
						{
							for (int y = 0; y < height; y++)
							{
								oImage.draw_point(x, y, black);
								const unsigned char color[] = { MatrixR[x][y], MatrixG[x][y],MatrixB[x][y] };
								oImage.draw_point(x, y, color);
							}
						}
						char a[16]; int n;
						n = sprintf(a,"%d.png",(rank+count)*(count+1));
						count++;
						//oImage.save(a);*/
	}

	if(rank >= FstSkelSize && rank < size-1){
		BufferR = (unsigned char*)malloc(myWidth*sizeof(unsigned char));
		MPI_Scatterv(&(MatrixR[0][0]),scounts,displ,MPI_UNSIGNED_CHAR,BufferR,myWidth,MPI_UNSIGNED_CHAR,0,new_comm);
		//	MPI_Scatterv(&(MatrixG[0][0]),scounts,displ,MPI_UNSIGNED_CHAR,BufferG,widthBound*height,MPI_UNSIGNED_CHAR,0,new_comm);
		//	MPI_Scatterv(&(MatrixB[0][0]),scounts,displ,MPI_UNSIGNED_CHAR,BufferB,widthBound*height,MPI_UNSIGNED_CHAR,0,new_comm);

	}




	MPI::Finalize();
	exit(0);



}
Example #21
FORTRAN_API void FORT_CALL mpi_group_rank_ ( MPI_Fint *group, MPI_Fint *rank, MPI_Fint *__ierr )
{
    int l_rank;
    *__ierr = MPI_Group_rank( MPI_Group_f2c(*group), &l_rank );
    *rank = l_rank;
}
Example #22
/**
 * Create a child group for the given parent group.
 *
 * @param[in] n #procs in this group (<= that in group_parent)
 * @param[in] pid_list The list of proc ids (w.r.t. group_parent)
 * @param[out] id_child Handle to store the created group
 * @param[in] id_parent Parent group 
 */
int comex_group_create(
        int n, int *pid_list, comex_group_t id_parent, comex_group_t *id_child)
{
    int status;
    int grp_me;
    comex_igroup_t *igroup_child = NULL;
    MPI_Group    *group_child = NULL;
    MPI_Comm     *comm_child = NULL;
    comex_igroup_t *igroup_parent = NULL;
    MPI_Group    *group_parent = NULL;
    MPI_Comm     *comm_parent = NULL;

    /* create the node in the linked list of groups and */
    /* get the child's MPI_Group and MPI_Comm, to be populated shortly */
    comex_create_group_and_igroup(id_child, &igroup_child);
    group_child = &(igroup_child->group);
    comm_child  = &(igroup_child->comm);

    /* get the parent's MPI_Group and MPI_Comm */
    igroup_parent = comex_get_igroup_from_group(id_parent);
    group_parent = &(igroup_parent->group);
    comm_parent  = &(igroup_parent->comm);

    status = MPI_Group_incl(*group_parent, n, pid_list, group_child);
    if (status != MPI_SUCCESS) {
        comex_error("MPI_Group_incl: Failed ", status);
    }

    {
        MPI_Comm comm, comm1, comm2;
        int lvl=1, local_ldr_pos;
        MPI_Group_rank(*group_child, &grp_me);
        if (grp_me == MPI_UNDEFINED) {
            *comm_child = MPI_COMM_NULL;
            /* FIXME: keeping the group around for now */
            return COMEX_SUCCESS;
        }
        /* SK: sanity check for the following bitwise operations */
        assert(grp_me>=0);
        MPI_Comm_dup(MPI_COMM_SELF, &comm); /* FIXME: can be optimized away */
        local_ldr_pos = grp_me;
        while(n>lvl) {
            int tag=0;
            int remote_ldr_pos = local_ldr_pos^lvl;
            if (remote_ldr_pos < n) {
                int remote_leader = pid_list[remote_ldr_pos];
                MPI_Comm peer_comm = *comm_parent;
                int high = (local_ldr_pos<remote_ldr_pos)?0:1;
                MPI_Intercomm_create(
                        comm, 0, peer_comm, remote_leader, tag, &comm1);
                MPI_Comm_free(&comm);
                MPI_Intercomm_merge(comm1, high, &comm2);
                MPI_Comm_free(&comm1);
                comm = comm2;
            }
            local_ldr_pos &= ((~0)^lvl);
            lvl<<=1;
        }
        *comm_child = comm;
        /* cleanup temporary group (from MPI_Group_incl above) */
        MPI_Group_free(group_child);
        /* get the actual group associated with comm */
        MPI_Comm_group(*comm_child, group_child);
    }

    return COMEX_SUCCESS;
}
Example #23
 group(MPI_Comm parent) : mpi_comm(parent)
 {
     MPI_Comm_group(this->mpi_comm,  &this->mpi_group);
     MPI_Group_size(this->mpi_group, &this->size);
     MPI_Group_rank(this->mpi_group, &this->rank);
 }
Example #24
void configure_runtime_setup()
{
  const char *str;

  MPI_Group world_grp, run_grp, fft_grp;
  int i, irun, nrun, ifft, nfft, sel[MAX_PROCS];
  char **str1, **str2;
  char *buf1, *buf2;

  MPI_Comm_size(mpi.comm.world,&mpi.world.size);
  MPI_Comm_rank(mpi.comm.world,&mpi.world.rank);

  /*
  //  for cart_error to work
  */
  num_procs = mpi.world.size;
  local_proc_id = mpi.world.rank;

  MPI_Comm_group(mpi.comm.world,&world_grp);

  /*
  //  ************************************************
  //
  //  Main configuration of the runtime setup
  //
  //  ------------------------------------------------
  */
  str = extract_option1("mpi-setup","mpi",NULL);
  if(str == NULL)
    {
      /* 
      //  Default behaviour: all tasks are run tasks, at least one K-slice per fft task 
      */
      irun = 0;
      nrun = mpi.world.size;

      ifft = 0;
      nfft = MIN(num_grid,mpi.world.size);
    }
  else
    {
      if(sscanf(str,"run:%d-%d,fft:%d-%d",&irun,&nrun,&ifft,&nfft) != 4)
	{
	  cart_error("A valid format for the --mpi-setup option argument is run:N1-N2,fft:N3-N4, where N1-N2 is the range of run tasks ids, and N3-N4 is the range of fft tasks ids.");
	}

      if(irun<0 || irun>nrun)
	{
	  cart_error("Invalid range %d - %d",irun,nrun);
	}
      if(ifft<0 || ifft>nfft)
	{
	  cart_error("Invalid range %d - %d",ifft,nfft);
	}

      if(nrun >= mpi.world.size)
	{
	  cart_error("The range of run tasks overflows the available number of tasks %d",mpi.world.size);
	}
      if(nfft >= mpi.world.size)
	{
	  cart_error("The range of fft tasks overflows the available number of tasks %d",mpi.world.size);
	}

      nrun = nrun - irun + 1;
      nfft = nfft - ifft + 1;
    }
 
  cart_assert(irun>=0 && irun+nrun<=mpi.world.size);
  cart_assert(ifft>=0 && ifft+nfft<=mpi.world.size);

  for(i=0; i<nrun; i++) sel[i] = irun + i;
  MPI_Group_incl(world_grp,nrun,sel,&run_grp);

  for(i=0; i<nfft; i++) sel[i] = ifft + i;
  MPI_Group_incl(world_grp,nfft,sel,&fft_grp);

  /*
  //  ************************************************
  //
  //  Create our communicators, etc (no customization here)
  */
  MPI_Comm_create(mpi.comm.world,run_grp,&mpi.comm.run);
  MPI_Comm_create(mpi.comm.world,fft_grp,&mpi.comm.fft);

  mpi.task_type = 0;

  /*
  //  Sizes and ranks can only be safely queried from a group,
  //  not a communicator!!!
  */
  MPI_Group_rank(run_grp,&i);
  if(i != MPI_UNDEFINED)
    {
      mpi.task_type += MPI_TASK_TYPE_RUN;
      mpi.run.size = nrun;
      mpi.run.rank = i;
    }
  else
    {
      mpi.run.size = 0;
      mpi.run.rank = i;
    }

  MPI_Group_rank(fft_grp,&i);
  if(i != MPI_UNDEFINED)
    {
      mpi.task_type += MPI_TASK_TYPE_FFT;
      mpi.fft.size = nfft;
      mpi.fft.rank = i;
    }
  else
    {
      mpi.fft.size = 0;
      mpi.fft.rank = i;
    }

  MPI_Group_free(&world_grp);

  str = extract_option1("num-omp-threads","omp",NULL);
  if(str != NULL)
    {
#ifdef _OPENMP
      if(sscanf(str,"%d",&i)!=1 || i<1 || i>256)
	{
	  cart_error("--num-omp-threads=<num> option requires a positive integer <num> as an argument");
	}
      omp_set_num_threads(i);
      cart_debug("num openmp threads = %u", omp_get_max_threads() );
#else
      cart_debug("OpenMP support is not compiled in; ignoring --num-omp-threads option.");
#endif
    }

  root_grid_fft_init(run_grp,fft_grp);

  MPI_Group_free(&run_grp);
  MPI_Group_free(&fft_grp);

  /*
  //  Measure tasks per node
  */
  buf1 = cart_alloc(char,mpi.world.size*MPI_MAX_PROCESSOR_NAME);
  buf2 = cart_alloc(char,mpi.world.size*MPI_MAX_PROCESSOR_NAME);
  str1 = cart_alloc(char*,mpi.world.size);
  str2 = cart_alloc(char*,mpi.world.size);
  for(i=0; i<mpi.world.size; i++)
    {
      str1[i] = buf1 + i*MPI_MAX_PROCESSOR_NAME;
      str2[i] = buf2 + i*MPI_MAX_PROCESSOR_NAME;
    }

  MPI_Get_processor_name(str1[0],&i);

  for(i=1; i<mpi.world.size; i++)
    {
      strcpy(str1[i],str1[0]);
    }

  MPI_Alltoall(buf1,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,buf2,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,mpi.comm.world);

  tasks_per_node = 0;
  for(i=0; i<mpi.world.size; i++)
    {
      if(strcmp(str2[i],str2[mpi.world.rank]) == 0) tasks_per_node++;
    }

  cart_debug("Tasks per node: %d",tasks_per_node);

  cart_assert(tasks_per_node > 0);

  cart_free(buf1);
  cart_free(buf2);
  cart_free(str1);
  cart_free(str2);
}
Example #25
value caml_mpi_group_rank(value group)
{
  int rank;
  MPI_Group_rank(Group_val(group), &rank);
  return Val_int(rank);
}
Example #26
int main( int argc, char **argv )
{
    int errs=0, toterr;
    MPI_Group basegroup;
    MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10;
    MPI_Group g3a, g3b;
    MPI_Comm  comm, newcomm, splitcomm, dupcomm;
    int       i, grp_rank, rank, grp_size, size, result;
    int       nranks, *ranks, *ranks_out;
    int       range[1][3];
    int       worldrank;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &worldrank );

    comm = MPI_COMM_WORLD;

    MPI_Comm_group( comm, &basegroup );
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

/* Get the basic information on this group */
    MPI_Group_rank( basegroup, &grp_rank );
    if (grp_rank != rank) {
	errs++;
	fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank );
    }

    MPI_Group_size( basegroup, &grp_size );
    if (grp_size != size) {
	errs++;
	fprintf( stdout, "group size %d != comm size %d\n", grp_size, size );
    }


/* Form a new communicator with inverted ranking */
    MPI_Comm_split( comm, 0, size - rank, &newcomm );
    MPI_Comm_group( newcomm, &g1 );
    ranks	  = (int *)malloc( size * sizeof(int) );
    ranks_out = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;
    nranks = size;
    MPI_Group_translate_ranks( g1, nranks, ranks, basegroup, ranks_out );
    for (i=0; i<size; i++) {
	if (ranks_out[i] != (size - 1) - i) {
	    errs++;
	    fprintf( stdout, "Translate ranks got %d expected %d\n", 
		     ranks_out[i], (size - 1) - i );
	}
    }

/* Check Compare */
    MPI_Group_compare( basegroup, g1, &result );
    if (result != MPI_SIMILAR) {
	errs++;
	fprintf( stdout, "Group compare should have been similar, was %d\n",
		 result );
    }
    MPI_Comm_dup( comm, &dupcomm );
    MPI_Comm_group( dupcomm, &g2 );
    MPI_Group_compare( basegroup, g2, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group compare should have been ident, was %d\n",
		 result );
    }
    MPI_Comm_split( comm, rank < size/2, rank, &splitcomm );
    MPI_Comm_group( splitcomm, &g3 );
    MPI_Group_compare( basegroup, g3, &result );
    if (result != MPI_UNEQUAL) {
	errs++;
	fprintf( stdout, "Group compare should have been unequal, was %d\n",
		 result );
    }

    /* Build two groups that have this process and one other, but do not
       have the same processes */
    ranks[0] = rank;
    ranks[1] = (rank + 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3a );
    ranks[1] = (rank + size - 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3b );
    MPI_Group_compare( g3a, g3b, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
	fprintf( stdout, "Group compare of equal sized but different groups should have been unequal, was %d\n", result );
    }
    

/* Build two new groups by excluding members; use Union to put them
   together again */

/* Exclude 0 */
    for (i=0; i<size; i++) ranks[i] = i;
    MPI_Group_excl( basegroup, 1, ranks, &g4 );
/* Exclude 1-(size-1) */
    MPI_Group_excl( basegroup, size-1, ranks+1, &g5 );
    MPI_Group_union( g5, g4, &g6 );
    MPI_Group_compare( basegroup, g6, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	/* See ordering requirements on union */
	fprintf( stdout, "Group excl and union did not give ident groups\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g6, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }
    MPI_Group_union( basegroup, g4, &g7 );
    MPI_Group_compare( basegroup, g7, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	fprintf( stdout, "Group union of overlapping groups failed\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g7, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }

/* Use range_excl instead of ranks */
    /* printf ("range excl\n" ); fflush( stdout ); */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g8 );
    /* printf( "out  of range excl\n" ); fflush( stdout ); */
    MPI_Group_compare( g5, g8, &result );
    /* printf( "out of compare\n" ); fflush( stdout ); */
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group range excl did not give ident groups\n" );
    }

    /* printf( "intersection\n" ); fflush( stdout ); */
    MPI_Group_intersection( basegroup, g4, &g9 );
    MPI_Group_compare( g9, g4, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group intersection did not give ident groups\n" );
    }

/* Exclude EVERYTHING and check against MPI_GROUP_EMPTY */
    /* printf( "range excl all\n" ); fflush( stdout ); */
    range[0][0] = 0;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g10 );

    /* printf( "done range excl all\n" ); fflush(stdout); */
    MPI_Group_compare( g10, MPI_GROUP_EMPTY, &result );
    /* printf( "done compare to MPI_GROUP_EMPTY\n" ); fflush(stdout); */

    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, 
		 "MPI_GROUP_EMPTY didn't compare against empty group\n");
    }

    /* printf( "freeing groups\n" ); fflush( stdout ); */
    MPI_Group_free( &basegroup );
    MPI_Group_free( &g1 );
    MPI_Group_free( &g2 );
    MPI_Group_free( &g3 );
    MPI_Group_free( &g3a );
    MPI_Group_free( &g3b );
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g6 );
    MPI_Group_free( &g7 );
    MPI_Group_free( &g8 );
    MPI_Group_free( &g9 );
    MPI_Group_free( &g10 );
    MPI_Comm_free( &dupcomm );
    MPI_Comm_free( &splitcomm );
    MPI_Comm_free( &newcomm );

    MPI_Allreduce( &errs, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (worldrank == 0) {
	if (toterr == 0) 
	    printf( " No Errors\n" );
	else
	    printf( "Found %d errors in MPI Group routines\n", toterr );
    }

    MPI_Finalize();
    return toterr;
}
Example #27
int main(int argc, char **argv) {

	int rankLeft[4] = {0, 1, 2, 3}, rankRight[4] = {4, 5, 6, 7};
	int i, result;
	char outStr[600];

	int nProcs, myRank;
	MPI_Group grpWorld, grpNew;
	MPI_Comm commNew;

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
	MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

	MPI_Comm_group(MPI_COMM_WORLD, &grpWorld);
	if (myRank < nProcs	/ 2) {
		MPI_Group_incl(grpWorld, nProcs / 2, rankLeft, &grpNew);
	} else {
		MPI_Group_incl(grpWorld, nProcs / 2, rankRight, &grpNew);
	}
	MPI_Comm_create(MPI_COMM_WORLD, grpNew, &commNew);

	int myRankCommNew, nProcsCommNew;
	int myRankGrpNew, nProcsGrpNew;

	MPI_Comm_rank(commNew, &myRankCommNew);
	MPI_Comm_size(commNew, &nProcsCommNew);
	MPI_Group_rank(grpNew, &myRankGrpNew);
	MPI_Group_size(grpNew, &nProcsGrpNew);

	fprintf(stdout, "WorldRank: %d in %d, NewCommRank: %d in %d, NewGrpRank: %d in %d\n",
		myRank, nProcs, myRankCommNew, nProcsCommNew, myRankGrpNew, nProcsGrpNew);

	MPI_Barrier(MPI_COMM_WORLD);

	int sendBuf = myRank, recvBuf;

	MPI_Allreduce(&sendBuf, &recvBuf, 1, MPI_INT, MPI_SUM, commNew);

	fprintf(stdout, "WorldRank = %d, sendBuf = %d, recvBuf = %d\n", myRank, sendBuf, recvBuf);
	fflush(stdout);

	MPI_Barrier(MPI_COMM_WORLD);

	int ranks1[8] = {0, 1, 2, 3, 4, 5, 6, 7}, ranks2[8];

	MPI_Group_compare(grpWorld, grpNew, &result);
	MPI_Group_translate_ranks(grpWorld, nProcs, ranks1, grpNew, ranks2);
	
	if (myRank == 0) {
		fprintf(stdout, "result = %d\n", result);
	}
	int len = sprintf(outStr, "rank %d: ", myRank);
	for (i = 0; i < nProcs; i++) {
		len += sprintf(outStr + len, "%d = %d ", ranks1[i], ranks2[i]);
	}
	fprintf(stdout, "%s\n", outStr);


	MPI_Comm_free(&commNew);
	MPI_Group_free(&grpNew);
	MPI_Group_free(&grpWorld);

	MPI_Finalize();
	return 0;
}
Example #28
int main(int argc,char* argv[])
{ 
  int rank;
  int size;
  int new_rank;             /* ranks are always contiguous, starting at 0 */
  int sendbuf;
  int recvbuf;
  int count;
  int ranks1[4]={0,1,2,3};  /* list of process ranks for 1st new group */
  int ranks2[4]={4,5,6,7};  /* list of process ranks for 2nd new group */ 

  MPI_Group orig_group;     /* opaque group object from MPI_COMM_WORLD */
  MPI_Group new_group;      /* new group, to be created */
  MPI_Comm  new_comm;       /* new communicator, to be created */

  /* standard startup */
  MPI_Init(&argc,&argv); 
  MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
  MPI_Comm_size(MPI_COMM_WORLD, &size); 

  /* need to impose a certain size of MPI_COMM_WORLD
  ** as we've hardwired the list of process ranks in
  ** the new groups */
  if(size != NPROCS) {
    fprintf(stderr,"Error: Must have %d processes in MPI_COMM_WORLD\n", NPROCS);
    MPI_Abort(MPI_COMM_WORLD,1);
  }

  /* going to broadcast my rank amongst the default communicator */
  sendbuf = rank; 
  count = 1;

  /* Extract the original group handle, as we can only make new
  ** groups in MPI, through reference to existing groups */ 
  MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
      
  /* Divide processes into two distinct groups based upon rank.
  ** We're relying on integer division here, so let's be careful! */ 
  if (rank < NPROCS/2) {
    MPI_Group_incl(orig_group, NPROCS/2, ranks1, &new_group);
  } 
  else { 
    MPI_Group_incl(orig_group, NPROCS/2, ranks2, &new_group);
  } 

  /* Create new communicator and then perform collective communications.
  ** Notice that all processes have called the group and communicator
  ** creation functions. */ 
  MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm); 

  /* Total the ranks--as associated with MPI_COMM_WORLD--within the newer,
  ** smaller communicators */
  MPI_Allreduce(&sendbuf, &recvbuf, count, MPI_INT, MPI_SUM, new_comm); 

  /* get rank in new group */
  MPI_Group_rank (new_group, &new_rank); 
  
  /* all processes print to screen */
  printf("rank= %d newrank= %d recvbuf= %d\n",rank,new_rank,recvbuf); 
  
  MPI_Finalize();

  return EXIT_SUCCESS;
} 
Example #29
/*
  Create a child group for the given parent group.
  @param n IN #procs in this group (<= that in group_parent)
  @param pid_list IN The list of proc ids (w.r.t. group_parent)
  @param group_out OUT Handle to store the created group
  @param group_parent IN Parent group 
 */
void ARMCI_Group_create_child(int n, int *pid_list, ARMCI_Group *group_out,
			      ARMCI_Group *grp_parent) {
    int i,grp_me;
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group_out;
#ifdef ARMCI_GROUP
    armci_grp_attr_t *grp_attr = &igroup->grp_attr;
    int world_me, parent_grp_me;
#else
    int rv;
    ARMCI_iGroup *igroup_parent = (ARMCI_iGroup *)grp_parent;
    MPI_Group *group_parent;
    MPI_Comm *comm_parent;
#endif


#ifdef ARMCI_GROUP
    ARMCI_Group_rank(grp_parent, &parent_grp_me);
    for(i=0; i<n; i++) {
      if(pid_list[i] == parent_grp_me) {
	break;
      }
    }
    if(i==n) {
      /*this initialization is used in group free*/
      grp_attr->nproc=0;
      grp_attr->proc_list = NULL; 
      return; /*not in group to be created*/
    }
#endif
    
    for(i=0; i<n-1;i++) {
       if(pid_list[i] > pid_list[i+1]){
         armci_die("ARMCI_Group_create: Process ids are not sorted ",armci_me);
         break;
       }
    }
    
#ifdef ARMCI_GROUP
    grp_attr->grp_clus_info = NULL;
    grp_attr->nproc = n;
    grp_attr->proc_list = (int *)malloc(n*sizeof(int));
    assert(grp_attr->proc_list!=NULL);
    for(i=0; i<n; i++)  {
      grp_attr->proc_list[i] = ARMCI_Absolute_id(grp_parent,pid_list[i]); 
    }
    /*  MPI_Comm_rank(MPI_COMM_WORLD, &world_me); */
    world_me = armci_msg_me();
    grp_attr->grp_me = grp_me = MPI_UNDEFINED;
    for(i=0; i<n; i++) {
      if(igroup->grp_attr.proc_list[i] == world_me) {
	grp_attr->grp_me = grp_me = i;
	break;
      }
    }
    if(grp_me != MPI_UNDEFINED) armci_cache_attr(group_out);

    armci_msg_group_barrier(group_out);
#else
    /* NOTE: default group is the parent group */
    group_parent = &(igroup_parent->igroup);
    comm_parent  = &(igroup_parent->icomm);

    rv=MPI_Group_incl(*group_parent, n, pid_list, &(igroup->igroup));
    if(rv != MPI_SUCCESS) armci_die("MPI_Group_incl: Failed ",armci_me);
    
    rv = MPI_Comm_create(*comm_parent, (MPI_Group)(igroup->igroup), 
                         (MPI_Comm*)&(igroup->icomm));
    if(rv != MPI_SUCCESS) armci_die("MPI_Comm_create: Failed ",armci_me);

    /* processes belonging to this group should cache attributes */
    MPI_Group_rank((MPI_Group)(igroup->igroup), &grp_me);
    igroup->grp_attr.grp_clus_info=NULL;
    if(grp_me != MPI_UNDEFINED) armci_cache_attr(group_out);
#endif
}
Example #30
int main(int argc, char *argv[]) {
  int numprocs, myrank, grank;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

  MPI_Group orig_group, new_group; 
  MPI_Comm new_comm; 

  /* Extract the original group handle */ 
  MPI_Comm_group(MPI_COMM_WORLD, &orig_group); 

  double sendTime, recvTime, min, avg, max;
  double time[3] = {0.0, 0.0, 0.0};
  int msg_size;
  MPI_Status mstat;
  int i=0, pe, trial, hops;
  char name[30];

  char *send_buf = (char *)malloc(MAX_MSG_SIZE);
  char *recv_buf = (char *)malloc(MAX_MSG_SIZE);

  for(i = 0; i < MAX_MSG_SIZE; i++) {
    recv_buf[i] = send_buf[i] = (char) (i & 0xff);
  }

  // allocate the routing map.
  int *map = (int *) malloc(sizeof(int) * numprocs);
  TopoManager *tmgr;
  int dimNZ, numRG, x, y, z, t, bcastSend[3], bcastRecv[3];

  if(myrank == 0) {
    tmgr = new TopoManager();
#if CREATE_JOBS
    numRG = tmgr->getDimNX() * (tmgr->getDimNY() - 2) * 2 * tmgr->getDimNT();
#else
    numRG = tmgr->getDimNX() * tmgr->getDimNY() * 2 * tmgr->getDimNT();
#endif
    dimNZ = tmgr->getDimNZ();
    for (int i=1; i<numprocs; i++) {
      bcastSend[0] = dimNZ;
      bcastSend[1] = numRG;
      tmgr->rankToCoordinates(i, x, y, z, t);
      bcastSend[2] = z;
      MPI_Send(bcastSend, 3, MPI_INT, i, 1, MPI_COMM_WORLD);
    }
    tmgr->rankToCoordinates(0, x, y, z, t);
  } else {
      MPI_Recv(bcastRecv, 3, MPI_INT, 0, 1, MPI_COMM_WORLD, &mstat);
      dimNZ = bcastRecv[0];
      numRG = bcastRecv[1];
      z = bcastRecv[2];
  }

  MPI_Barrier(MPI_COMM_WORLD);

  if (myrank == 0) {
    printf("Torus Dimensions %d %d %d %d\n", tmgr->getDimNX(), tmgr->getDimNY(), dimNZ, tmgr->getDimNT());
  }

#if CREATE_JOBS
  for (hops=0; hops < 2; hops++) {
#else
  for (hops=0; hops < dimNZ/2; hops++) {
#endif
    int *mapRG = (int *) malloc(sizeof(int) * numRG);
    if (myrank == 0) {
      // Rank 0 makes up a routing map.
      build_process_map(numprocs, map, hops, numRG, mapRG);
    }

    // Broadcast the routing map.
    MPI_Bcast(map, numprocs, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(mapRG, numRG, MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Group_incl(orig_group, numRG, mapRG, &new_group);
    MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
    MPI_Group_rank(new_group, &grank);
    
#if CREATE_JOBS
    sprintf(name, "xt4_job_%d_%d.dat", numprocs, hops);
#else
    sprintf(name, "xt4_line_%d_%d.dat", numprocs, hops);
#endif
   
    for (msg_size=MIN_MSG_SIZE; msg_size<=MAX_MSG_SIZE; msg_size=(msg_size<<1)) {
      for (trial=0; trial<10; trial++) {

	pe = map[myrank];
	if(pe != -1) {
          if(grank != MPI_UNDEFINED) MPI_Barrier(new_comm);

	  if(myrank < pe) {
	    // warmup
	    for(i=0; i<2; i++) {
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	    }

	    sendTime = MPI_Wtime();
	    for(i=0; i<NUM_MSGS; i++)
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	    for(i=0; i<NUM_MSGS; i++)
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	    recvTime = (MPI_Wtime() - sendTime) / NUM_MSGS;
	
	    // cooldown
	    for(i=0; i<2; i++) {
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	    }

	    if(grank != MPI_UNDEFINED) MPI_Barrier(new_comm);
	  } else {
	    // warmup
	    for(i=0; i<2; i++) {
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	    }

	    sendTime = MPI_Wtime();
	    for(i=0; i<NUM_MSGS; i++)
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	    for(i=0; i<NUM_MSGS; i++)
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	    recvTime = (MPI_Wtime() - sendTime) / NUM_MSGS;

	    // cooldown
	    for(i=0; i<2; i++) {
	      MPI_Recv(recv_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD, &mstat);
	      MPI_Send(send_buf, msg_size, MPI_CHAR, pe, 1, MPI_COMM_WORLD);
	    }

	    if(grank != MPI_UNDEFINED) MPI_Barrier(new_comm);
	  }

	  if(grank != MPI_UNDEFINED) {
  	    MPI_Allreduce(&recvTime, &min, 1, MPI_DOUBLE, MPI_MIN, new_comm);
  	    MPI_Allreduce(&recvTime, &avg, 1, MPI_DOUBLE, MPI_SUM, new_comm);
	    MPI_Allreduce(&recvTime, &max, 1, MPI_DOUBLE, MPI_MAX, new_comm);
          }

	  avg /= numRG;

	} // end if map[pe] != -1
	if(grank == 0) {
	  time[0] += min;
	  time[1] += avg;
	  time[2] += max;
	}
      } // end for loop of trials
      if (grank == 0) {
	FILE *outf = fopen(name, "a");
	fprintf(outf, "%d %g %g %g\n", msg_size, time[0]/10, time[1]/10, time[2]/10);
	fflush(NULL);
	fclose(outf);
	time[0] = time[1] = time[2] = 0.0;
      }
    } // end for loop of msgs
    free(mapRG);
  } // end for loop of hops

  if(grank == 0)
    printf("Program Complete\n");

  MPI_Finalize();
  return 0;
}