Example #1
static VALUE group_range_incl(VALUE self, VALUE ary)
{
    int rv, i, len, *ranks;
    MPI_Group *grp, *newgrp;

    Data_Get_Struct(self, MPI_Group, grp);

    newgrp = ALLOC(MPI_Group);

    len = RARRAY_LEN(ary);
    /* MPI_Group_range_incl expects a contiguous len x 3 array of
       (first, last, stride) triples, so build the triples in one
       flat allocation rather than as an array of per-range pointers. */
    ranks = ALLOCA_N(int, 3 * len);

    for (i = 0; i < len; i++) {
        int j;
        VALUE range;

        range = rb_ary_entry(ary, i);

        for (j = 0; j < 3; j++)
            ranks[3 * i + j] = FIX2INT(rb_ary_entry(range, j));
    }

    /* Thank you, cdecl */
    rv = MPI_Group_range_incl(*grp, len, (int (*)[3])ranks, newgrp);
    mpi_exception(rv);

    return group_new(newgrp);
}
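This binding, like the others below, bottoms out in the same C prototype, int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group *newgroup), which takes its (first, last, stride) triples as one contiguous n x 3 block. A minimal hedged sketch of a direct call (assuming MPI is initialized, world_group came from MPI_Comm_group, and world_size from MPI_Comm_size):

/* keep the even ranks 0, 2, 4, ... of world_group */
int triples[1][3] = { { 0, world_size - 1, 2 } };
MPI_Group even_group;
MPI_Group_range_incl(world_group, 1, triples, &even_group);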
Example #2
/*
 * Class:     mpi_Group
 * Method:    range_incl
 * Signature: ([[I)J
 */
JNIEXPORT jlong JNICALL Java_mpi_Group_range_1incl(JNIEnv *env, jobject jthis, jobjectArray ranges)
{
    int i;
    int n=(*env)->GetArrayLength(env,ranges);
    jboolean isCopy=JNI_TRUE;
    MPI_Group newgroup;
    /*    jint **rngs=(jint**)calloc(n,sizeof(jint[3])); */
    int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
    jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));

    ompi_java_clearFreeList(env) ;

    for(i=0;i<n;i++) {
        jint *vec ;
        jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i);
        vec=(*env)->GetIntArrayElements(env, jrngs[i],&isCopy);
        rngs [i] [0] = vec [0] ;
        rngs [i] [1] = vec [1] ;
        rngs [i] [2] = vec [2] ;
        (*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
    }

    MPI_Group_range_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
                         n,rngs,&newgroup);

    free(rngs);
    free(jrngs);
    return (jlong)newgroup;
}
Example #3
value caml_mpi_group_range_incl(value group, value vranges)
{
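  /* caml_mpi_extract_ranges is assumed to copy the OCaml (first, last, stride)
     triples into a freshly allocated C array of int[3]; that array is released
     below with stat_free once the new group has been built. */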
  int num;
  int (*ranges)[3];
  MPI_Group newgroup;
  caml_mpi_extract_ranges(vranges, &num, &ranges);
  MPI_Group_range_incl(Group_val(group), num, ranges, &newgroup);
  stat_free(ranges);
  return caml_mpi_alloc_group(newgroup);
}
Example #4
dart_ret_t dart_group_split(
  const dart_group_t *g,
  size_t n,
  dart_group_t **gout)
{
  MPI_Group grouptem;
  int size, length, i, ranges[1][3];

  MPI_Group_size (g -> mpi_group, &size);

  /* Ceiling division. */
  length = (size+(int)n-1)/(int)n;

  /* Note: split the group into chunks of subgroups. */
  for (i = 0; i < (int)n; i++)
  {
    if (i * length < size)
    {
      ranges[0][0] = i * length;

      if (i * length + length <= size)
      {
        ranges[0][1] = i * length + length -1;
      }
      else
      {
        ranges[0][1] = size - 1;
      }

      ranges[0][2] = 1;
      MPI_Group_range_incl(
        g -> mpi_group,
        1,
        ranges,
        &grouptem);
      (*(gout + i))->mpi_group = grouptem;
    }
    else
    {
      (*(gout + i))->mpi_group = MPI_GROUP_EMPTY;
    }
  }
  return DART_OK;
}
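As a concrete illustration of the chunking above (illustrative numbers, not from the source): with size = 10 and n = 3, the ceiling division gives length = 4, so the ranges built per iteration are {0, 3, 1}, {4, 7, 1} and {8, 9, 1}. If n were large enough that i * length >= size for some i, those trailing subgroups would instead be set to MPI_GROUP_EMPTY.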
Example #5
void ompi_group_range_incl_f(MPI_Fint *group, MPI_Fint *n, MPI_Fint ranges[][3], MPI_Fint *newgroup, MPI_Fint *ierr)
{
  int c_ierr;
  ompi_group_t *c_group, *c_newgroup;
  OMPI_2_DIM_ARRAY_NAME_DECL(ranges, 3);

  /* Make the fortran to c representation conversion */
  c_group = MPI_Group_f2c(*group);

  OMPI_2_DIM_ARRAY_FINT_2_INT(ranges, *n, 3);
  c_ierr = MPI_Group_range_incl(c_group,
                                OMPI_FINT_2_INT(*n),
                                OMPI_ARRAY_NAME_CONVERT(ranges),
                                &c_newgroup);
  if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

  /* translate the results from c to fortran */
  if (MPI_SUCCESS == c_ierr) {
      *newgroup = c_newgroup->grp_f_to_c_index;
  }

  OMPI_ARRAY_FINT_2_INT_CLEANUP(ranges);
}
Example #6
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1;
    MPI_Comm comm, newcomm;
    int errs = 0, mpi_errno, errclass, rank, size;
    int range[1][3];
    int worldrank;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);
    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);

    /* Checking group_range_incl for a NULL newgroup argument */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    mpi_errno = MPI_Group_range_incl(basegroup, 1, range, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    /* comm aliases MPI_COMM_WORLD and must not be freed */
    MPI_Comm_free(&newcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #7
JNIEXPORT jlong JNICALL Java_mpi_Group_rangeIncl(
        JNIEnv *env, jobject jthis, jlong group, jobjectArray ranges)
{
    int i;
    MPI_Group newGroup;
    jsize n = (*env)->GetArrayLength(env, ranges);
    int (*cRanges)[3] = (int(*)[3])calloc(n, sizeof(int[3]));
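    /* Copy each Java int[3] triple into the contiguous int[n][3] block
       just allocated, which is the layout MPI_Group_range_incl expects. */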

    for(i = 0; i < n; i++)
    {
        jintArray ri = (*env)->GetObjectArrayElement(env, ranges, i);
        jint *jri = (*env)->GetIntArrayElements(env, ri, NULL);
        cRanges[i][0] = jri[0];
        cRanges[i][1] = jri[1];
        cRanges[i][2] = jri[2];
        (*env)->ReleaseIntArrayElements(env, ri, jri, JNI_ABORT);
        (*env)->DeleteLocalRef(env, ri);
    }

    int rc = MPI_Group_range_incl((MPI_Group)group, n, cRanges, &newGroup);
    ompi_java_exceptionCheck(env, rc);
    free(cRanges);
    return (jlong)newGroup;
}
Example #8
bool pRPL::Process::
grouping(int nGroups,
         bool incldMaster,
         Process *pGrpedPrc,
         Process *pGrpMaster) const {
  if(!initialized()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: Process has NOT been initialized," \
         << " unable to be grouped" << endl;
    return false;
  }

  if(!active()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: inactive Process," \
         << " unable to group a Null communicator." \
         << " id = " << _id << " nTotPrcs = " << _nTotalPrcs << endl;
    return false;
  }

  if(nGroups <= 0 ||
     nGroups > _nTotalPrcs) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: invalid number of groups (" \
         << nGroups << ") as the total number of processes is " \
         << _nTotalPrcs << endl;
    return false;
  }

  if(!incldMaster && _nTotalPrcs <= 1) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error:  " << _nTotalPrcs << " processes can NOT" \
         << " be grouped without the master process" << endl;
    return false;
  }

  MPI_Group glbGrp;
  MPI_Comm glbComm = _comm;
  MPI_Comm_group(glbComm, &glbGrp);
  int myID = -1;
  int grpID = -1;
  MPI_Comm grpComm = MPI_COMM_NULL;

  if(incldMaster) {
    myID = _id;
    grpID = myID % nGroups;
    MPI_Comm_split(glbComm, grpID, myID, &grpComm);
    if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
      return false;
    }
    if(pGrpMaster != NULL) {
      MPI_Group masterGrp= MPI_GROUP_NULL;
      MPI_Comm masterComm = MPI_COMM_NULL;
      int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
      MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
      MPI_Comm_create(glbComm, masterGrp, &masterComm);
      if(!pGrpMaster->set(masterComm)) {
        return false;
      }
    }
  }
  else {
    int excldRanks[1] = {0};
    MPI_Group glbGrp2 = MPI_GROUP_NULL;
    MPI_Group_excl(glbGrp, 1, excldRanks, &glbGrp2);
    MPI_Comm_create(_comm, glbGrp2, &glbComm);
    glbGrp = glbGrp2;
    if(!isMaster()) {
      MPI_Comm_rank(glbComm, &myID);
      grpID = myID % nGroups;
      MPI_Comm_split(glbComm, grpID, myID, &grpComm);
      if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
        return false;
      }
      if(pGrpMaster != NULL) {
        MPI_Group masterGrp= MPI_GROUP_NULL;
        MPI_Comm masterComm = MPI_COMM_NULL;
        int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
        MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
        MPI_Comm_create(glbComm, masterGrp, &masterComm);
        if(!pGrpMaster->set(masterComm)) {
          return false;
        }
      }
    }
  }

  return true;
}
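The master-group range {0, nGroups-1, 1} used above works because MPI_Comm_split is keyed by the global rank: group g receives exactly the ranks with myID % nGroups == g, so the smallest member of each group, and hence its local rank-0 master, is rank g of the enclosing group for g = 0 .. nGroups-1.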
Example #9
int main( int argc, char *argv[] )
{
    int errs = 0;
    int ranks[MAX_WORLD_SIZE], ranksout[MAX_WORLD_SIZE], 
	ranksin[MAX_WORLD_SIZE];
    int range[1][3];
    MPI_Group gworld, gself, ngroup, galt;
    MPI_Comm  comm;
    int rank, size, i, nelms;

    MTest_Init( &argc, &argv );

    MPI_Comm_group( MPI_COMM_SELF, &gself );

    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );

    if (size > MAX_WORLD_SIZE) {
	fprintf( stderr, 
	 "This test requires a comm world with no more than %d processes\n", 
		 MAX_WORLD_SIZE );
	MPI_Abort( MPI_COMM_WORLD, 1 );
        exit(1);
    }
    if (size < 4) {
	fprintf( stderr, "This test requires at least 4 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
        exit(1);
    }

    MPI_Comm_group( comm, &gworld );
    for (i=0; i<size; i++) {
	ranks[i] = i;
	ranksout[i] = -1;
    }
    /* Try translating ranks from comm world compared against
       comm self, so most will be UNDEFINED */
    MPI_Group_translate_ranks( gworld, size, ranks, gself, ranksout );
    
    for (i=0; i<size; i++) {
	if (i == rank) {
	    if (ranksout[i] != 0) {
		printf( "[%d] Rank %d is %d but should be 0\n", rank, 
			i, ranksout[i] );
		errs++;
	    }
	}
	else {
	    if (ranksout[i] != MPI_UNDEFINED) {
		printf( "[%d] Rank %d is %d but should be undefined\n", rank, 
			i, ranksout[i] );
		errs++;
	    }
	}
    }

    /* MPI-2 Errata requires that MPI_PROC_NULL is mapped to MPI_PROC_NULL */
    ranks[0] = MPI_PROC_NULL;
    ranks[1] = 1;
    ranks[2] = rank;
    ranks[3] = MPI_PROC_NULL;
    for (i=0; i<4; i++) ranksout[i] = -1;

    MPI_Group_translate_ranks( gworld, 4, ranks, gself, ranksout );
    if (ranksout[0] != MPI_PROC_NULL) {
	printf( "[%d] Rank[0] should be MPI_PROC_NULL but is %d\n",
		rank, ranksout[0] );
	errs++;
    }
    if (rank != 1 && ranksout[1] != MPI_UNDEFINED) {
	printf( "[%d] Rank[1] should be MPI_UNDEFINED but is %d\n",
		rank, ranksout[1] );
	errs++;
    }
    if (rank == 1 && ranksout[1] != 0) {
	printf( "[%d] Rank[1] should be 0 but is %d\n",
		rank, ranksout[1] );
	errs++;
    }
    if (ranksout[2] != 0) {
	printf( "[%d] Rank[2] should be 0 but is %d\n",
		rank, ranksout[2] );
	errs++;
    }
    if (ranksout[3] != MPI_PROC_NULL) {
	printf( "[%d] Rank[3] should be MPI_PROC_NULL but is %d\n",
		rank, ranksout[3] );
	errs++;
    }

    MPI_Group_free(&gself);

    /* Now, try comparing small groups against larger groups, and use groups
       with irregular members (to bypass optimizations in group_translate_ranks
       for simple groups)
     */
    nelms = 0;
    ranks[nelms++] = size - 2;
    ranks[nelms++] = 0;
    if (rank != 0 && rank != size - 2) {
	ranks[nelms++] = rank; 
    }

    MPI_Group_incl( gworld, nelms, ranks, &ngroup );

    for (i=0; i<nelms; i++) ranksout[i] = -1;
    ranksin[0] = 1;
    ranksin[1] = 0;
    ranksin[2] = MPI_PROC_NULL;
    ranksin[3] = 2;
    MPI_Group_translate_ranks( ngroup, nelms+1, ranksin, gworld, ranksout );
    for (i=0; i<nelms+1; i++) {
	if (ranksin[i] == MPI_PROC_NULL) {
	    if (ranksout[i] != MPI_PROC_NULL) {
		fprintf( stderr, "Input rank for proc_null but output was %d\n",
			 ranksout[i] );
		errs++;
	    }
	}
	else if (ranksout[i] != ranks[ranksin[i]]) {
	    fprintf( stderr, "Expected ranksout[%d] = %d but found %d\n",
		     i, ranks[ranksin[i]], ranksout[i] );
	    errs++;
	}
    }
    
    range[0][0] = size -1 ;
    range[0][1] = 0;
    range[0][2] = -1;
    MPI_Group_range_incl( gworld, 1, range, &galt);
    for (i=0; i<nelms+1; i++) ranksout[i] = -1;
    MPI_Group_translate_ranks( ngroup, nelms+1, ranksin, galt, ranksout );
    for (i=0; i<nelms+1; i++) {
	if (ranksin[i] == MPI_PROC_NULL) {
	    if (ranksout[i] != MPI_PROC_NULL) {
		fprintf( stderr, "Input rank for proc_null but output was %d\n",
			 ranksout[i] );
		errs++;
	    }
	}
	else if (ranksout[i] != (size-1)-ranks[ranksin[i]]) {
	    fprintf( stderr, "Expected ranksout[%d] = %d but found %d\n",
		     i, (size-1)-ranks[ranksin[i]], ranksout[i] );
	    errs++;
	}
    }
    
    
    MPI_Group_free(&gworld);
    MPI_Group_free(&galt);
    MPI_Group_free(&ngroup);

    MTest_Finalize( errs );
    MPI_Finalize();

    return 0;
}
Example #10
FC_FUNC( mpi_group_range_incl, MPI_GROUP_RANGE_INCL )
     (int *group, int *n, int ranges[][3], int *newgroup, int *ierror)
{
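  /* This thin shim assumes MPI_Group and the Fortran handle are both plain
     ints (as in MPICH-derived implementations); handle-based MPIs would need
     MPI_Group_f2c/MPI_Group_c2f conversions as in Example #5 above. */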
  *ierror= MPI_Group_range_incl(*group, *n, ranges, newgroup);
}
Example #11
int main (int argc, char *argv[]){

	MPI_Init(&argc, &argv);
	int tmp_rank, tmp_size;
	MPI_Comm_rank(MPI_COMM_WORLD, &tmp_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &tmp_size);

	os::global::start = MPI_Wtime();

	size_t mpi_rank = tmp_rank, mpi_size = tmp_size;

	os::config::fill(argc, argv);

	if(os::global::help){
		if(mpi_rank == 0) os::config::help();
		MPI_Finalize();
		return 0;
	}

	try{
		os::config::check();

		const size_t nth = std::stoull(os::global::params[0]);
		size_t size = os::index::size(nth);
		size_t last = os::index::last(size);
		const size_t total_count = os::index::total_count(last);

		const size_t r = total_count % mpi_size;
		const size_t base_count = total_count / mpi_size;

		size_t o;
		size_t count = os::index::local_count(mpi_rank, r, base_count, o);

		bits_t<size_t> prime;


		if(mpi_rank == mpi_size - 1) os::global::start_c = MPI_Wtime();


		if(nth == 0) last = 0, count = 0, size = 0;
		else if(nth == 1) last = 4, count = 0, size = 2;
		else if(nth == 2) last = 4, count = 0, size = 2;
		else{

			MPI_Comm* forward = new MPI_Comm[mpi_size];
			for(size_t i = 0; i < mpi_size; ++i){
				MPI_Group group, orig;
				int range[3] = {int(i), tmp_size - 1, 1};
				MPI_Comm_group(MPI_COMM_WORLD, &orig);
				MPI_Group_range_incl(orig, 1, &range, &group);
				MPI_Comm_create(MPI_COMM_WORLD, group, &forward[i]);
			}

			prime.resize(count, true);

			os::global::checkpoint = hrclock::now();
			os::alg::parallel_eratosthene_23(mpi_rank, mpi_size, forward, prime, count, last, o);
			os::global::duration += hrclock::now() - os::global::checkpoint;

			delete[] forward;
		}

		std::cout << "Process " << mpi_rank << " : " << ( double(os::global::duration.count()) / hrclock::period::den * hrclock::period::num ) << " sec" << std::endl;

		if(mpi_rank == mpi_size - 1){
			os::global::stop_c = MPI_Wtime();
			std::cout << "Computation time : " << (os::global::stop_c - os::global::start_c) << " sec" << std::endl;
		}

		if(!os::global::speed){

			if(mpi_rank == 0) std::cout << "Begin writing to file" << std::endl;

			uint16_t max;
			pixel::generator<ppm::pixel_t> *painter_p, *painter_c;
			os::output::create_painters(painter_p, painter_c, max);

			const std::string prefix = os::global::params[1];
			const size_t pixels = size * size;

			std::string tmp_file_name = prefix + std::to_string(size) + ".tmp";
			MPI_File file;
			MPI_File_open(MPI_COMM_WORLD, (char *) tmp_file_name.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file);
			MPI_Status status;

			size_t offset = ppm::write_header(file, '6', size, size, max, status), prime_n;

			if(os::global::ssd){
				size_t tmp = os::output::apply_write_strategy_random(mpi_rank, file, status, offset, count, o, size, pixels, painter_p, painter_c, prime);
				MPI_Reduce(&tmp, &prime_n, 1, MPI_SIZE_T, MPI_SUM, 0, MPI_COMM_WORLD);
			}

			else{
				prime_n = os::output::apply_write_strategy_sequential(mpi_rank, mpi_size, file, status, offset, nth, base_count, o, r, size, pixels, painter_p, painter_c, prime, tmp_file_name);
			}

			MPI_File_close(&file);

			if(mpi_rank == 0){
				std::string file_name = prefix + std::to_string(prime_n) + ".ppm";
				std::rename(tmp_file_name.c_str(), file_name.c_str());
			}

			delete painter_p;
			delete painter_c;

			if(mpi_rank == 0) os::stat::print(last, prime_n);
		}

		MPI_Barrier(MPI_COMM_WORLD);

		os::global::stop = MPI_Wtime();

		if(mpi_rank == 0) std::cout << "Total time : " << (os::global::stop - os::global::start) << " sec" << std::endl;

		MPI_Finalize();

	}
	catch(const std::exception& e){
		MPI_Finalize();
		std::cout << '[' << mpi_rank << ']' << " error -> " << e.what() << std::endl;
		return 1;
	}

	return 0;
}
Example #12
/*
 * Get an intracommunicator with at least min_size members.  If "allowSmaller"
 * is true, allow the communicator to be smaller than MPI_COMM_WORLD and
 * for this routine to return MPI_COMM_NULL for some values.  Returns 0 if
 * no more communicators are available.
 */
int MTestGetIntracommGeneral(MPI_Comm * comm, int min_size, int allowSmaller)
{
    int size, rank, merr;
    int done = 0;
    int isBasic = 0;

    /* The while loop allows us to skip communicators that are too small.
     * MPI_COMM_NULL is always considered large enough */
    while (!done) {
        isBasic = 0;
        intraCommName = "";
        switch (intraCommIdx) {
        case 0:
            *comm = MPI_COMM_WORLD;
            isBasic = 1;
            intraCommName = "MPI_COMM_WORLD";
            break;
        case 1:
            /* dup of world */
            merr = MPI_Comm_dup(MPI_COMM_WORLD, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Dup of MPI_COMM_WORLD";
            break;
        case 2:
            /* reverse ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of MPI_COMM_WORLD";
            break;
        case 3:
            /* subset of world, with reversed ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, ((rank < size / 2) ? 1 : MPI_UNDEFINED),
                                  size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
            break;
        case 4:
            *comm = MPI_COMM_SELF;
            isBasic = 1;
            intraCommName = "MPI_COMM_SELF";
            break;
        case 5:
            {
#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
                /* Dup of the world using MPI_Intercomm_merge */
                int rleader, isLeft;
                MPI_Comm local_comm, inter_comm;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (size > 1) {
                    merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank == 0) {
                        rleader = size / 2;
                    }
                    else if (rank == size / 2) {
                        rleader = 0;
                    }
                    else {
                        rleader = -1;
                    }
                    isLeft = rank < size / 2;
                    merr =
                        MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99,
                                             &inter_comm);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Intercomm_merge(inter_comm, isLeft, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Comm_free(&inter_comm);
                    MPI_Comm_free(&local_comm);
                    intraCommName = "Dup of WORLD created by MPI_Intercomm_merge";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
        case 6:
            {
                /* Even of the world using MPI_Comm_create_group */
                int i;
                MPI_Group world_group, even_group;
                int *excl = NULL;

                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    /* exclude the odd ranks */
                    excl = malloc((size / 2) * sizeof(int));
                    for (i = 0; i < size / 2; i++)
                        excl[i] = (2 * i) + 1;

                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    MPI_Group_excl(world_group, size / 2, excl, &even_group);
                    MPI_Group_free(&world_group);
                    free(excl);

                    if (rank % 2 == 0) {
                        /* Even processes create a comm. for themselves */
                        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm);
                        intraCommName = "Even of WORLD created by MPI_Comm_create_group";
                    }
                    else {
                        *comm = MPI_COMM_NULL;
                    }

                    MPI_Group_free(&even_group);
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
#else
                *comm = MPI_COMM_NULL;
#endif
            }
            break;
        case 7:
            {
                /* High half of the world using MPI_Comm_create */
                int ranges[1][3];
                MPI_Group world_group, high_group;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                ranges[0][0] = size / 2;
                ranges[0][1] = size - 1;
                ranges[0][2] = 1;

                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Group_free(&world_group);
                    MPI_Group_free(&high_group);
                    intraCommName = "High half of WORLD created by MPI_Comm_create";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
            /* These next cases are communicators that include some
             * but not all of the processes */
        case 8:
        case 9:
        case 10:
        case 11:
            {
                int newsize;
                merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
                if (merr)
                    MTestPrintError(merr);
                newsize = size - (intraCommIdx - 7);

                if (allowSmaller && newsize >= min_size) {
                    merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_split(MPI_COMM_WORLD, rank < newsize, rank, comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank >= newsize) {
                        merr = MPI_Comm_free(comm);
                        if (merr)
                            MTestPrintError(merr);
                        *comm = MPI_COMM_NULL;
                    }
                    else {
                        intraCommName = "Split of WORLD";
                    }
                }
                else {
                    /* Act like default */
                    *comm = MPI_COMM_NULL;
                    intraCommIdx = -1;
                }
            }
            break;

            /* Other ideas: dup of self, cart comm, graph comm */
        default:
            *comm = MPI_COMM_NULL;
            intraCommIdx = -1;
            break;
        }

        if (*comm != MPI_COMM_NULL) {
            merr = MPI_Comm_size(*comm, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= min_size)
                done = 1;
        }
        else {
            intraCommName = "MPI_COMM_NULL";
            isBasic = 1;
            done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        intraCommIdx++;

        if (!done && !isBasic && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr)
                MTestPrintError(merr);
        }
    }

    return intraCommIdx;
}
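The contract described in the comment block above MTestGetIntracommGeneral is normally consumed in a loop over every available communicator. A hedged sketch of that calling pattern (MTestFreeComm is assumed to be the matching MPICH test helper that releases non-builtin communicators):

MPI_Comm comm;
/* loop until the helper reports that no more communicators are available */
while (MTestGetIntracommGeneral(&comm, 2, 0)) {
    if (comm == MPI_COMM_NULL)
        continue;               /* case skipped for this configuration */
    /* ... exercise the communicator under test ... */
    MTestFreeComm(&comm);       /* assumed helper; frees non-basic comms */
}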
Example #13
void nrnmpi_subworld_size(int n) {
	/* n is the size of a subworld, nrnmpi_numprocs (pc.nhost) */
	if (nrnmpi_use != 1) { return; }
	if (nrnmpi_comm != MPI_COMM_NULL) {asrt(MPI_Comm_free(&nrnmpi_comm)); }
	if (nrn_bbs_comm != MPI_COMM_NULL) {asrt(MPI_Comm_free(&nrn_bbs_comm)); }
	if (grp_bbs != MPI_GROUP_NULL) { asrt(MPI_Group_free(&grp_bbs)); }
	if (grp_net != MPI_GROUP_NULL) { asrt(MPI_Group_free(&grp_net)); }
	MPI_Group wg;
	asrt(MPI_Comm_group(nrnmpi_world_comm, &wg));
	int r = nrnmpi_myid_world;
	/* special cases */
	if (n == 1) {
		asrt(MPI_Group_incl(wg, 1, &r, &grp_net));
		asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));
		asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
		asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
	}else if (n == nrnmpi_numprocs_world) {
		asrt(MPI_Group_incl(wg, 1, &r, &grp_bbs));
		asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));
		if (r == 0) {
			asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
			asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
		}else{
			nrnmpi_myid_bbs = -1;
			nrnmpi_numprocs_bbs = -1;
		}
	}else{
		int nw = nrnmpi_numprocs_world;
		int nb = nw/n; /* nrnmpi_numprocs_bbs */
		int range[3];
		/* net is contiguous */
		range[0] = r/n;
		range[0] *= n; /* first */
		range[1] = range[0] + n - 1; /* last */
		range[2] = 1; /* stride */
		asrt(MPI_Group_range_incl(wg, 1, &range, &grp_net));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));

		range[0] = 0; /* first */
		range[1] = nw - n; /* last */
		range[2] = n; /* stride */
		asrt(MPI_Group_range_incl(wg, 1, &range, &grp_bbs));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm));
		if (r%n == 0) { /* only rank 0 of the subworlds */
			asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
			asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
		}else{
			nrnmpi_myid_bbs = -1;
			nrnmpi_numprocs_bbs = -1;
		}
	}
	asrt(MPI_Group_free(&wg));
}
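A worked instance of the two range computations above may help (illustrative numbers, not from the source): with nrnmpi_numprocs_world = 8 and n = 4, a rank r = 5 builds the contiguous net range {4, 7, 1} (its own subworld), while the strided bbs range {0, 4, 4} collects the rank-0 leaders of the two subworlds, i.e. world ranks 0 and 4.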
Example #14
   void init(int NGang=1)
   {
#     ifndef USE_MPI
      std::cout << __FILE__ << ":" << __LINE__ << " MPI_Gang assumes -DUSE_MPI" << std::endl;
      return;
#     else
      owner = true;
      ngang = NGang;
      //if(pool.comm==MPI_COMM_NULL) this->set_pool(MPI_Struct::world());
      if(verbose_debug) std::cout << __FILE__ << ":" << __LINE__ << " NGang=" << ngang << std::endl;
      // Get information about the pool of processes
      if( !pool.in() ) return;
      MPI_Comm_rank(pool.comm, &pool.iproc);
      MPI_Comm_size(pool.comm, &pool.nproc);
      MPI_Comm_group(pool.comm,&pool.group);
      // Create a communicator for each Gang of workers
      int NPerGang   = pool.nproc/ngang;
      if( NPerGang<1 ) 
      {
         NPerGang = 1;
         ngang    = pool.nproc;
      }
      if( (pool.nproc % NPerGang) != 0 )
      {
         if( pool.iproc==0 ) 
            std::cout << __FILE__ << ":" << __LINE__ << " Can't evenly divide processes into gangs" << std::endl
                      << "ngang=" << ngang << " npool=" << pool.nproc << std::endl;
      }
      igang = pool.iproc/NPerGang;
      int gang_range[ngang][3];
      for(int ig=0; ig<ngang; ig++)
      {
         gang_range[ig][0] = ig*NPerGang;           // First process in gang
         gang_range[ig][1] = (ig+1)*NPerGang-1;     // Last process in gang
         gang_range[ig][2] = 1;                     // Stride through original group
         if(gang_range[ig][1]>=pool.nproc)
            gang_range[ig][1] = pool.nproc-1;
      }
      if(verbose_debug && pool.iproc==0)
      {
         std::cout << "range =";
         for(int i=0; i<(ngang*3); i++) std::cout << " " << gang_range[igang][i];
         std::cout << std::endl;
      }
      if(pool.in()) 
      {
         MPI_Group_range_incl(pool.group,1,&(gang_range[igang]),&gang.group);
         MPI_Comm_create(pool.comm,gang.group,&gang.comm);
         if( gang.in() )
         {
            MPI_Comm_rank(gang.comm,&gang.iproc);
            MPI_Comm_size(gang.comm,&gang.nproc);
         }
      }
      else
      {
         std::cout << "iproc=" << pool.iproc << " not assigned to a gang" << std::endl;
         return;
      }
      if(verbose_debug) std::cout << __FILE__ << ":" << __LINE__ << " iproc=" << pool.iproc << " igang=" << igang << "/" << ngang 
                                  << " iproc_gang=" << gang.iproc << "/" << gang.nproc << std::endl;
      // Create a communicator for lead-processes
      std::vector<int> lead_rank(ngang);
      for(int i=0; i<ngang; i++)
         lead_rank[i] = i*NPerGang;
      if( pool.in() ) 
      {
         MPI_Group_incl(pool.group,ngang,&(lead_rank[0]),&lead.group);
         MPI_Comm_create(pool.comm,lead.group,&lead.comm);
         if( lead.in() )
         {
            MPI_Comm_rank(lead.comm,&lead.iproc);
            MPI_Comm_size(lead.comm,&lead.nproc);
         }
      }
      if( gang.in() && gang.iproc==0 && !lead.in() )   // This is a paranoid check
         std::cout << __FILE__ << ":" << __LINE__ << "(" << pool.iproc << ") gang.iproc=" << gang.iproc << std::endl;
      if( lead.in() )
      {
         int ilead = -1;
         MPI_Comm_rank(lead.comm,&ilead);
         if( ilead!=igang ) 
         {
            std::cout << "ilead!=igang for iproc=" << pool.iproc << " ilead=" << ilead << " igang=" << igang << " iproc_gang=" << gang.iproc << std::endl;
         }
      }
#     endif
   }
Example #15
/* ************************************************************************ */
static
void
initVariables (struct calculation_arguments* arguments, struct calculation_results* results, struct options const* options, struct mpi_options* mpi_options)
{
	arguments->N = (options->interlines * 8) + 9 - 1;
	arguments->num_matrices = (options->method == METH_JACOBI) ? 2 : 1;
	arguments->h = 1.0 / arguments->N;

	if(options->method == METH_JACOBI)
	{
		// assign rows to the processes in parallel
		uint64_t N = arguments->N;
		if(N < mpi_options->num_procs_used)
		{
			mpi_options->num_procs_used = N;
		}
		uint64_t rows_per_process = (N-1)/mpi_options->num_procs_used;
		uint64_t remaining_rows = (N-1) - mpi_options->num_procs_used * rows_per_process;
		
		// calculate how many rows each process gets
		if(mpi_options->mpi_rank < mpi_options->num_procs_used)
		{	
			uint64_t start = mpi_options->mpi_rank * rows_per_process;
			start += (mpi_options->mpi_rank < remaining_rows)? mpi_options->mpi_rank : remaining_rows;
			uint64_t end = start + rows_per_process;
			end += (mpi_options->mpi_rank < remaining_rows)? 1:0;
			assert(start <= end);
			assert(end <= N);
			arguments->row_start = start + 1;//1 is first index
			arguments->row_end = end + 1;//1 is first index
		}
	}
	else
	{
		// not parallel
		mpi_options->num_procs_used = 1;
		if(mpi_options->mpi_rank == 0)
		{
			// calculate everything
			arguments->row_start = 1;
			arguments->row_end = arguments->N;
		}
	}

	if(mpi_options->mpi_size > mpi_options->num_procs_used)
	{
		MPI_Group world_group;
		MPI_Comm_group(MPI_COMM_WORLD, &world_group);

		// create group with only necessary ranks
		int ranks[3] = {0, mpi_options->num_procs_used-1, 1};
		MPI_Group new_group;
		MPI_Group_range_incl(world_group, 1, &ranks, &new_group);

		// Create a new communicator
		MPI_Comm_create(MPI_COMM_WORLD, new_group, &mpi_options->comm);
	}

	if(mpi_options->mpi_rank >= mpi_options->num_procs_used)
	{	
		// calculate nothing
		//arguments->row_start = 0;
		//arguments->row_end = 0;
		MPI_Finalize();
		exit(0);
	}

	results->m = 0;
	results->stat_iteration = 0;
	results->stat_precision = 0;
}
Example #16
int test_communicators( void )
{
    MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm, 
	split_comm, world_comm;
    MPI_Group world_group, lo_group, rev_group;
    void *vvalue;
    int ranges[1][3];
    int flag, world_rank, world_size, rank, size, n, key_1, key_3;
    int color, key, result;
    int errs = 0;
    MPI_Aint value;

    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "*** Communicators ***\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );

    /*
      Exercise Comm_create by creating an equivalent to dup_comm_world
      (sans attributes) and a half-world communicator.
    */

#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_create\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_group( dup_comm_world, &world_group );
    MPI_Comm_create( dup_comm_world, world_group, &world_comm );
    MPI_Comm_rank( world_comm, &rank );
    if (rank != world_rank) {
	errs++;
	printf( "incorrect rank in world comm: %d\n", rank );
	MPI_Abort(MPI_COMM_WORLD, 3001 );
    }

    n = world_size / 2;

    ranges[0][0] = 0;
    ranges[0][1] = (world_size - n) - 1;
    ranges[0][2] = 1;

#ifdef DEBUG
    printf( "world rank = %d before range incl\n", world_rank );FFLUSH;
#endif
    MPI_Group_range_incl(world_group, 1, ranges, &lo_group );
#ifdef DEBUG
    printf( "world rank = %d after range incl\n", world_rank );FFLUSH;
#endif
    MPI_Comm_create(world_comm, lo_group, &lo_comm );
#ifdef DEBUG
    printf( "world rank = %d before group free\n", world_rank );FFLUSH;
#endif
    MPI_Group_free( &lo_group );

#ifdef DEBUG
    printf( "world rank = %d after group free\n", world_rank );FFLUSH;
#endif

    if (world_rank < (world_size - n)) {
	MPI_Comm_rank(lo_comm, &rank );
	if (rank == MPI_UNDEFINED) {
	    errs++;
	    printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3002 );
	}
	else {
	    /* printf( "lo in\n" );FFLUSH; */
	    MPI_Barrier(lo_comm );
	    /* printf( "lo out\n" );FFLUSH; */
	}
    }
    else {
	if (lo_comm != MPI_COMM_NULL) {
	    errs++;
	    printf( "incorrect lo comm:\n" ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3003 );
	}
    }

#ifdef DEBUG
    printf( "worldrank = %d\n", world_rank );FFLUSH;
#endif
    MPI_Barrier(world_comm);

#ifdef DEBUG
    printf( "bar!\n" );FFLUSH;
#endif
    /*
      Check Comm_dup by adding attributes to lo_comm & duplicating
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_dup\n" );
	fflush(stdout);
    }
#endif
    
    if (lo_comm != MPI_COMM_NULL) {
	value = 9;
	MPI_Keyval_create(copy_fn,     delete_fn,   &key_1, &value );
	value = 8;
	value = 7;
	MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
			  &key_3, &value ); 

	/* This may generate a compilation warning; it is, however, an
	   easy way to cache a value instead of a pointer */
	/* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
	MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank );
	MPI_Attr_put(lo_comm, key_3, (void *)0 );
	
	MPI_Comm_dup(lo_comm, &dup_comm );

	/* Note that if sizeof(int) < sizeof(void *), we can't use
	   (void **)&value to get the value we passed into Attr_put.  To avoid 
	   problems (e.g., alignment errors), we recover the value into 
	   a (void *) and cast to int. Note that this may generate warning
	   messages from the compiler.  */
	MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	
	if (! flag) {
	    errs++;
	    printf( "dup_comm key_1 not found on %d\n", world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3004 );
	}
	
	if (value != world_rank) {
	    errs++;
	    printf( "dup_comm key_1 value incorrect: %ld, expected %d\n", 
		    (long)value, world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3005 );
	}

	MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	if (flag) {
	    errs++;
	    printf( "dup_comm key_3 found!\n" );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3008 );
	}
	MPI_Keyval_free(&key_1 );
	MPI_Keyval_free(&key_3 );
    }
    /* 
       Split the world into even & odd communicators with reversed ranks.
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_split\n" );
	fflush(stdout);
    }
#endif
    
    color = world_rank % 2;
    key   = world_size - world_rank;
    
    MPI_Comm_split(dup_comm_world, color, key, &split_comm );
    MPI_Comm_size(split_comm, &size );
    MPI_Comm_rank(split_comm, &rank );
    if (rank != ((size - world_rank/2) - 1)) {
	errs++;
	printf( "incorrect split rank: %d\n", rank ); fflush(stdout);
	MPI_Abort(MPI_COMM_WORLD, 3009 );
    }
    
    MPI_Barrier(split_comm );
    /*
      Test each possible Comm_compare result
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_compare\n" );
	fflush(stdout);
    }
#endif
    
    MPI_Comm_compare(world_comm, world_comm, &result );
    if (result != MPI_IDENT) {
	errs++;
	printf( "incorrect ident result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3010 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(lo_comm, dup_comm, &result );
	if (result != MPI_CONGRUENT) {
	    errs++;
            printf( "incorrect congruent result: %d\n", result );
            MPI_Abort(MPI_COMM_WORLD, 3011 );
	}
    }
    
    ranges[0][0] = world_size - 1;
    ranges[0][1] = 0;
    ranges[0][2] = -1;

    MPI_Group_range_incl(world_group, 1, ranges, &rev_group );
    MPI_Comm_create(world_comm, rev_group, &rev_comm );

    MPI_Comm_compare(world_comm, rev_comm, &result );
    if (result != MPI_SIMILAR && world_size != 1) {
	errs++;
	printf( "incorrect similar result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3012 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(world_comm, lo_comm, &result );
	if (result != MPI_UNEQUAL && world_size != 1) {
	    errs++;
	    printf( "incorrect unequal result: %d\n", result );
	    MPI_Abort(MPI_COMM_WORLD, 3013 );
	}
    }
    /*
      Free all communicators created
    */
#ifdef DEBUG
    if (world_rank == 0) 
	printf( "    Comm_free\n" );
#endif
    
    MPI_Comm_free( &world_comm );
    MPI_Comm_free( &dup_comm_world );
    
    MPI_Comm_free( &rev_comm );
    MPI_Comm_free( &split_comm );
    
    MPI_Group_free( &world_group );
    MPI_Group_free( &rev_group );
    
    if (lo_comm != MPI_COMM_NULL) {
        MPI_Comm_free( &lo_comm );
        MPI_Comm_free( &dup_comm );
    }
    
    return errs;
}
Example #17
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init(&argc,&argv);

	MPI_Comm_group( MPI_COMM_WORLD, &g1 );
	MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );
	if (size < 8) {
	    fprintf( stderr, 
		  "Test requires 8 processes (16 prefered) only %d provided\n",
		     size );
	    errs++;
	}

	/* 16 members, this process is rank 0, return in group 1 */
	ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
	if (myrank == 2) ranks[1] = 3;
	if (myrank == 7) ranks[2] = 6;
	MPI_Group_incl( g1, 3, ranks, &g2 );
	
	/* Check the resulting group */
	MPI_Group_size( g2, &size );
	MPI_Group_rank( g2, &rank );
	
	if (size != 3) {
	    fprintf( stderr, "Size should be %d, is %d\n", 3, size );
	    errs++;
	}
	if (rank != 0) {
	    fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
	    errs++;
	}

	rin[0] = 0; rin[1] = 1; rin[2] = 2;
	MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
	for (i=0; i<3; i++) {
	    if (rout[i] != ranks[i]) {
		fprintf( stderr, "translated rank[%d] %d should be %d\n", 
			 i, rout[i], ranks[i] );
		errs++;
	    }
	}
	
	/* Translate the process of the self group against another group */
	MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
	rin[0] = 0;
	MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
	if (rout[0] != myrank) {
	    fprintf( stderr, "translated of self is %d should be %d\n", 
			 rout[0], myrank );
	    errs++;
	}

	for (i=0; i<size; i++) 
	    rin[i] = i;
	MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
	for (i=0; i<size; i++) {
	    if (i == myrank && rout[i] != 0) {
		fprintf( stderr, "translated world to self of %d is %d\n",
			 i, rout[i] );
		errs++;
	    }
	    else if (i != myrank && rout[i] != MPI_UNDEFINED) {
		fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
			 i, rout[i] );
		errs++;
	    }
	}
	MPI_Group_free( &selfgroup );

	/* Exclude everyone in our group */
	{
	    int ii, *lranks, g1size;

	    MPI_Group_size( g1, &g1size );
	    
	    lranks = (int *)malloc( g1size * sizeof(int) );
	    for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
	    MPI_Group_excl( g1, g1size, lranks, &g6 );
	    if (g6 != MPI_GROUP_EMPTY) {
		fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
		errs++;
		MPI_Group_free( &g6 );
	    }
	    free( lranks );
	}
	
	/* Add tests for additional group operations */
	/* 
	   g2 = incl 1,3,7
	   g3 = excl 1,3,7
	   intersect ( w, g2 ) => g2
	   intersect ( w, g3 ) => g3
	   intersect ( g2, g3 ) => empty
	   
	   g4 = rincl 1:n-1:2
	   g5 = rexcl 1:n-1:2
	   union( g4, g5 ) => world
	   g6 = rincl n-1:1:-1 
	   g7 = rexcl n-1:1:-1
	   union( g6, g7 ) => concat of entries, similar to world
	   diff( w, g2 ) => g3
	*/
	MPI_Group_free( &g2 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_incl( g1, 1, range, &g4 );
	MPI_Group_union( g4, g5, &g45 );
	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );

	/* Now, duplicate the test, but using negative strides */
	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_incl( g1, 1, range, &g4 );

	MPI_Group_union( g4, g5, &g45 );

	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );
        MPI_Group_free( &g1 );

    if (myrank == 0) 
    {
	if (errs == 0) {
	    printf( " No Errors\n" );
	}
	else {
	    printf( "Found %d errors\n", errs );
	}
    }

    MPI_Finalize();
    return 0;
}
Example #18
/*@

MPI_Cart_create - Makes a new communicator to which topology information
                  has been attached

Input Parameters:
+ comm_old - input communicator (handle) 
. ndims - number of dimensions of cartesian grid (integer) 
. dims - integer array of size ndims specifying the number of processes in 
  each dimension 
. periods - logical array of size ndims specifying whether the grid is 
  periodic (true) or not (false) in each dimension 
- reorder - ranking may be reordered (true) or not (false) (logical) 

Output Parameter:
. comm_cart - communicator with new cartesian topology (handle) 

Algorithm:
We ignore 'reorder' info currently.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_create ( MPI_Comm comm_old, int ndims, int *dims, int *periods, 
		      int reorder, MPI_Comm *comm_cart )
{
  int range[1][3];
  MPI_Group group_old, group;
  int i, rank, num_ranks = 1;
  int mpi_errno = MPI_SUCCESS;
  int flag, size;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_old_ptr;
  static char myname[] = "MPI_CART_CREATE";

  TR_PUSH(myname);
  comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

  /* Check validity of arguments */
#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
  MPIR_TEST_ARG(comm_cart);
  MPIR_TEST_ARG(periods);
  if (ndims < 1 || dims == (int *)0) mpi_errno = MPI_ERR_DIMS;
  if (mpi_errno)
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  
  /* Check for Intra-communicator */
  MPI_Comm_test_inter ( comm_old, &flag );
  if (flag)
    return MPIR_ERROR(comm_old_ptr, 
            MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTER), myname );
#endif

  /* Determine number of ranks in topology */
  for ( i=0; i<ndims; i++ )
    num_ranks    *= (dims[i]>0)?dims[i]:-dims[i];
  if ( num_ranks < 1 ) {
    (*comm_cart)  = MPI_COMM_NULL;
    return MPIR_ERROR( comm_old_ptr, MPI_ERR_TOPOLOGY, myname );
  }

  /* Is the old communicator big enough? */
  MPIR_Comm_size (comm_old_ptr, &size);
  if (num_ranks > size) {
      mpi_errno = MPIR_Err_setmsg( MPI_ERR_TOPOLOGY, MPIR_ERR_TOPO_TOO_LARGE, 
				   myname, 
                  "Topology size is larger than size of communicator",
		  "Topology size %d is greater than communicator size %d", 
				   num_ranks, size );
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  }
	
  /* Make new comm */
  range[0][0] = 0; range[0][1] = num_ranks - 1; range[0][2] = 1;
  MPI_Comm_group ( comm_old, &group_old );
  MPI_Group_range_incl ( group_old, 1, range, &group );
  MPI_Comm_create  ( comm_old, group, comm_cart );
  MPI_Group_free( &group );
  MPI_Group_free( &group_old );

  /* Store topology information in new communicator */
  if ( (*comm_cart) != MPI_COMM_NULL ) {
      MPIR_ALLOC(topo,(MPIR_TOPOLOGY *) MPIR_SBalloc ( MPIR_topo_els ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      MPIR_SET_COOKIE(&topo->cart,MPIR_CART_TOPOL_COOKIE)
	  topo->cart.type         = MPI_CART;
      topo->cart.nnodes       = num_ranks;
      topo->cart.ndims        = ndims;
      MPIR_ALLOC(topo->cart.dims,(int *)MALLOC( sizeof(int) * 3 * ndims ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      topo->cart.periods      = topo->cart.dims + ndims;
      topo->cart.position     = topo->cart.periods + ndims;
      for ( i=0; i<ndims; i++ ) {
	  topo->cart.dims[i]    = dims[i];
	  topo->cart.periods[i] = periods[i];
      }

    /* Compute my position */
    MPI_Comm_rank ( (*comm_cart), &rank );
    for ( i=0; i < ndims; i++ ) {
      num_ranks = num_ranks / dims[i];
      topo->cart.position[i]  = rank / num_ranks;
      rank   = rank % num_ranks;
    }

    /* cache topology information */
    MPI_Attr_put ( (*comm_cart), MPIR_TOPOLOGY_KEYVAL, (void *)topo );
  }
  TR_POP;
  return (mpi_errno);
}
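Given the interface documented in the header comment of this implementation, a minimal hedged usage sketch (assuming MPI_COMM_WORLD has at least four ranks) would be:

int dims[2]    = { 2, 2 };   /* 2 x 2 process grid */
int periods[2] = { 0, 0 };   /* non-periodic in both dimensions */
MPI_Comm cart_comm;
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);
/* ranks outside the 2 x 2 grid come back with cart_comm == MPI_COMM_NULL */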
Example #19
AnyType ParMETIS_Op<Type, Mesh>::operator()(Stack stack) const {
    KN<Type>* ptKN = GetAny<KN<Type>*>((*part)(stack));
    idx_t nparts = GetAny<long>((*lparts)(stack));
    Type* pt = *ptKN;
    long n = ptKN->n;
    idx_t* ptInt = reinterpret_cast<idx_t*>(pt);
    std::fill_n(ptInt, n, 0);
    MPI_Comm comm = nargs[0] ? *((MPI_Comm*)GetAny<pcommworld>((*nargs[0])(stack))) : MPI_COMM_WORLD;
    int worker = nargs[1] ? GetAny<long>((*nargs[1])(stack)) : 0;
    MPI_Comm workComm = comm;
    if(worker == 0)
        MPI_Comm_size(comm, &worker);
    else {
        int size;
        MPI_Comm_size(comm, &size);
        worker = std::min(size, worker);
        MPI_Group worldGroup, workGroup;
        MPI_Comm_group(workComm, &worldGroup);
        int ranges[1][3];
        ranges[0][0] = 0;
        ranges[0][1] = worker - 1;
        ranges[0][2] = 1;
        MPI_Group_range_incl(worldGroup, 1, ranges, &workGroup);
        MPI_Comm_create(comm, workGroup, &workComm);
        MPI_Group_free(&worldGroup);
    }
    int rank;
    MPI_Comm_rank(comm, &rank);
    if(rank < worker) {
        idx_t* vtxdist = new idx_t[worker + 1];
        vtxdist[0] = 0;
        for(int i = 1; i < worker; ++i)
            vtxdist[i] = vtxdist[i - 1] + n / worker;
        vtxdist[worker] = n;
        int loc = vtxdist[rank + 1] - vtxdist[rank];
        idx_t* xadg = new idx_t[loc + 1];
        const Mesh& Th(*GetAny<const Mesh*>((*pTh)(stack)));
        idx_t nv = Th.nv;
        idx_t nve = Mesh::Rd::d + 1;
        std::vector<idx_t> adjncy;
        adjncy.reserve(loc * nve);
        xadg[0] = 0;
        for(idx_t k = vtxdist[rank]; k < vtxdist[rank + 1]; ++k) {
            for(idx_t j = 0; j < nve; ++j) {
                idx_t l = j;
                idx_t m = Th.ElementAdj(k, l);
                if(k != m && m > 0)
                    adjncy.push_back(m);
            }
            xadg[k + 1 - vtxdist[rank]] = adjncy.size();
        }
#if 0
        for(int i = 0; i < worker; ++i) {
            MPI_Barrier(workComm);
            if(i == rank) {
                for(int j = 0; j < worker + 1; ++j) {
                    std::cout << vtxdist[j] << " ";
                }
                std::cout << std::endl;
                for(int j = 0; j < loc + 1; ++j) {
                    std::cout << xadg[j] << " ";
                }
                std::cout << std::endl;
                for(int j = 0; j < adjncy.size(); ++j) {
                    std::cout << adjncy[j] << " ";
                }
                std::cout << std::endl;
            }
            MPI_Barrier(workComm);
        }
#endif
        idx_t wgtflag = 0;
        idx_t ncon = 1;
        idx_t edgecut;
        real_t* tpwgts = new real_t[nparts];
        for(int i = 0; i < nparts; ++i)
            tpwgts[i] = 1.0 / static_cast<real_t>(nparts);
        real_t ubvec = 1.05;
        idx_t* part = ptInt + vtxdist[rank];
        ParMETIS_V3_PartKway(vtxdist, xadg, adjncy.data(), NULL, NULL, &wgtflag, &wgtflag, &ncon, &nparts, tpwgts, &ubvec, &wgtflag, &edgecut, part, &workComm);
        delete [] tpwgts;
        delete [] xadg;
        delete [] vtxdist;
    }
    MPI_Allreduce(MPI_IN_PLACE, ptInt, n, MPI_INT, MPI_SUM, comm);
    for(int i = n; i-- > 0; )
        pt[i] = ptInt[i];
    if(nargs[1] && workComm != MPI_COMM_NULL)
        MPI_Comm_free(&workComm);
    return 0L;
}
Example #20
int main(int argc, char *argv[]){
  int i; 
  int n_ranks;
  int my_rank;
  int data_bytes;
  int buffer_size;
  int n_iterations;
  char *filename_to_write=NULL;
  int *rank_buffer=NULL;

  MPI_Info my_Info=MPI_INFO_NULL;
  MPI_Status my_MPI_Status;
  MPI_File **archive_file_MPI=NULL;
  int file_result;

  long int total_data_transfer;
  int total_elapsed_time_s;
  float total_transferred_gb;
  float transfer_speed_gb_s;

  long int stage_archive_file_offset;

  char *archive_filenames[MAX_N_ARCHIVE_FILES];
  int n_archive_files=(1);
  int archive_filename_length;

  time_t time_before,time_after;
  //  int my_target_file;

  int rank_ranges[MAX_N_ARCHIVE_FILES][3];
  int n_ranks_in_archive_file[MAX_N_ARCHIVE_FILES];
  MPI_Group world_group;
  MPI_Group sub_group[MAX_N_ARCHIVE_FILES];
  MPI_Comm sub_comm[MAX_N_ARCHIVE_FILES];
  int my_communicator; 
  long int total_archive_file_size;

  //  fprintf(stderr,"parfu_write_test beginning\n");
  
  if(argc < 6){
    fprintf(stderr,"usage: \n");
    fprintf(stderr," parfu_write_test <dat_bytes> <buf_bytes> <file_to_write> <n_iterations> <# arch files>\n");
    MPI_Finalize();
    return -1;
  }
  data_bytes=atoi(argv[1]);
  buffer_size=atoi(argv[2]);
  filename_to_write=argv[3];
  n_iterations=atoi(argv[4]);
  n_archive_files=atoi(argv[5]);
  if(n_archive_files<1 || n_archive_files>MAX_N_ARCHIVE_FILES){
    fprintf(stderr," you specified %d archive files!  Must be between 1 and %d\n",
	    n_archive_files,MAX_N_ARCHIVE_FILES);
    return -2;
  }

  MPI_Init(NULL,NULL);
  MPI_Comm_size(MPI_COMM_WORLD,&n_ranks);
  MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
  if(my_rank==0){
    fprintf(stderr," Data payload: %d bytes.\n",data_bytes);
    fprintf(stderr," Buffer size: %d bytes.\n",buffer_size);
    fprintf(stderr," Writing to file: >%s<\n",filename_to_write);
    fprintf(stderr,"  Performing %d iterations\n",n_iterations);
  }
  // all MPI stuff below
  
  // set up multiple file output
  for(i=0;i<MAX_N_ARCHIVE_FILES;i++){
    archive_filenames[i]=NULL;
  }

  // Creating the sub-group communicators
  file_result=MPI_Comm_group(MPI_COMM_WORLD,&world_group);
  if(file_result != MPI_SUCCESS){
    fprintf(stderr,"rank %d MPI_Comm_group to get master returned %d!\n",my_rank,file_result);
  }
  for(i=0;i<n_archive_files;i++){
    rank_ranges[0][0] = i;
    rank_ranges[0][1] = ((n_ranks/n_archive_files)*n_archive_files) + i;
    if(rank_ranges[0][1] >= n_ranks){
      rank_ranges[0][1] -= n_archive_files;
    }
    rank_ranges[0][2] = n_archive_files;  
    if(my_rank == 0){
      fprintf(stderr,"triple [%02d]: %4d  %4d  %4d\n",
	      i,rank_ranges[0][0],rank_ranges[0][1],rank_ranges[0][2]);
    }
    n_ranks_in_archive_file[i]=((rank_ranges[0][1] - rank_ranges[0][0]) / n_archive_files)+1;
    file_result=MPI_Group_range_incl(world_group,1,rank_ranges,sub_group+i);
    if(file_result != MPI_SUCCESS){
      fprintf(stderr,"rank %d MPI_Group_range_incl() returned %d\n",
	      my_rank,file_result);
    }
  }
  // sub groups created; now create the sub-communicators
  for(i=0;i<n_archive_files;i++){
    MPI_Comm_create(MPI_COMM_WORLD,sub_group[i],sub_comm+i);
    if(file_result != MPI_SUCCESS){
      fprintf(stderr,"rank_%d MPI_Comm_create() returned %d\n",
	      my_rank,file_result);
    }
  }
  my_communicator = my_rank % n_archive_files;
  
  archive_filename_length = strlen(filename_to_write) + 10;
  for(i=0;i<n_archive_files;i++){
    if((archive_filenames[i]=
	(char*)malloc(sizeof(char)*archive_filename_length))==NULL){
      fprintf(stderr,"Could not allocate archive_filename member # %d!\n",i);
      MPI_Finalize();
      return -4;
    }
    sprintf(archive_filenames[i],"%s__%02d",filename_to_write,i);
  } // for(i=0;
  
  if(my_rank==0){
    fprintf(stderr,"Writing to %d archive files:\n",n_archive_files);
    for(i=0;i<n_archive_files;i++){
      fprintf(stderr,"   %s\n",archive_filenames[i]);
    }
  }
  
  // allocate transfer buffer
  if((rank_buffer=(void*)malloc(buffer_size))==NULL){
    fprintf(stderr,"rank %d failed to allocate buffer!\n",my_rank);
    MPI_Finalize();
    return -3;
  }
  
  // fill buffer with numbers
  for(i=0;i<(data_bytes/sizeof(int));i++){
    rank_buffer[i]=(i*22)+7;
  }

  // All the ranks have a buffer ready to go
  // now get the collective file(s) set up for writing.

  if((archive_file_MPI=(MPI_File**)malloc(sizeof(MPI_File*)*n_archive_files))==NULL){
    fprintf(stderr,"rank %d could not allocate array for archive file pointers!\n",my_rank);
    MPI_Finalize();
    return 75;
  }
  for(i=0;i<n_archive_files;i++){
    if((archive_file_MPI[i]=(MPI_File*)malloc(sizeof(MPI_File)))==NULL){
      fprintf(stderr,"rank %d could not allocate MPI file pointer number %d!!\n",my_rank,i);
      return 76;
    }
  }
  
  /*
  for(i=0;i<n_archive_files;i++){
    file_result=MPI_File_open(MPI_COMM_WORLD, archive_filenames[i], 
			      MPI_MODE_WRONLY | MPI_MODE_CREATE , 
			      my_Info, archive_file_MPI[i]);
    if(file_result != MPI_SUCCESS){
      fprintf(stderr,"MPI_File_open for archive buffer:  returned error!  Rank %d file >%s<\n",
	      my_rank,
	      archive_filenames[i]);
      return 3; 
    }
  }
  */

  // all files open THEIR file in THEIR communicator
  
  total_archive_file_size = 
    ((long int)(n_ranks_in_archive_file[my_communicator])) * 
    ((long int)(n_iterations)) * 
    ((long int)(buffer_size));
  file_result=MPI_File_open(sub_comm[my_communicator], archive_filenames[my_communicator], 
			    MPI_MODE_WRONLY | MPI_MODE_CREATE , 
			    my_Info, archive_file_MPI[my_communicator]);
  if(file_result != MPI_SUCCESS){
    fprintf(stderr,"MPI_File_open for archive buffer:  returned error!  Rank %d file >%s< comm %d\n",
	    my_rank,
	    archive_filenames[my_communicator],my_communicator);

    MPI_Finalize();
    return 3; 
  }
  
  file_result=MPI_File_set_size((*(archive_file_MPI[my_communicator])),total_archive_file_size);
  
  if(file_result != MPI_SUCCESS){
    fprintf(stderr,"MPI_File_set_size for archive buffer:  returned error!  Rank %d file >%s< comm %d\n",
	    my_rank,
	    archive_filenames[my_communicator],my_communicator);
    
    MPI_Finalize();
    return 4; 
  }
  
  
  // file(s) is(are) open on all ranks. 
  // time to do a whole mess of writing to it(them).
  MPI_Barrier(MPI_COMM_WORLD);
  if(my_rank==0){
    fprintf(stderr,"About to begin data writing loop.\n");
    time(&time_before);
  }

  /*  if(n_archive_files>1){
    my_target_file = my_rank % n_archive_files;
  }
  else{
    my_target_file=0;
  }
  */
      
  for(i=0;i<n_iterations;i++){
    stage_archive_file_offset = 
      ((long int)( ((long int)i) * (((long int)(n_ranks/n_archive_files)) * ((long int)buffer_size)) )) + 
      ((long int)(( (my_rank/n_archive_files) * buffer_size)));
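    /* Offset layout: iteration i owns a contiguous block of
       (n_ranks/n_archive_files)*buffer_size bytes; within that block, each rank
       sharing this file writes at slot (my_rank/n_archive_files)*buffer_size. */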
    //    file_result=MPI_File_write_at_all(*archive_file_MPI,stage_archive_file_offset,rank_buffer,
    //				      data_bytes,MPI_CHAR,&my_MPI_Status);
    file_result=MPI_File_write_at_all((*(archive_file_MPI[my_communicator])),stage_archive_file_offset,rank_buffer,
				      data_bytes,MPI_CHAR,&my_MPI_Status);
    if(file_result != MPI_SUCCESS){
      fprintf(stderr,"rank %d i=%d got %d from MPI_File_write_at_all\n",my_rank,i,file_result);
      fprintf(stderr,"failed in i=%d communicator %d!!\n",i,my_communicator);
      MPI_Finalize();
      return 77;
    }
    if(my_rank==0 && (!(i%20))){
      fprintf(stderr,".");
    }
  } // for(i=0....
  if(my_rank==0) fprintf(stderr,"\n");
  //  MPI_File_close((*(ar
  MPI_Barrier(MPI_COMM_WORLD);
  if(my_rank==0){
    time(&time_after);
    total_data_transfer = ((long int)data_bytes) * ((long int)n_ranks) * ((long int)n_iterations);
    total_elapsed_time_s = time_after - time_before;
    total_transferred_gb = ((float)(total_data_transfer))/1.0e9;
    fprintf(stderr,"total_time: %d seconds to transfer %3.4f GB\n",
	    total_elapsed_time_s,total_transferred_gb);
    transfer_speed_gb_s = 
      (  total_transferred_gb / 
	 ((float)total_elapsed_time_s) );
    fprintf(stderr,"transfer speed: %3.4f GB/s\n",transfer_speed_gb_s);
  }

  // all MPI stuff above
  MPI_Finalize();
  
  return 0;
}
Example #21
0
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #22
0
/* Run async tests. */
int main(int argc, char **argv)
{
    int my_rank; /* Zero-based rank of processor. */
    int ntasks; /* Number of processors involved in current execution. */
    int iosysid_world; /* The ID for the parallel I/O system. */
    int even_iosysid; /* The ID for iosystem of even_comm. */
    int overlap_iosysid; /* The ID for iosystem of overlap_comm. */
    MPI_Group world_group; /* An MPI group of world. */
    MPI_Group even_group; /* An MPI group of 0 and 2. */
    MPI_Group overlap_group; /* An MPI group of 0, 1, and 3. */
    MPI_Comm even_comm = MPI_COMM_NULL; /* Communicator for tasks 0, 2 */
    MPI_Comm overlap_comm = MPI_COMM_NULL; /* Communicator for tasks 0, 1, 3. */
    int even_rank = -1, overlap_rank = -1; /* Task's rank in each communicator. */
    int even_size = 0, overlap_size = 0; /* Size of communicator. */
    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
    MPI_Comm test_comm;
    int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
    int ret; /* Return code. */

    /* Initialize test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS,
                              -1, &test_comm)))
        ERR(ERR_INIT);

    /* Test code runs on TARGET_NTASKS tasks. The left over tasks do
     * nothing. */
    if (my_rank < TARGET_NTASKS)
    {
        /* Figure out iotypes. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        /* Test with both rearrangers. */
        for (int r = 0; r < NUM_REARRANGERS; r++)
        {
            /* Initialize PIO system on world. */
            if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, rearranger[r],
                                           &iosysid_world)))
                ERR(ret);

            /* Set the error handler. */
            if ((ret = PIOc_set_iosystem_error_handling(iosysid_world, PIO_BCAST_ERROR, NULL)))
                ERR(ret);

            /* Get MPI_Group of world comm. */
            if ((ret = MPI_Comm_group(test_comm, &world_group)))
                ERR(ret);

            /* Create a group with tasks 0 and 2. */
            int even_ranges[EVEN_NUM_RANGES][3] = {{0, 2, 2}};
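            /* (first=0, last=2, stride=2) => world ranks 0 and 2. */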
            if ((ret = MPI_Group_range_incl(world_group, EVEN_NUM_RANGES, even_ranges,
                                            &even_group)))
                ERR(ret);

            /* Create a communicator from the even_group. */
            if ((ret = MPI_Comm_create(test_comm, even_group, &even_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in even group. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(even_comm, &even_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(even_comm, &even_size)))
                    MPIERR(ret);
            }

            /* Create a group with tasks 0, 1, and 3. */
            int overlap_ranges[OVERLAP_NUM_RANGES][3] = {{0, 0, 1}, {1, 3, 2}};
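            /* {0,0,1} => rank 0; {1,3,2} => ranks 1 and 3. */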
            if ((ret = MPI_Group_range_incl(world_group, OVERLAP_NUM_RANGES, overlap_ranges,
                                            &overlap_group)))
                ERR(ret);

            /* Create a communicator from the overlap_group. */
            if ((ret = MPI_Comm_create(test_comm, overlap_group, &overlap_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in overlap
             * group. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(overlap_comm, &overlap_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(overlap_comm, &overlap_size)))
                    MPIERR(ret);
            }

            /* Initialize PIO system for even. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(even_comm, NUM_IO1, STRIDE1, BASE1, rearranger[r],
                                               &even_iosysid)))
                    ERR(ret);

                /* These should not work. */
                if (PIOc_set_hint(even_iosysid + TEST_VAL_42, NULL, NULL) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_set_hint(even_iosysid, NULL, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Set the hint (which will be ignored). */
                if ((ret = PIOc_set_hint(even_iosysid, "hint", "hint_value")))
                    ERR(ret);

                /* Set the error handler. */
                /*PIOc_Set_IOSystem_Error_Handling(even_iosysid, PIO_BCAST_ERROR);*/
                if ((ret = PIOc_set_iosystem_error_handling(even_iosysid, PIO_BCAST_ERROR, NULL)))
                    ERR(ret);
            }

            /* Initialize PIO system for overlap comm. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(overlap_comm, NUM_IO2, STRIDE1, BASE1, rearranger[r],
                                               &overlap_iosysid)))
                    ERR(ret);

                /* Set the error handler. */
                PIOc_Set_IOSystem_Error_Handling(overlap_iosysid, PIO_BCAST_ERROR);
            }

            for (int i = 0; i < num_flavors; i++)
            {
                char fname0[PIO_MAX_NAME + 1];
                char fname1[PIO_MAX_NAME + 1];
                char fname2[PIO_MAX_NAME + 1];

                sprintf(fname0, "%s_file_0_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname0, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname1, "%s_file_1_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname1, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname2, "%s_file_2_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname2, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                /* Now check the first file from WORLD communicator. */
                int ncid;
                if ((ret = open_and_check_file(test_comm, iosysid_world, flavor[i], &ncid, fname0,
                                               ATTNAME, DIMNAME, 1, my_rank)))
                    ERR(ret);

                /* Now have the even communicators check the files. */
                int ncid2;
                if (even_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(even_comm, even_iosysid, flavor[i], &ncid2,
                                                   fname2, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(even_comm, even_iosysid, flavor[i], ncid2, fname2,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Now have the overlap communicators check the files. */
                int ncid3;
                if (overlap_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(overlap_comm, overlap_iosysid, flavor[i],
                                                   &ncid3, fname1, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(overlap_comm, overlap_iosysid, flavor[i], ncid3, fname1,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Close the still-open files. */
                if (even_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid2)))
                        ERR(ret);
                if (overlap_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid3)))
                        ERR(ret);
                if ((ret = PIOc_closefile(ncid)))
                    ERR(ret);

            } /* next iotype */
        
            /* Finalize PIO systems. */
            if (even_comm != MPI_COMM_NULL)
                if ((ret = PIOc_finalize(even_iosysid)))
                    ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_finalize(overlap_iosysid)))
                    ERR(ret);
            }
            if ((ret = PIOc_finalize(iosysid_world)))
                ERR(ret);

            /* Free MPI resources used by test. */
            if ((ret = MPI_Group_free(&overlap_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&even_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&world_group)))
                ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&overlap_comm)))
                    ERR(ret);
            if (even_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&even_comm)))
                    ERR(ret);
        } /* next rearranger */
    } /* my_rank < TARGET_NTASKS */

    /* Finalize test. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ERR_AWFUL;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
Example #23
0
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
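    /* (size/2, size-1, 1) => the upper half of incomm's ranks. */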
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int i;
  int ranks[2], ranges[1][3];
  MPI_Group newgroup[GROUP_CONSTRUCTOR_COUNT]; 
  MPI_Group newgroup2[GROUP_CONSTRUCTOR_COUNT]; 
  MPI_Comm temp;
  MPI_Comm intercomm = MPI_COMM_NULL;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  ranks[0] = 0;
  ranks[1] = 1;

  ranges[0][0] = 0;
  ranges[0][1] = 2;
  ranges[0][2] = 2;
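  /* (0, 2, 2) selects ranks 0 and 2: included by MPI_Group_range_incl below,
     excluded by MPI_Group_range_excl. */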

  MPI_Barrier (comm);

  if (nprocs < 3) {
      printf ("requires at least 3 tasks\n");
  }
  else {
    /* create the groups */
    if (GROUP_CONSTRUCTOR_COUNT > 0)
      MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);

    if (GROUP_CONSTRUCTOR_COUNT > 1)
      MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);    

    if (GROUP_CONSTRUCTOR_COUNT > 2)
      MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);

    if (GROUP_CONSTRUCTOR_COUNT > 3)
      MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);    

    if (GROUP_CONSTRUCTOR_COUNT > 4)
      MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);    

    if (GROUP_CONSTRUCTOR_COUNT > 5)
      MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);

    if (GROUP_CONSTRUCTOR_COUNT > 6)
      MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);

    if (GROUP_CONSTRUCTOR_COUNT > 7)
      MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);

    if (GROUP_CONSTRUCTOR_COUNT > 8) {
      /* need lots of stuff for this constructor... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);

      if (rank % 3) {
	MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, 
			      (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
			      nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
			      INTERCOMM_CREATE_TAG, &intercomm);

	MPI_Comm_remote_group (intercomm, &newgroup[8]);

	MPI_Comm_free (&intercomm);
      }
      else {
	MPI_Comm_group (temp, &newgroup[8]);
      }

      MPI_Comm_free (&temp);
    }
  }      

  MPI_Barrier (comm);

  printf ("(%d) Finished normally\n", rank);
  MPI_Finalize ();
}
Example #25
0
int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *excl;
    int ranges[1][3];
    int isLeft, rleader;
    MPI_Group world_group, high_group, even_group;
    MPI_Comm local_comm, inter_comm, test_comm, outcomm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (size < 2) {
        printf("this test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Idup MPI_COMM_WORLD multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */

    /* Comm_dup */
    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of MPI_COMM_WORLD */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
    } else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);

    if (rank == 0) {
        rleader = size / 2;
    } else if (rank == size / 2) {
        rleader = 0;
    } else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }

    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #26
0
FORT_DLL_SPEC void FORT_CALL mpi_group_range_incl_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint v3[], MPI_Fint *v4, MPI_Fint *ierr ){
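    /* The cast is sufficient because a Fortran ranges(3,n) array has the same
       memory layout as the C int[n][3] expected by MPI_Group_range_incl,
       provided MPI_Fint and int are the same size (as assumed here). */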
    *ierr = MPI_Group_range_incl( (MPI_Group)*v1, (int)*v2, (int (*)[3]) v3, (MPI_Group *)(v4) );
}
Example #27
0
int main(int argc, char *argv[])
{
    MPI_Group gworld, g;
    MPI_Comm comm, newcomm[MAX_LOOP];
    int wsize, wrank, range[1][3], errs = 0;
    double t[MAX_LOG_WSIZE], tf;
    int maxi, i, k, ts, gsize[MAX_LOG_WSIZE];

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &wsize);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    if (wrank == 0)
        MTestPrintfMsg(1, "size\ttime\n");

    MPI_Comm_group(MPI_COMM_WORLD, &gworld);
    ts = 1;
    comm = MPI_COMM_WORLD;
    for (i = 0; ts <= wsize; i++, ts = ts + ts) {
        /* Create some groups with at most ts members */
        range[0][0] = ts - 1;
        range[0][1] = 0;
        range[0][2] = -1;
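        /* Negative stride is legal: (ts-1, 0, -1) enumerates ranks
           ts-1, ts-2, ..., 0, giving a group of ts members. */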
        MPI_Group_range_incl(gworld, 1, range, &g);

        MPI_Barrier(MPI_COMM_WORLD);
        tf = MPI_Wtime();
        for (k = 0; k < MAX_LOOP; k++)
            MPI_Comm_create(comm, g, &newcomm[k]);
        tf = MPI_Wtime() - tf;
        MPI_Allreduce(&tf, &t[i], 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
        t[i] = t[i] / MAX_LOOP;
        gsize[i] = ts;
        if (wrank == 0)
            MTestPrintfMsg(1, "%d\t%e\n", ts, t[i]);
        MPI_Group_free(&g);
        if (newcomm[0] != MPI_COMM_NULL)
            for (k = 0; k < MAX_LOOP; k++)
                MPI_Comm_free(&newcomm[k]);
    }
    MPI_Group_free(&gworld);
    maxi = i - 1;

    /* The cost should be linear or at worst ts*log(ts).
     * We can check this in a number of ways.
     */
    if (wrank == 0) {
        for (i = 4; i <= maxi; i++) {
            double rdiff;
            if (t[i] > 0) {
                rdiff = (t[i] - t[i - 1]) / t[i];
                if (rdiff >= 4) {
                    errs++;
                    fprintf(stderr,
                            "Relative difference between group of size %d and %d is %e exceeds 4\n",
                            gsize[i - 1], gsize[i], rdiff);
                }
            }
        }
    }

    MTest_Finalize(errs);

    MPI_Finalize();

    return 0;
}
void IOserver::initialize(int proc_size0,int proc_size1, int IOserver_size, int IO_node_size)
{
    int rang[3];
    int totalMPIsize;
    int itemp;
    
    MPI_Group groupTemp1,groupTemp2;
   
    MPI_Comm_group(MPI_COMM_WORLD,&world_group_);
    MPI_Group_size(world_group_,&totalMPIsize);
    
    
    if((proc_size0*proc_size1) % IOserver_size!=0 || IOserver_size % IO_node_size!=0)
    {
        //cout<<"IOserver wrong number of process"<<endl;
        exit(-44);
    }
    
    
    rang[0]=0;
    rang[1]=proc_size0*proc_size1-1;
    rang[2]=1;
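    /* Compute group: world ranks 0 .. proc_size0*proc_size1-1. */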
    MPI_Group_range_incl(world_group_,1,&rang,&computeGroup_);
    MPI_Comm_create(MPI_COMM_WORLD,computeGroup_ , &computeComm_);    
    
    MPI_Group_rank(computeGroup_, &computeRank_);
    
    
    rang[0]=proc_size0*proc_size1;
    rang[1]=proc_size0*proc_size1 + IOserver_size - 1 ;
    rang[2]=1;
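    /* IO group: the IOserver_size world ranks immediately after the compute grid. */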
    MPI_Group_range_incl(world_group_,1,&rang,&IO_Group_);
    MPI_Comm_create(MPI_COMM_WORLD,IO_Group_ , &IO_Comm_);
    
    MPI_Group_rank(IO_Group_, &IO_Rank_);
    
    
    
    rang[0]=proc_size0*proc_size1;
    rang[1]=0;
    MPI_Group_incl(world_group_,2,&rang[0],&syncLineGroup_);
    MPI_Comm_create(MPI_COMM_WORLD,syncLineGroup_ , &syncLineComm_);
    
    MPI_Group_rank(syncLineGroup_, &syncLineRank_);
    
    
    IO_ClientSize_=proc_size0*proc_size1/IOserver_size;
    IO_NodeSize_=IO_node_size;
    
    if(computeRank_!=MPI_UNDEFINED)itemp = floor((float)computeRank_/(float)IO_ClientSize_) * IO_ClientSize_;
    else itemp = IO_Rank_ * IO_ClientSize_;
    //if(computeRank_!=MPI_UNDEFINED)cout<< "compute core: "<< computeRank_ <<" , "<<  itemp<<endl;
    //if(IO_Rank_!=MPI_UNDEFINED)cout<< "IO core: "<< IO_Rank_ <<" , "<<  itemp<<endl;
    
    rang[0] = itemp;
    rang[1] = itemp + IO_ClientSize_ -1;
    rang[2]=1;
    MPI_Group_range_incl(world_group_,1,&rang,&groupTemp2);
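    /* groupTemp2: the IO_ClientSize_ compute ranks served by this process's IO
       server rank; groupTemp1 below is that single IO rank, and their union
       becomes masterClientGroup_. */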
    
    if(computeRank_!=MPI_UNDEFINED)itemp = proc_size0*proc_size1 + floor((float)computeRank_/(float)IO_ClientSize_);
    else itemp = proc_size0*proc_size1 + IO_Rank_;
    //if(computeRank_!=MPI_UNDEFINED)cout<< "compute core: "<< computeRank_ <<" , "<<  itemp<<endl;
    //if(IO_Rank_!=MPI_UNDEFINED)cout<< "IO core: "<< IO_Rank_ <<" , "<<  itemp<<endl;

    MPI_Group_incl(world_group_,1,&itemp,&groupTemp1);
    
    MPI_Group_union(groupTemp1,groupTemp2,&masterClientGroup_);
    MPI_Comm_create(MPI_COMM_WORLD,masterClientGroup_ , &masterClientComm_);
    
    
    //if(computeRank_!=MPI_UNDEFINED)cout<< "compute core: "<< computeRank_ <<" , "<<  itemp<<endl;
    //if(IO_Rank_!=MPI_UNDEFINED)cout<< "IO core: "<< IO_Rank_ <<" , "<<  itemp<<endl;
    
    
    
    if(IO_Rank_!=MPI_UNDEFINED)
    {
                
        itemp= floor((float)IO_Rank_/ (float)IO_node_size) * IO_node_size;
        
        rang[0] = itemp;
        rang[1] = itemp + IO_node_size -1;
        rang[2]=1;
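        /* Split IO_Group_ into nodes of IO_node_size consecutive IO ranks;
           this rank's node starts at itemp. */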
        MPI_Group_range_incl(IO_Group_,1,&rang,&IO_NodeGroup_);
        MPI_Comm_create(IO_Comm_,IO_NodeGroup_ , &IO_NodeComm_);
        MPI_Group_rank(IO_NodeGroup_, &IO_NodeRank_);
        
        files = new file_struct[MAX_FILE_NUMBER];
        dataBuffer = (char*)malloc(IO_BUFFERS_TOTAL_SIZE);
        IO_Node_=floor((float)IO_Rank_/ (float)IO_node_size) ;
        
    }
    
    
    
    sendRequest = MPI_REQUEST_NULL;
    
}