void init_comms(void)
{
    extern int numtasks, rank, myfieldrank, myenglandrank, mybrazilrank,
               field_ranks[12], eng_ranks[11], bra_ranks[11];
    extern MPI_Group world, england, brazil, engfield, brafield, field;
    extern MPI_Comm eng_comm, bra_comm, engfield_comm, brafield_comm, field_comm;

    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (numtasks != NPROCS) {
        printf("Must specify MP_PROCS= %d. Terminating.\n", NPROCS);
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    MPI_Comm_group(MPI_COMM_WORLD, &world);

    /* Build the three base groups from explicit rank lists. */
    MPI_Group_incl(world, 12, field_ranks, &field);
    MPI_Group_incl(world, 11, eng_ranks, &england);
    MPI_Group_incl(world, 11, bra_ranks, &brazil);

    /* Each union keeps the first group's members in order, then appends
       members of the second group that are not already present. */
    MPI_Group_union(field, england, &engfield);
    MPI_Group_union(field, brazil, &brafield);

    /* Communicator creation is collective over MPI_COMM_WORLD; processes
       outside a group get MPI_COMM_NULL. */
    MPI_Comm_create(MPI_COMM_WORLD, field, &field_comm);
    MPI_Comm_create(MPI_COMM_WORLD, england, &eng_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brazil, &bra_comm);
    MPI_Comm_create(MPI_COMM_WORLD, engfield, &engfield_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brafield, &brafield_comm);

    /* MPI_Group_rank reports MPI_UNDEFINED for non-members. */
    MPI_Group_rank(field, &myfieldrank);
    MPI_Group_rank(england, &myenglandrank);
    MPI_Group_rank(brazil, &mybrazilrank);
}
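/* A minimal, self-contained sketch (not from any of the projects in this
   listing) of the MPI_Group_union semantics the code above relies on: the
   result holds the first group's members, with the second group's members
   deduplicated against them.  The group names and rank lists are illustrative
   assumptions; run with at least 4 processes so all listed ranks exist. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, low, high, both;
    int low_ranks[]  = {0, 1, 2};
    int high_ranks[] = {2, 3};    /* overlaps rank 2 with 'low' */
    int size;

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Group_incl(world_grp, 3, low_ranks, &low);
    MPI_Group_incl(world_grp, 2, high_ranks, &high);

    /* Union keeps {0,1,2} from 'low', then appends 3 from 'high'. */
    MPI_Group_union(low, high, &both);
    MPI_Group_size(both, &size);   /* 4, not 5: the overlap is deduplicated */
    printf("union size = %d\n", size);

    MPI_Group_free(&low);
    MPI_Group_free(&high);
    MPI_Group_free(&both);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}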
JNIEXPORT jlong JNICALL Java_mpi_Group_union(
        JNIEnv *env, jclass jthis, jlong group1, jlong group2)
{
    MPI_Group newGroup;
    int rc = MPI_Group_union((MPI_Group)group1, (MPI_Group)group2, &newGroup);
    ompi_java_exceptionCheck(env, rc);
    return (jlong)newGroup;
}
FORTRAN_API void FORT_CALL mpi_group_union_(
        MPI_Fint *group1, MPI_Fint *group2,
        MPI_Fint *group_out, MPI_Fint *__ierr)
{
    MPI_Group l_group_out;
    *__ierr = MPI_Group_union(MPI_Group_f2c(*group1),
                              MPI_Group_f2c(*group2),
                              &l_group_out);
    if (*__ierr == MPI_SUCCESS)
        *group_out = MPI_Group_c2f(l_group_out);
}
dart_ret_t dart_adapt_group_union(
    const dart_group_t *g1,
    const dart_group_t *g2,
    dart_group_t *gout)
{
    return MPI_Group_union(g1->mpi_group, g2->mpi_group, &(gout->mpi_group));
}
dart_ret_t dart_group_union(
    const dart_group_t *g1,
    const dart_group_t *g2,
    dart_group_t *gout)
{
    /* g1 and g2 are both ordered groups. */
    int ret = MPI_Group_union(g1->mpi_group, g2->mpi_group, &(gout->mpi_group));
    if (ret == MPI_SUCCESS) {
        int i, j, k, size_in, size_out;
        dart_unit_t *pre_unitidsout, *post_unitidsout;
        MPI_Group group_all;
        MPI_Comm_group(MPI_COMM_WORLD, &group_all);
        MPI_Group_size(gout->mpi_group, &size_out);
        if (size_out > 1) {
            MPI_Group_size(g1->mpi_group, &size_in);
            pre_unitidsout  = (dart_unit_t *)malloc(size_out * sizeof(dart_unit_t));
            post_unitidsout = (dart_unit_t *)malloc(size_out * sizeof(dart_unit_t));
            dart_group_getmembers(gout, pre_unitidsout);

            /* MPI_Group_union yields g1's members (already sorted) followed
               by the members of g2 not in g1 (also sorted), so the output is
               two sorted runs: [0, size_in) and [size_in, size_out).  Merge
               them to restore a single ascending order. */
            i = k = 0;
            j = size_in;
            while ((i <= size_in - 1) && (j <= size_out - 1)) {
                post_unitidsout[k++] = (pre_unitidsout[i] <= pre_unitidsout[j])
                                       ? pre_unitidsout[i++]
                                       : pre_unitidsout[j++];
            }
            while (i <= size_in - 1) {
                post_unitidsout[k++] = pre_unitidsout[i++];
            }
            while (j <= size_out - 1) {
                post_unitidsout[k++] = pre_unitidsout[j++];
            }

            /* Release the unsorted union group, then rebuild it in merged
               order. */
            MPI_Group_free(&(gout->mpi_group));
            MPI_Group_incl(group_all, size_out, post_unitidsout,
                           &(gout->mpi_group));
            free(pre_unitidsout);
            free(post_unitidsout);
        }
        ret = DART_OK;
    }
    return ret;
}
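/* Hedged illustration (not part of DART) of why the merge above is needed.
   With g1 = {0, 5} and g2 = {2, 3}, MPI_Group_union orders the members
   0, 5, 2, 3 -- two sorted runs, not one sorted list -- which translating
   each union rank back to MPI_COMM_WORLD makes visible.  Requires at least
   6 processes so all listed ranks exist. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, g1, g2, u;
    int r1[] = {0, 5}, r2[] = {2, 3};
    int i, usize, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Group_incl(world_grp, 2, r1, &g1);
    MPI_Group_incl(world_grp, 2, r2, &g2);
    MPI_Group_union(g1, g2, &u);
    MPI_Group_size(u, &usize);

    if (rank == 0) {
        int in[8], out[8];
        for (i = 0; i < usize; i++) in[i] = i;
        MPI_Group_translate_ranks(u, usize, in, world_grp, out);
        for (i = 0; i < usize; i++)
            printf("union rank %d -> world rank %d\n", i, out[i]); /* 0,5,2,3 */
    }

    MPI_Group_free(&g1);
    MPI_Group_free(&g2);
    MPI_Group_free(&u);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}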
/*
 * Class:     mpi_Group
 * Method:    union
 * Signature: (Lmpi/Group;Lmpi/Group;)J
 */
JNIEXPORT jlong JNICALL Java_mpi_Group_union(JNIEnv *env, jclass jthis,
                                             jobject group1, jobject group2)
{
    MPI_Group newgroup;

    ompi_java_clearFreeList(env);

    MPI_Group_union(
        (MPI_Group)((*env)->GetLongField(env, group1, ompi_java.GrouphandleID)),
        (MPI_Group)((*env)->GetLongField(env, group2, ompi_java.GrouphandleID)),
        &newgroup);
    return (jlong)newgroup;
}
static VALUE group_union(VALUE self, VALUE rgrp2)
{
    int rv;
    MPI_Group *grp1, *grp2, *newgrp;

    Data_Get_Struct(self, MPI_Group, grp1);
    /* Unwrap the second Ruby object (the original mistakenly passed the
       uninitialized pointer grp2 instead of the VALUE rgrp2). */
    Data_Get_Struct(rgrp2, MPI_Group, grp2);
    newgrp = ALLOC(MPI_Group);
    rv = MPI_Group_union(*grp1, *grp2, newgrp);
    mpi_exception(rv);
    return group_new(newgrp);
}
void mpi_group_union_f(MPI_Fint *group1, MPI_Fint *group2,
                       MPI_Fint *newgroup, MPI_Fint *ierr)
{
    ompi_group_t *c_group1, *c_group2, *c_newgroup;

    /* Convert the Fortran handles to their C representations. */
    c_group1 = MPI_Group_f2c(*group1);
    c_group2 = MPI_Group_f2c(*group2);

    *ierr = OMPI_INT_2_FINT(MPI_Group_union(c_group1, c_group2, &c_newgroup));

    /* Translate the result from C back to a Fortran handle. */
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *newgroup = c_newgroup->grp_f_to_c_index;
    }
}
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1, g2;
    MPI_Comm comm, newcomm, dupcomm;
    int errs = 0, mpi_errno, rank, size;
    int errclass, worldrank;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);

    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);
    MPI_Comm_dup(comm, &dupcomm);
    MPI_Comm_group(dupcomm, &g2);

    /* Check that group_union rejects a NULL output argument. */
    mpi_errno = MPI_Group_union(g1, g2, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    /* comm aliases MPI_COMM_WORLD, which must never be freed. */
    MPI_Comm_free(&newcomm);
    MPI_Comm_free(&dupcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MPI_Group_free(&g2);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
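/* A standalone sketch of the error-checking pattern used in the test above,
   without the MTest harness.  It assumes an implementation that reports
   MPI_ERR_ARG for a NULL output pointer; comparing error classes rather than
   raw return codes is the portable approach, since codes are
   implementation-specific. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp;
    int rc, errclass;

    MPI_Init(&argc, &argv);
    /* Without this, the default MPI_ERRORS_ARE_FATAL aborts on the first
       error instead of returning a code. */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    rc = MPI_Group_union(world_grp, world_grp, NULL);  /* deliberately invalid */
    MPI_Error_class(rc, &errclass);
    printf("error class %s MPI_ERR_ARG\n",
           errclass == MPI_ERR_ARG ? "is" : "is not");

    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}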
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init( &argc, &argv );
    MPI_Comm_group( MPI_COMM_WORLD, &g1 );
    MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    if (size < 8) {
        fprintf( stderr,
                 "Test requires 8 processes (16 preferred), only %d provided\n",
                 size );
        errs++;
    }

    /* 3 members: this process plus ranks 2 and 7 (adjusted so all three stay
       distinct); this process is rank 0 of the result */
    ranks[0] = myrank;
    ranks[1] = 2;
    ranks[2] = 7;
    if (myrank == 2) ranks[1] = 3;
    if (myrank == 7) ranks[2] = 6;
    MPI_Group_incl( g1, 3, ranks, &g2 );

    /* Check the resulting group */
    MPI_Group_size( g2, &size );
    MPI_Group_rank( g2, &rank );
    if (size != 3) {
        fprintf( stderr, "Size should be %d, is %d\n", 3, size );
        errs++;
    }
    if (rank != 0) {
        fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
        errs++;
    }

    rin[0] = 0; rin[1] = 1; rin[2] = 2;
    MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
    for (i=0; i<3; i++) {
        if (rout[i] != ranks[i]) {
            fprintf( stderr, "translated rank[%d] %d should be %d\n",
                     i, rout[i], ranks[i] );
            errs++;
        }
    }

    /* Translate the process of the self group against another group */
    MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
    rin[0] = 0;
    MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
    if (rout[0] != myrank) {
        fprintf( stderr, "translation of self is %d, should be %d\n",
                 rout[0], myrank );
        errs++;
    }
    for (i=0; i<size; i++) rin[i] = i;
    MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
    for (i=0; i<size; i++) {
        if (i == myrank && rout[i] != 0) {
            fprintf( stderr, "translated world to self of %d is %d\n",
                     i, rout[i] );
            errs++;
        }
        else if (i != myrank && rout[i] != MPI_UNDEFINED) {
            fprintf( stderr,
                     "translated world to self of %d should be undefined, is %d\n",
                     i, rout[i] );
            errs++;
        }
    }
    MPI_Group_free( &selfgroup );

    /* Exclude everyone in our group */
    {
        int ii, *lranks, g1size;

        MPI_Group_size( g1, &g1size );
        lranks = (int *)malloc( g1size * sizeof(int) );
        for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
        MPI_Group_excl( g1, g1size, lranks, &g6 );
        if (g6 != MPI_GROUP_EMPTY) {
            fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
            errs++;
            MPI_Group_free( &g6 );
        }
        free( lranks );
    }

    /* Add tests for additional group operations */
    /*
       g2 = incl 1,3,7
       g3 = excl 1,3,7
       intersect( w, g2 )  => g2
       intersect( w, g3 )  => g3
       intersect( g2, g3 ) => empty

       g4 = rincl 1:n-1:2
       g5 = rexcl 1:n-1:2
       union( g4, g5 ) => world
       g6 = rincl n-1:1:-1
       g7 = rexcl n-1:1:-1
       union( g6, g7 ) => concat of entries, similar to world
       diff( w, g2 ) => g3
    */
    MPI_Group_free( &g2 );

    range[0][0] = 1; range[0][1] = size-1; range[0][2] = 2;
    MPI_Group_range_excl( g1, 1, range, &g5 );

    range[0][0] = 1; range[0][1] = size-1; range[0][2] = 2;
    MPI_Group_range_incl( g1, 1, range, &g4 );
    MPI_Group_union( g4, g5, &g45 );
    MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
        fprintf( stderr,
                 "Comparison with empty group gave %d, not MPI_UNEQUAL\n",
                 result );
    }
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g45 );

    /* Now, duplicate the test, but using negative strides */
    range[0][0] = size-1; range[0][1] = 1; range[0][2] = -2;
    MPI_Group_range_excl( g1, 1, range, &g5 );

    range[0][0] = size-1; range[0][1] = 1; range[0][2] = -2;
    MPI_Group_range_incl( g1, 1, range, &g4 );
    MPI_Group_union( g4, g5, &g45 );
    MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
        fprintf( stderr,
                 "Comparison with empty group (formed with negative strides) gave %d, not MPI_UNEQUAL\n",
                 result );
    }
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g45 );
    MPI_Group_free( &g1 );

    if (myrank == 0) {
        if (errs == 0) {
            printf( " No Errors\n" );
        }
        else {
            printf( "Found %d errors\n", errs );
        }
    }
    MPI_Finalize();
    return 0;
}
value caml_mpi_group_union(value group1, value group2)
{
    MPI_Group group;
    MPI_Group_union(Group_val(group1), Group_val(group2), &group);
    return caml_mpi_alloc_group(group);
}
int main( int argc, char **argv )
{
    int errs=0, toterr;
    MPI_Group basegroup;
    MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10;
    MPI_Group g3a, g3b;
    MPI_Comm comm, newcomm, splitcomm, dupcomm;
    int i, grp_rank, rank, grp_size, size, result;
    int nranks, *ranks, *ranks_out;
    int range[1][3];
    int worldrank;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &worldrank );
    comm = MPI_COMM_WORLD;
    MPI_Comm_group( comm, &basegroup );
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    /* Get the basic information on this group */
    MPI_Group_rank( basegroup, &grp_rank );
    if (grp_rank != rank) {
        errs++;
        fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank );
    }
    MPI_Group_size( basegroup, &grp_size );
    if (grp_size != size) {
        errs++;
        fprintf( stdout, "group size %d != comm size %d\n", grp_size, size );
    }

    /* Form a new communicator with inverted ranking */
    MPI_Comm_split( comm, 0, size - rank, &newcomm );
    MPI_Comm_group( newcomm, &g1 );
    ranks     = (int *)malloc( size * sizeof(int) );
    ranks_out = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;
    nranks = size;
    MPI_Group_translate_ranks( g1, nranks, ranks, basegroup, ranks_out );
    for (i=0; i<size; i++) {
        if (ranks_out[i] != (size - 1) - i) {
            errs++;
            fprintf( stdout, "Translate ranks got %d, expected %d\n",
                     ranks_out[i], (size - 1) - i );
        }
    }

    /* Check Compare */
    MPI_Group_compare( basegroup, g1, &result );
    if (result != MPI_SIMILAR) {
        errs++;
        fprintf( stdout, "Group compare should have been similar, was %d\n",
                 result );
    }
    MPI_Comm_dup( comm, &dupcomm );
    MPI_Comm_group( dupcomm, &g2 );
    MPI_Group_compare( basegroup, g2, &result );
    if (result != MPI_IDENT) {
        errs++;
        fprintf( stdout, "Group compare should have been ident, was %d\n",
                 result );
    }
    MPI_Comm_split( comm, rank < size/2, rank, &splitcomm );
    MPI_Comm_group( splitcomm, &g3 );
    MPI_Group_compare( basegroup, g3, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
        fprintf( stdout, "Group compare should have been unequal, was %d\n",
                 result );
    }

    /* Build two groups that have this process and one other, but do not
       have the same processes */
    ranks[0] = rank;
    ranks[1] = (rank + 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3a );
    ranks[1] = (rank + size - 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3b );
    MPI_Group_compare( g3a, g3b, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
        fprintf( stdout,
                 "Group compare of equal sized but different groups should have been unequal, was %d\n",
                 result );
    }

    /* Build two new groups by excluding members; use Union to put them
       together again */

    /* Exclude 0 */
    for (i=0; i<size; i++) ranks[i] = i;
    MPI_Group_excl( basegroup, 1, ranks, &g4 );
    /* Exclude 1-(size-1) */
    MPI_Group_excl( basegroup, size-1, ranks+1, &g5 );
    MPI_Group_union( g5, g4, &g6 );
    MPI_Group_compare( basegroup, g6, &result );
    if (result != MPI_IDENT) {
        int usize;
        errs++;
        /* See ordering requirements on union */
        fprintf( stdout, "Group excl and union did not give ident groups\n" );
        fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
        MPI_Group_size( g6, &usize );
        fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }
    MPI_Group_union( basegroup, g4, &g7 );
    MPI_Group_compare( basegroup, g7, &result );
    if (result != MPI_IDENT) {
        int usize;
        errs++;
        fprintf( stdout, "Group union of overlapping groups failed\n" );
        fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
        MPI_Group_size( g7, &usize );
        fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }

    /* Use range_excl instead of ranks */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g8 );
    MPI_Group_compare( g5, g8, &result );
    if (result != MPI_IDENT) {
        errs++;
        fprintf( stdout, "Group range excl did not give ident groups\n" );
    }

    MPI_Group_intersection( basegroup, g4, &g9 );
    MPI_Group_compare( g9, g4, &result );
    if (result != MPI_IDENT) {
        errs++;
        fprintf( stdout, "Group intersection did not give ident groups\n" );
    }

    /* Exclude EVERYTHING and check against MPI_GROUP_EMPTY */
    range[0][0] = 0;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g10 );
    MPI_Group_compare( g10, MPI_GROUP_EMPTY, &result );
    if (result != MPI_IDENT) {
        errs++;
        fprintf( stdout, "MPI_GROUP_EMPTY didn't compare against empty group\n" );
    }

    MPI_Group_free( &basegroup );
    MPI_Group_free( &g1 );
    MPI_Group_free( &g2 );
    MPI_Group_free( &g3 );
    MPI_Group_free( &g3a );
    MPI_Group_free( &g3b );
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g6 );
    MPI_Group_free( &g7 );
    MPI_Group_free( &g8 );
    MPI_Group_free( &g9 );
    MPI_Group_free( &g10 );
    MPI_Comm_free( &dupcomm );
    MPI_Comm_free( &splitcomm );
    MPI_Comm_free( &newcomm );
    free( ranks );
    free( ranks_out );

    MPI_Allreduce( &errs, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (worldrank == 0) {
        if (toterr == 0)
            printf( " No Errors\n" );
        else
            printf( "Found %d errors in MPI Group routines\n", toterr );
    }
    MPI_Finalize();
    return toterr;
}
int main (int argc, char **argv)
{
    int nprocs = -1;
    int rank = -1;
    MPI_Comm comm = MPI_COMM_WORLD;
    char processor_name[128];
    int namelen = 128;
    int i;
    int ranks[2], ranges[1][3];
    /* GROUP_CONSTRUCTOR_COUNT and INTERCOMM_CREATE_TAG are macros defined
       elsewhere in the original test. */
    MPI_Group newgroup[GROUP_CONSTRUCTOR_COUNT];
    MPI_Group newgroup2[GROUP_CONSTRUCTOR_COUNT];
    MPI_Comm temp;
    MPI_Comm intercomm = MPI_COMM_NULL;

    /* init */
    MPI_Init (&argc, &argv);
    MPI_Comm_size (comm, &nprocs);
    MPI_Comm_rank (comm, &rank);
    MPI_Get_processor_name (processor_name, &namelen);
    printf ("(%d) is alive on %s\n", rank, processor_name);
    fflush (stdout);

    ranks[0] = 0;
    ranks[1] = 1;

    ranges[0][0] = 0;
    ranges[0][1] = 2;
    ranges[0][2] = 2;

    MPI_Barrier (comm);

    if (nprocs < 3) {
        printf ("requires at least 3 tasks\n");
    }
    else {
        /* create the groups */
        if (GROUP_CONSTRUCTOR_COUNT > 0)
            MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);

        if (GROUP_CONSTRUCTOR_COUNT > 1)
            MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);

        if (GROUP_CONSTRUCTOR_COUNT > 2)
            MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);

        if (GROUP_CONSTRUCTOR_COUNT > 3)
            MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);

        if (GROUP_CONSTRUCTOR_COUNT > 4)
            MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);

        if (GROUP_CONSTRUCTOR_COUNT > 5)
            MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);

        if (GROUP_CONSTRUCTOR_COUNT > 6)
            MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);

        if (GROUP_CONSTRUCTOR_COUNT > 7)
            MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);

        if (GROUP_CONSTRUCTOR_COUNT > 8) {
            /* need lots of stuff for this constructor... */
            MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);

            if (rank % 3) {
                MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
                                      (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
                                      nprocs - 1 :
                                      nprocs - (rank % 3) - (nprocs % 3),
                                      INTERCOMM_CREATE_TAG, &intercomm);
                MPI_Comm_remote_group (intercomm, &newgroup[8]);
                MPI_Comm_free (&intercomm);
            }
            else {
                MPI_Comm_group (temp, &newgroup[8]);
            }
            MPI_Comm_free (&temp);
        }
    }

    MPI_Barrier (comm);
    printf ("(%d) Finished normally\n", rank);
    MPI_Finalize ();
    return 0;
}
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm);
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status);
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size);
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status);
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status);
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses);
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
                array_of_statuses);
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
                array_of_statuses);
  MPI_Iprobe (source, tag, comm, &flag, &status);
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
                recvbuf, recvcount, recvtype, source, recvtag, comm, &status);
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
                        comm, &status);
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype);
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype);
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype);
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
                      oldtype, &newtype);
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype);
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype);
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address);
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent);
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement);
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm);
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype, comm);
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm);
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm);
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
              root, comm);
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
               recvtype, root, comm);
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm);
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount,
                recvtype, root, comm);
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm);
  MPI_Allgatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
                  recvtype, comm);
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                comm);
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype, comm);
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm);
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op);
#endif
  MPI_Op_free (&op);
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size);
  int rank;
  MPI_Group_rank (group, &rank);
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2);
  int result;
  MPI_Group_compare (group1, group2, &result);
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup);
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup);
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup);
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm);
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader,
                        tag, &newintercomm);
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm);
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state);
#endif
  MPI_Keyval_free (&keyval);
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val);
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen);
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler);
#endif
  MPI_Errhandler_set (comm, errhandler);
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen);
  int errorclass;
  MPI_Error_class (errorcode, &errorclass);
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv);
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
FC_FUNC( mpi_group_union, MPI_GROUP_UNION )
     (int *group1, int *group2, int *newgroup, int *ierror)
{
    /* Assumes an implementation whose group handles are plain ints, so the
       Fortran integers can be passed through without conversion. */
    *ierror = MPI_Group_union(*group1, *group2, newgroup);
}
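/* A portable variant of the shim above (an illustrative sketch, not part of
   the original source; the function name is hypothetical): when MPI_Group is
   an opaque type rather than an int, the standard handle converters
   MPI_Group_f2c/MPI_Group_c2f must be used, as the MPICH-style wrapper near
   the top of this section does. */
void mpi_group_union_portable(MPI_Fint *group1, MPI_Fint *group2,
                              MPI_Fint *newgroup, MPI_Fint *ierror)
{
    MPI_Group c_new;
    *ierror = (MPI_Fint)MPI_Group_union(MPI_Group_f2c(*group1),
                                        MPI_Group_f2c(*group2), &c_new);
    if (*ierror == MPI_SUCCESS)
        *newgroup = MPI_Group_c2f(c_new);
}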
void IOserver::initialize(int proc_size0, int proc_size1,
                          int IOserver_size, int IO_node_size)
{
    int rang[3];
    int totalMPIsize;
    int itemp;
    MPI_Group groupTemp1, groupTemp2;

    MPI_Comm_group(MPI_COMM_WORLD, &world_group_);
    MPI_Group_size(world_group_, &totalMPIsize);

    if ((proc_size0 * proc_size1) % IOserver_size != 0 ||
        IOserver_size % IO_node_size != 0)
    {
        //cout<<"IOserver wrong number of process"<<endl;
        exit(-44);
    }

    /* Compute group: ranks [0, proc_size0*proc_size1 - 1]. */
    rang[0] = 0;
    rang[1] = proc_size0 * proc_size1 - 1;
    rang[2] = 1;
    MPI_Group_range_incl(world_group_, 1, &rang, &computeGroup_);
    MPI_Comm_create(MPI_COMM_WORLD, computeGroup_, &computeComm_);
    MPI_Group_rank(computeGroup_, &computeRank_);

    /* IO group: the IOserver_size ranks after the compute ranks. */
    rang[0] = proc_size0 * proc_size1;
    rang[1] = proc_size0 * proc_size1 + IOserver_size - 1;
    rang[2] = 1;
    MPI_Group_range_incl(world_group_, 1, &rang, &IO_Group_);
    MPI_Comm_create(MPI_COMM_WORLD, IO_Group_, &IO_Comm_);
    MPI_Group_rank(IO_Group_, &IO_Rank_);

    /* Sync line: the first IO rank plus compute rank 0. */
    rang[0] = proc_size0 * proc_size1;
    rang[1] = 0;
    MPI_Group_incl(world_group_, 2, &rang[0], &syncLineGroup_);
    MPI_Comm_create(MPI_COMM_WORLD, syncLineGroup_, &syncLineComm_);
    MPI_Group_rank(syncLineGroup_, &syncLineRank_);

    IO_ClientSize_ = proc_size0 * proc_size1 / IOserver_size;
    IO_NodeSize_ = IO_node_size;

    /* Client block: the range of compute ranks served by one IO server. */
    if (computeRank_ != MPI_UNDEFINED)
        itemp = floor((float)computeRank_ / (float)IO_ClientSize_) * IO_ClientSize_;
    else
        itemp = IO_Rank_ * IO_ClientSize_;

    rang[0] = itemp;
    rang[1] = itemp + IO_ClientSize_ - 1;
    rang[2] = 1;
    MPI_Group_range_incl(world_group_, 1, &rang, &groupTemp2);

    /* Master: the IO rank responsible for that client block. */
    if (computeRank_ != MPI_UNDEFINED)
        itemp = proc_size0 * proc_size1
              + floor((float)computeRank_ / (float)IO_ClientSize_);
    else
        itemp = proc_size0 * proc_size1 + IO_Rank_;

    MPI_Group_incl(world_group_, 1, &itemp, &groupTemp1);

    /* Unite the single master IO rank with its client block. */
    MPI_Group_union(groupTemp1, groupTemp2, &masterClientGroup_);
    MPI_Comm_create(MPI_COMM_WORLD, masterClientGroup_, &masterClientComm_);

    if (IO_Rank_ != MPI_UNDEFINED)
    {
        itemp = floor((float)IO_Rank_ / (float)IO_node_size) * IO_node_size;
        rang[0] = itemp;
        rang[1] = itemp + IO_node_size - 1;
        rang[2] = 1;
        MPI_Group_range_incl(IO_Group_, 1, &rang, &IO_NodeGroup_);
        MPI_Comm_create(IO_Comm_, IO_NodeGroup_, &IO_NodeComm_);
        MPI_Group_rank(IO_NodeGroup_, &IO_NodeRank_);

        files = new file_struct[MAX_FILE_NUMBER];
        dataBuffer = (char*)malloc(IO_BUFFERS_TOTAL_SIZE);
        IO_Node_ = floor((float)IO_Rank_ / (float)IO_node_size);
    }

    sendRequest = MPI_REQUEST_NULL;
}