va_start(ap, unknown); buf = unknown; if (_numargs() == NUMPARAMS+1) { buflen = va_arg(ap, int) /8; /* This is in bits. */ } count = va_arg (ap, int *); datatype = va_arg(ap, MPI_Datatype *); dest = va_arg(ap, int *); tag = va_arg(ap, int *); comm = va_arg(ap, MPI_Comm *); request = va_arg(ap, MPI_Request *); __ierr = va_arg(ap, int *); *__ierr = MPI_Send_init(MPIR_F_PTR(buf),*count, *datatype,*dest,*tag,*comm,&lrequest); *(int*)request = MPIR_FromPointer( lrequest ); } #else void mpi_send_init_( buf, count, datatype, dest, tag, comm, request, __ierr ) void *buf; int*count; MPI_Datatype *datatype; int*dest; int*tag; MPI_Comm *comm; MPI_Request *request; int *__ierr; { MPI_Request lrequest;
/*@

MPI_Group_union - Produces a group by combining two groups

Input Parameters:
+ group1 - first group (handle)
- group2 - second group (handle)

Output Parameter:
. newgroup - union group (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_GROUP
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Group_free
@*/
EXPORT_MPI_API int MPI_Group_union ( MPI_Group group1, MPI_Group group2,
                                     MPI_Group *group_out )
{
    int i, j, global_rank;
    struct MPIR_GROUP *group1_ptr, *group2_ptr, *new_group_ptr;
    int n;                      /* size of the union; trimmed as duplicates are found */
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_GROUP_UNION";

    TR_PUSH(myname);
    group1_ptr = MPIR_GET_GROUP_PTR(group1);
    group2_ptr = MPIR_GET_GROUP_PTR(group2);
    /* MPIR_TEST_MPI_GROUP(group1,group1_ptr,MPIR_COMM_WORLD,myname);
       MPIR_TEST_MPI_GROUP(group2,group2_ptr,MPIR_COMM_WORLD,myname); */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_GROUP(group1_ptr);
    MPIR_TEST_GROUP(group2_ptr);
    if (mpi_errno)
	return MPIR_ERROR(MPIR_COMM_WORLD, mpi_errno, myname );
#endif

    /* Check for EMPTY groups: the union with an empty group is just a
       duplicate of the other group (or of MPI_GROUP_EMPTY itself). */
    if ( (group1 == MPI_GROUP_EMPTY) && (group2 == MPI_GROUP_EMPTY) ) {
	MPIR_Group_dup ( MPIR_GROUP_EMPTY, &new_group_ptr );
	TR_POP;
	*group_out = new_group_ptr->self;
	return (mpi_errno);
    }
    if ( group1 == MPI_GROUP_EMPTY ) {
	MPIR_Group_dup ( group2_ptr, &new_group_ptr );
	*group_out = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }
    if ( group2 == MPI_GROUP_EMPTY ) {
	MPIR_Group_dup ( group1_ptr, &new_group_ptr );
	*group_out = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }

    /* Create the new group.  NOTE(review): MPIR_ALLOC appears to embed the
       error return on allocation failure — confirm against its definition. */
    MPIR_ALLOC(new_group_ptr,NEW(struct MPIR_GROUP),MPIR_COMM_WORLD,
	       MPI_ERR_EXHAUSTED, "MPI_GROUP_UNION" );
    *group_out = (MPI_Group) MPIR_FromPointer( new_group_ptr );
    new_group_ptr->self = *group_out;
    MPIR_SET_COOKIE(new_group_ptr,MPIR_GROUP_COOKIE)
    new_group_ptr->ref_count = 1;
    new_group_ptr->permanent = 0;
    /* The union preserves group1's ordering first, so group1's local rank
       carries over directly; it is recomputed below only if undefined. */
    new_group_ptr->local_rank = group1_ptr->local_rank;
    new_group_ptr->set_mark = (int *)0;

    /* Set the number in the union (upper bound; reduced for duplicates) */
    n = group1_ptr->np + group2_ptr->np;

    /* Allocate set marking space for group2 if necessary */
    if (group2_ptr->set_mark == NULL) {
	MPIR_ALLOC(group2_ptr->set_mark,(int *) MALLOC( group2_ptr->np * sizeof(int) ),
		   MPIR_COMM_WORLD,MPI_ERR_EXHAUSTED,"MPI_GROUP_UNION");
    }

    /* Mark the union: a group2 member stays MARKED (i.e. will be appended)
       unless its global rank already appears in group1, in which case it is
       UNMARKED and the union size is decremented. */
    for ( j=0; j<group2_ptr->np; j++ ) {
	group2_ptr->set_mark[j] = MPIR_MARKED;
	for ( i=0; i<group1_ptr->np; i++ )
	    if ( group1_ptr->lrank_to_grank[i] == group2_ptr->lrank_to_grank[j] ) {
		group2_ptr->set_mark[j] = MPIR_UNMARKED;
		n--;
		break;
	    }
    }

    /* Alloc the memory */
    new_group_ptr->np = n;
    MPIR_ALLOC(new_group_ptr->lrank_to_grank,(int *) MALLOC( n * sizeof(int) ),
	       MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_GROUP_UNION" );

    /* Fill in the space: all of group1 first, then the marked (non-duplicate)
       members of group2, preserving each group's internal order. */
    n = group1_ptr->np;
    memcpy(new_group_ptr->lrank_to_grank,group1_ptr->lrank_to_grank,n*sizeof(int));
    for ( j=0; j<group2_ptr->np; j++ )
	if ( (group2_ptr->set_mark[j]==MPIR_MARKED) && (n < new_group_ptr->np) )
	    new_group_ptr->lrank_to_grank[n++] = group2_ptr->lrank_to_grank[j];

    /* Find the local rank only if local rank not defined in group 1; the
       calling process can then only appear in the appended group2 portion. */
    if ( new_group_ptr->local_rank == MPI_UNDEFINED ) {
	global_rank = MPID_MyWorldRank;
	for( i=group1_ptr->np; i<new_group_ptr->np; i++ )
	    if ( global_rank == new_group_ptr->lrank_to_grank[i] ) {
		new_group_ptr->local_rank = i;
		break;
	    }
    }

    /* Determine the previous and next powers of 2 (cached for collective
       algorithms) */
    MPIR_Powers_of_2 ( new_group_ptr->np, &(new_group_ptr->N2_next),
		       &(new_group_ptr->N2_prev) );

    TR_POP;
    return (mpi_errno);
}
/*@

MPI_Group_range_excl - Produces a group by excluding ranges of processes from
       an existing group

Input Parameters:
+ group - group (handle)
. n - number of elements in array 'ranks' (integer)
- ranges - a one-dimensional array of integer triplets of the form
  (first rank, last rank, stride), indicating the ranks in
  'group' of processes to be excluded from the output group 'newgroup' .

Output Parameter:
. newgroup - new group derived from above, preserving the
order in 'group'  (handle)

Note:
Currently, each of the ranks to exclude must be
a valid rank in the group and all elements must be distinct or the
function is erroneous.  This restriction is per the draft.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_GROUP
.N MPI_ERR_EXHAUSTED
.N MPI_ERR_RANK
.N MPI_ERR_ARG

.seealso: MPI_Group_free
@*/
EXPORT_MPI_API int MPI_Group_range_excl ( MPI_Group group, int n,
					  int ranges[][3],
					  MPI_Group *newgroup )
{
    int i, j, first, last, stride;
    int np;                     /* number of surviving (non-excluded) ranks */
    struct MPIR_GROUP *group_ptr, *new_group_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_GROUP_RANGE_EXCL";

    TR_PUSH(myname);

    /* Check for bad arguments */
    group_ptr = MPIR_GET_GROUP_PTR(group);
#ifndef MPIR_NO_ERROR_CHECKING
    /* MPIR_TEST_MPI_GROUP(group,group_ptr,MPIR_COMM_WORLD,myname); */
    MPIR_TEST_GROUP(group_ptr);
    if (mpi_errno)
	return MPIR_ERROR(MPIR_COMM_WORLD, mpi_errno, myname );
#endif

    /* Check for an EMPTY input group */
    if ( (group == MPI_GROUP_EMPTY) ) {
	MPIR_Group_dup ( MPIR_GROUP_EMPTY, &new_group_ptr );
	*newgroup = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }

    /* Check for no range ranks to exclude: result is a dup of the input.
       FIX: this path previously returned without TR_POP, unbalancing the
       TR_PUSH/TR_POP trace pairing used on every other success return. */
    if ( n == 0 ) {
	MPIR_Group_dup ( group_ptr, &new_group_ptr );
	*newgroup = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }
    if (n < 0)
	return MPIR_ERROR( MPIR_COMM_WORLD, MPI_ERR_ARG, myname );

    /* Allocate set marking space for group if necessary */
    if (group_ptr->set_mark == NULL) {
	MPIR_ALLOC(group_ptr->set_mark,(int *) MALLOC( group_ptr->np * sizeof(int) ),
		   MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
    }
    /* Clear all marks (MPIR_UNMARKED); excluded ranks are then MARKED below */
    (void) memset( group_ptr->set_mark, (char)0, group_ptr->np * sizeof(int) );

    /* Mark the ranks to be excluded */
    np = group_ptr->np;

    for (i=0; i<n; i++) {
	first = ranges[i][0]; last = ranges[i][1]; stride = ranges[i][2];
	if (stride != 0) {
	    /* A range whose stride walks away from 'last' never terminates */
	    if ( (stride > 0 && first > last) ||
		 (stride < 0 && first < last) ) {
		mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_STRIDE,
					     myname,
					     "Range does not terminate",
					     "Range (%d,%d,%d) does not terminate",
					     first, last, stride );
		return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	    }
	    /* Multiplying both sides by stride makes one comparison work for
	       both positive and negative strides */
	    for ( j=first; j*stride <= last*stride; j += stride )
		if ( (j < group_ptr->np) && (j >= 0) ) {
		    if (group_ptr->set_mark[j] == MPIR_UNMARKED) {
			group_ptr->set_mark[j] = MPIR_MARKED;
			np--;
		    }
		}
		else {
		    mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_DEFAULT,
						 myname, (char *)0,(char *)0, j );
		    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
		}
	}
	else {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ZERO_STRIDE,
					 myname, "Zero stride is incorrect",
					 (char *)0 );
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
    }

    /* Check np to see if we have original group or if we have null group.
       FIX: both shortcut returns now TR_POP, matching the other success
       paths of this function. */
    if (np == 0) {
	MPIR_Group_dup ( MPIR_GROUP_EMPTY, &new_group_ptr );
	*newgroup = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }
    if (np == group_ptr->np) {
	MPIR_Group_dup ( group_ptr, &new_group_ptr );
	*newgroup = new_group_ptr->self;
	TR_POP;
	return (mpi_errno);
    }

    /* Create the new group */
    MPIR_ALLOC(new_group_ptr,NEW(struct MPIR_GROUP),MPIR_COMM_WORLD,
	       MPI_ERR_EXHAUSTED, myname );
    *newgroup = (MPI_Group) MPIR_FromPointer( new_group_ptr );
    new_group_ptr->self = *newgroup;
    MPIR_SET_COOKIE(new_group_ptr,MPIR_GROUP_COOKIE)
    new_group_ptr->ref_count = 1;
    new_group_ptr->permanent = 0;
    new_group_ptr->local_rank = MPI_UNDEFINED;
    new_group_ptr->set_mark = (int *)0;
    new_group_ptr->np = np;
    new_group_ptr->lrank_to_grank = (int *) MALLOC( np * sizeof(int) );
    if (!new_group_ptr->lrank_to_grank) {
	return MPIR_ERROR( MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
    }

    /* Fill in new group: copy every unmarked (surviving) rank, preserving
       order, and record this process's new local rank if it survives. */
    for (i=j=0; i < group_ptr->np ; i++)
	if ( (group_ptr->set_mark[i] == MPIR_UNMARKED) && (j < new_group_ptr->np ) ) {
	    if (group_ptr->local_rank == i)
		new_group_ptr->local_rank = j;
	    new_group_ptr->lrank_to_grank[j++] = group_ptr->lrank_to_grank[i];
	}

    /* Determine the previous and next powers of 2 */
    MPIR_Powers_of_2 ( new_group_ptr->np, &(new_group_ptr->N2_next),
		       &(new_group_ptr->N2_prev) );

    TR_POP;
    return (mpi_errno);
}
/*@
    MPI_Type_struct - Creates a struct datatype

Input Parameters:
+ count - number of blocks (integer) -- also number of entries in arrays
  array_of_types , array_of_displacements and array_of_blocklengths
. blocklens - number of elements in each block (array)
. indices - byte displacement of each block (array)
- old_types - type of elements in each block (array of handles to datatype
  objects)

Output Parameter:
. newtype - new datatype (handle)

Notes:
If an upperbound is set explicitly by using the MPI datatype 'MPI_UB', the
corresponding index must be positive.

The MPI standard originally made vague statements about padding and alignment;
this was intended to allow the simple definition of structures that could
be sent with a count greater than one. For example,
.vb
    struct { int a; char b; } foo;
.ve
may have 'sizeof(foo) > sizeof(int) + sizeof(char)'; for example,
'sizeof(foo) == 2*sizeof(int)'. The initial version of the MPI standard
defined the extent of a datatype as including an `epsilon` that would have
allowed an implementation to make the extent an MPI datatype
for this structure equal to '2*sizeof(int)'.
However, since different systems might define different paddings, there was
much discussion by the MPI Forum about what was the correct value of
epsilon, and one suggestion was to define epsilon as zero.
This would have been the best thing to do in MPI 1.0, particularly since
the 'MPI_UB' type allows the user to easily set the end of the structure.
Unfortunately, this change did not make it into the final document.
Currently, this routine does not add any padding, since the amount of
padding needed is determined by the compiler that the user is using to
build their code, not the compiler used to construct the MPI library.
A later version of MPICH may provide for some natural choices of padding
(e.g., multiple of the size of the largest basic member), but users are
advised to never depend on this, even with vendor MPI implementations.
Instead, if you define a structure datatype and wish to send or receive
multiple items, you should explicitly include an 'MPI_UB' entry as the
last member of the structure. For example, the following code can be used
for the structure foo
.vb
    blen[0] = 1; indices[0] = 0; oldtypes[0] = MPI_INT;
    blen[1] = 1; indices[1] = &foo.b - &foo; oldtypes[1] = MPI_CHAR;
    blen[2] = 1; indices[2] = sizeof(foo); oldtypes[2] = MPI_UB;
    MPI_Type_struct( 3, blen, indices, oldtypes, &newtype );
.ve

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_COUNT
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Type_struct( int count, int blocklens[], MPI_Aint indices[],
		     MPI_Datatype old_types[], MPI_Datatype *newtype )
{
    struct MPIR_DATATYPE* dteptr;
    MPI_Aint ub, lb, high, low, real_ub, real_lb, real_init;
    int high_init = 0, low_init = 0;
    int i, mpi_errno = MPI_SUCCESS;
    MPI_Aint ub_marker = 0, lb_marker = 0; /* to suppress warnings */
    MPI_Aint ub_found = 0, lb_found = 0;   /* flags: explicit MPI_UB/MPI_LB seen */
    int size, total_count;
    static char myname[] = "MPI_TYPE_STRUCT";

    disableSignal();

    /* Check for bad arguments */
    if ( count < 0 ) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_COUNT, MPIR_ERR_DEFAULT, myname,
				     (char *)0, (char *)0, count );
	revertSignal();
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
    }

    /* A zero-count struct degenerates to an empty contiguous type */
    if (count == 0) {
	revertSignal();
	return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Check blocklens and old_types arrays and find number of bound */
    /* markers */
    total_count = 0;
    for (i=0; i<count; i++) {
	total_count += blocklens[i];
	if ( blocklens[i] < 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL,
					 myname, (char *)0, (char *)0,
					 "blocklens", i, blocklens[i] );
	    revertSignal();
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);
	}
	if ( old_types[i] == MPI_DATATYPE_NULL ) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_TYPE, MPIR_ERR_TYPE_ARRAY_NULL,
					 myname, (char *)0, (char *)0,
					 "old_types", i );
	    revertSignal();
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
    }
    /* All-zero block lengths also degenerate to an empty contiguous type */
    if (total_count == 0) {
	revertSignal();
	return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Create and fill in the datatype */
    MPIR_ALLOC(dteptr,(struct MPIR_DATATYPE *) MPIR_SBalloc( MPIR_dtes ),MPIR_COMM_WORLD,
	       MPI_ERR_EXHAUSTED, myname );
    *newtype = (MPI_Datatype) MPIR_FromPointer( dteptr );
    dteptr->self = *newtype;
    MPIR_SET_COOKIE(dteptr,MPIR_DATATYPE_COOKIE)
    dteptr->dte_type = MPIR_STRUCT;
    dteptr->committed = 0;
    dteptr->basic = 0;
    dteptr->permanent = 0;
    dteptr->is_contig = 0;
    dteptr->ref_count = 1;
    dteptr->count = count;
    dteptr->elements = 0;
    dteptr->size = 0;
    dteptr->align = 1;
    dteptr->has_ub = 0;
    dteptr->has_lb = 0;
    dteptr->self = *newtype;

    /* Create indices and blocklens arrays and fill them */
    dteptr->indices = ( MPI_Aint * ) MALLOC( count * sizeof( MPI_Aint ) );
    dteptr->blocklens = ( int * ) MALLOC( count * sizeof( int ) );
    dteptr->old_types = ( struct MPIR_DATATYPE ** )MALLOC(count*sizeof(struct MPIR_DATATYPE *));
    if (!dteptr->indices || !dteptr->blocklens || !dteptr->old_types) {
	revertSignal();
	return MPIR_ERROR( MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
			   "MPI_TYPE_STRUCT" );
    }
    high = low = ub = lb = 0;
    real_ub = real_lb = 0;
    real_init = 0;

    /* If data alignment is 2, 4, or 8, then assign dteptr->align to that
       value.  If 0, then assign dteptr->align to the maximal alignment
       requirement (done below) */
    if (ALIGNMENT_VALUE > 0)
	dteptr->align = ALIGNMENT_VALUE;

    for (i = 0; i < count; i++) {
	struct MPIR_DATATYPE *old_dtype_ptr;
	old_dtype_ptr = MPIR_GET_DTYPE_PTR(old_types[i]);
	MPIR_TEST_DTYPE(old_types[i],old_dtype_ptr,MPIR_COMM_WORLD,
			"MPI_TYPE_STRUCT");
	dteptr->old_types[i] = MPIR_Type_dup (old_dtype_ptr);
	dteptr->indices[i] = indices[i];
	dteptr->blocklens[i] = blocklens[i];
	/* Keep track of maximal alignment requirement */
	if (ALIGNMENT_VALUE == 0) {
	    if (dteptr->align < old_dtype_ptr->align)
		dteptr->align = old_dtype_ptr->align;
	}
	/* An explicit MPI_UB entry: keep the largest displacement seen */
	if ( old_dtype_ptr->dte_type == MPIR_UB ) {
	    if (ub_found) {
		if (indices[i] > ub_marker)
		    ub_marker = indices[i];
	    }
	    else {
		ub_marker = indices[i];
		ub_found = 1;
	    }
	}
	/* An explicit MPI_LB entry: keep the smallest displacement seen */
	else if ( old_dtype_ptr->dte_type == MPIR_LB ) {
	    if (lb_found) {
		if ( indices[i] < lb_marker ) {
		    lb_marker = indices[i];
		}
	    }
	    else {
		lb_marker = indices[i];
		lb_found = 1;
	    }
	}
	else {
	    /* Since the datatype is NOT a UB or LB, save the real limits */
	    if (!real_init) {
		real_init = 1;
		real_lb = old_dtype_ptr->real_lb;
		real_ub = old_dtype_ptr->real_ub;
	    }
	    else {
		if (old_dtype_ptr->real_lb < real_lb)
		    real_lb = old_dtype_ptr->real_lb;
		if (old_dtype_ptr->real_ub > real_ub)
		    real_ub = old_dtype_ptr->real_ub;
	    }
	    /* Next, check to see if datatype has an MPI_LB or MPI_UB
	       within it...
	       Make sure to adjust the ub by the selected displacement
	       and blocklens (blocklens is like Type_contiguous) */
	    if (old_dtype_ptr->has_ub) {
		MPI_Aint ub_test;
		ub_test = old_dtype_ptr->ub + indices[i] +
		    (blocklens[i] - 1) * old_dtype_ptr->extent;
		if (ub_marker < ub_test || !ub_found)
		    ub_marker = ub_test;
		ub_found = 1;
	    }
	    if (old_dtype_ptr->has_lb) {
		if (!lb_found || lb_marker > (old_dtype_ptr->lb) + indices[i] )
		    lb_marker = old_dtype_ptr->lb + indices[i];
		lb_found = 1;
	    }
	    /* Get the ub/lb from the datatype (if a MPI_UB or MPI_LB was
	       found, then these values will be ignored).
	       We use the lb of the old type and add the indices
	       value to it */
	    lb = indices[i] + old_dtype_ptr->lb;
	    ub = lb + (blocklens[i] * old_dtype_ptr->extent) ;
	    if (!high_init) { high = ub; high_init = 1; }
	    else if (ub > high) high = ub;
	    if (!low_init ) { low = lb; low_init = 1; }
	    else if (lb < low) low = lb;
	    /* Handle types with negative extent: ub may lie below lb */
	    if (ub > lb) {
		if ( high < ub ) high = ub;
		if ( low > lb ) low = lb;
	    }
	    else {
		if ( high < lb ) high = lb;
		if ( low > ub ) low = ub;
	    }
	    dteptr->elements += (blocklens[i] * old_dtype_ptr->elements);
	} /* end else */
	if (i < count - 1) {
	    size = old_dtype_ptr->size * blocklens[i];
	    dteptr->size += size;
	}
	else {
	    dteptr->size += (blocklens[i] * old_dtype_ptr->size);
	}
    } /* end for loop */

    /* Set the upper/lower bounds and the extent and size: explicit markers
       (lb_found/ub_found) take precedence over the computed high/low. */
    if (lb_found) {
	dteptr->lb = lb_marker;
	dteptr->has_lb = 1;
    }
    else
	dteptr->lb = (low_init ? low : 0);

    if (ub_found) {
	dteptr->ub = ub_marker;
	dteptr->has_ub = 1;
    }
    else
	dteptr->ub = (high_init ? high: 0);

    dteptr->extent = dteptr->ub - dteptr->lb ;
    dteptr->real_ub = real_ub;
    dteptr->real_lb = real_lb;

    /* If there is no explicit ub/lb marker, make the extent/ub fit the
       alignment of the largest basic item, if that structure alignment is
       chosen */
    if (!lb_found && !ub_found) {
	MPI_Aint eps_offset;
	/* Since data is always offset by the extent, is the extent that
	   we must adjust. */
	eps_offset = dteptr->extent % dteptr->align;
	if (eps_offset > 0) {
	    dteptr->ub += (dteptr->align - eps_offset);
	    dteptr->extent = dteptr->ub - dteptr->lb;
	}
    }
#    if defined(MPID_HAS_TYPE_STRUCT)
    {
	mpi_errno = MPID_Type_struct(count, blocklens, indices, old_types,
				     *newtype);
    }
#    endif
    revertSignal();
    return (mpi_errno);
}
/*@

MPI_Intercomm_create - Creates an intercommunicator from two intracommunicators

Input Parameters:
+ local_comm - Local (intra)communicator
. local_leader - Rank in local_comm of leader (often 0)
. peer_comm - Remote communicator
. remote_leader - Rank in peer_comm of remote leader (often 0)
- tag - Message tag to use in constructing intercommunicator; if multiple
  'MPI_Intercomm_creates' are being made, they should use different tags (more
  precisely, ensure that the local and remote leaders are using different
  tags for each 'MPI_intercomm_create').

Output Parameter:
. comm_out - Created intercommunicator

Notes:
The MPI 1.1 Standard contains two mutually exclusive comments on the
input intracommunicators.  One says that their respective groups must be
disjoint; the other that the leaders can be the same process.  After
some discussion by the MPI Forum, it has been decided that the groups must
be disjoint.  Note that the `reason` given for this in the standard is
`not` the reason for this choice; rather, the `other` operations on
intercommunicators (like 'MPI_Intercomm_merge') do not make sense if the
groups are not disjoint.

.N fortran

Algorithm:
+ 1) Allocate a send context, an inter-coll context, and an
     intra-coll context
. 2) Send "send_context" and lrank_to_grank list from local comm group
     if I''m the local_leader.
. 3) If I''m the local leader, then wait on the posted sends and receives
     to complete.  Post the receive for the remote group information and
     wait for it to complete.
. 4) Broadcast information received from the remote leader.
. 5) Create the inter_communicator from the information we now have.
-    An inter-communicator ends up with three levels of communicators.
     The inter-communicator returned to the user, a "collective"
     inter-communicator that can be used for safe communications between
     local & remote groups, and a collective intra-communicator that can
     be used to allocate new contexts during the merge and dup operations.

     For the resulting inter-communicator, 'comm_out'

.vb
       comm_out                       = inter-communicator
       comm_out->comm_coll            = "collective" inter-communicator
       comm_out->comm_coll->comm_coll = safe collective intra-communicator
.ve

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_EXHAUSTED
.N MPI_ERR_RANK

.seealso: MPI_Intercomm_merge, MPI_Comm_free, MPI_Comm_remote_group,
          MPI_Comm_remote_size
@*/
EXPORT_MPI_API int MPI_Intercomm_create ( MPI_Comm local_comm,
					  int local_leader,
					  MPI_Comm peer_comm,
					  int remote_leader, int tag,
					  MPI_Comm *comm_out )
{
    int local_size, local_rank, peer_size, peer_rank;
    int remote_size;
    int mpi_errno = MPI_SUCCESS;
    MPIR_CONTEXT context, send_context;  /* context: allocated locally;
					    send_context: received from the
					    remote leader below */
    struct MPIR_GROUP *remote_group_ptr;
    struct MPIR_COMMUNICATOR *new_comm, *local_comm_ptr, *peer_comm_ptr;
    /* req/status slots: [0]-[1] leader sends, [2]-[3] leader receives,
       [4] group-table send, [5] group-table receive */
    MPI_Request req[6];
    MPI_Status status[6];
    MPIR_ERROR_DECL;
    static char myname[]="MPI_INTERCOMM_CREATE";

    TR_PUSH(myname);
    local_comm_ptr = MPIR_GET_COMM_PTR(local_comm);

#ifndef MPIR_NO_ERROR_CHECKING
    /* Check for valid arguments to function */
    MPIR_TEST_MPI_COMM(local_comm,local_comm_ptr,local_comm_ptr,myname);
    MPIR_TEST_SEND_TAG(tag);
    if (mpi_errno)
	return MPIR_ERROR(local_comm_ptr, mpi_errno, myname );
#endif

    if (local_comm  == MPI_COMM_NULL) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_COMM, MPIR_ERR_LOCAL_COMM, myname,
				     "Local communicator must not be MPI_COMM_NULL",
				     (char *)0 );
	return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }

    (void) MPIR_Comm_size ( local_comm_ptr, &local_size );
    (void) MPIR_Comm_rank ( local_comm_ptr, &local_rank );

    if ( local_leader == local_rank ) {
	/* Peer_comm need be valid only at local_leader */
	peer_comm_ptr = MPIR_GET_COMM_PTR(peer_comm);
	if ((MPIR_TEST_COMM_NOTOK(peer_comm,peer_comm_ptr) ||
	     (peer_comm == MPI_COMM_NULL))) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_COMM, MPIR_ERR_PEER_COMM,
					 myname, "Peer communicator is not valid",
					 (char *)0 );
	    return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
	}

	(void) MPIR_Comm_size ( peer_comm_ptr,  &peer_size );
	(void) MPIR_Comm_rank ( peer_comm_ptr,  &peer_rank );

	if (((peer_rank     == MPI_UNDEFINED) && (mpi_errno = MPI_ERR_RANK)))
	    return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );

	if (((remote_leader >= peer_size)     && (mpi_errno = MPI_ERR_RANK)) ||
	    ((remote_leader <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_REMOTE_RANK,
					 myname,
					 "Error specifying remote_leader",
"Error specifying remote_leader; value %d not between 0 and %d", remote_leader, peer_size );
	    return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
	}
    }

    if (((local_leader  >= local_size)    && (mpi_errno = MPI_ERR_RANK)) ||
	((local_leader  <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_LOCAL_RANK,
				     myname,
				     "Error specifying local_leader",
"Error specifying local_leader; value %d not in between 0 and %d", local_leader, local_size );
	return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }

    /* Allocate send context, inter-coll context and intra-coll context */
    MPIR_Context_alloc ( local_comm_ptr, 3, &context );

    /* If I'm the local leader, then exchange information */
    if (local_rank == local_leader) {
	MPIR_ERROR_PUSH(peer_comm_ptr);

	/* Post the receives for the information from the remote_leader */
	/* We don't post a receive for the remote group yet, because we */
	/* don't know how big it is yet. */
	MPIR_CALL_POP(MPI_Irecv (&remote_size, 1, MPI_INT, remote_leader, tag,
				 peer_comm, &(req[2])),peer_comm_ptr,myname);
	MPIR_CALL_POP(MPI_Irecv (&send_context, 1, MPIR_CONTEXT_TYPE,
				 remote_leader,tag, peer_comm, &(req[3])),
		      peer_comm_ptr,myname);

	/* Send the lrank_to_grank table of the local_comm and an allocated */
	/* context. Currently I use multiple messages to send this info.    */
	/* Eventually, this will change(?) */
	MPIR_CALL_POP(MPI_Isend (&local_size, 1, MPI_INT, remote_leader, tag,
				 peer_comm, &(req[0])),peer_comm_ptr,myname);
	MPIR_CALL_POP(MPI_Isend (&context, 1, MPIR_CONTEXT_TYPE, remote_leader,
				 tag, peer_comm, &(req[1])),peer_comm_ptr,myname);

	/* Wait on the communication requests to finish */
	MPIR_CALL_POP(MPI_Waitall ( 4, req, status ),peer_comm_ptr,myname);

	/* We now know how big the remote group is, so create it */
	remote_group_ptr = MPIR_CreateGroup ( remote_size );
	remote_group_ptr->self =
	    (MPI_Group) MPIR_FromPointer( remote_group_ptr );

	/* Post the receive for the group information */
	MPIR_CALL_POP(MPI_Irecv (remote_group_ptr->lrank_to_grank, remote_size,
				 MPI_INT, remote_leader, tag, peer_comm,
				 &(req[5])),peer_comm_ptr,myname);

	/* Send the local group info to the remote group */
	MPIR_CALL_POP(MPI_Isend (local_comm_ptr->group->lrank_to_grank,
				 local_size, MPI_INT, remote_leader, tag,
				 peer_comm, &(req[4])),peer_comm_ptr,myname);

	/* wait on the send and the receive for the group information */
	MPIR_CALL_POP(MPI_Waitall ( 2, &(req[4]), &(status[4]) ),peer_comm_ptr,
		      myname);
	MPIR_ERROR_POP(peer_comm_ptr);

	/* Now we can broadcast the group information to the other local */
	/* comm members.  (Root is this leader, i.e. local_rank.) */
	MPIR_ERROR_PUSH(local_comm_ptr);
	MPIR_CALL_POP(MPI_Bcast(&remote_size,1,MPI_INT,local_rank,local_comm),
		      local_comm_ptr,myname);
	MPIR_CALL_POP(MPI_Bcast(remote_group_ptr->lrank_to_grank, remote_size,
				MPI_INT, local_rank, local_comm),local_comm_ptr,
		      myname);
	MPIR_ERROR_POP(local_comm_ptr);
    }
    /* Else I'm just an ordinary comm member, so receive the bcast'd */
    /* info about the remote group */
    else {
	MPIR_ERROR_PUSH(local_comm_ptr);
	MPIR_CALL_POP(MPI_Bcast(&remote_size, 1, MPI_INT, local_leader,
				local_comm),local_comm_ptr,myname);

	/* We now know how big the remote group is, so create it */
	remote_group_ptr = MPIR_CreateGroup ( remote_size );
	remote_group_ptr->self =
	    (MPI_Group) MPIR_FromPointer( remote_group_ptr );

	/* Receive the group info */
	MPIR_CALL_POP(MPI_Bcast(remote_group_ptr->lrank_to_grank, remote_size,
				MPI_INT, local_leader, local_comm),
		      local_comm_ptr,myname );
	MPIR_ERROR_POP(local_comm_ptr);
    }

    MPIR_ERROR_PUSH(local_comm_ptr);
    /* Broadcast the send context (received from the remote leader) so that
       every local member sends with the context the remote side expects */
    MPIR_CALL_POP(MPI_Bcast(&send_context, 1, MPIR_CONTEXT_TYPE,
			    local_leader, local_comm),local_comm_ptr,myname);
    MPIR_ERROR_POP(local_comm_ptr);

    /* We all now have all the information necessary, start building the */
    /* inter-communicator */
    MPIR_ALLOC(new_comm,NEW(struct MPIR_COMMUNICATOR),local_comm_ptr,
	       MPI_ERR_EXHAUSTED,myname );
    MPIR_Comm_init( new_comm, local_comm_ptr, MPIR_INTER );
    *comm_out = new_comm->self;
    new_comm->group = remote_group_ptr;
    MPIR_Group_dup( local_comm_ptr->group, &(new_comm->local_group) );
    new_comm->local_rank     = new_comm->local_group->local_rank;
    new_comm->lrank_to_grank = new_comm->group->lrank_to_grank;
    new_comm->np             = new_comm->group->np;
    new_comm->send_context   = send_context;
    new_comm->recv_context   = context;
    new_comm->comm_name      = 0;
    if ((mpi_errno = MPID_CommInit( local_comm_ptr, new_comm )) )
	return mpi_errno;
    (void) MPIR_Attr_create_tree ( new_comm );

    /* Build the collective inter-communicator */
    MPIR_Comm_make_coll( new_comm, MPIR_INTER );
    MPIR_Comm_make_onesided( new_comm, MPIR_INTER );

    /* Build the collective intra-communicator.  Note that we require
       an intra-communicator for the "coll_comm" so that MPI_COMM_DUP
       can use it for some collective operations (do we need this
       for MPI-2 with intercommunicator collective?)

       Note that this really isn't the right thing to do; we need to replace
       *all* of the Mississippi state collective code.
     */
    MPIR_Comm_make_coll( new_comm->comm_coll, MPIR_INTRA );
#if 0
    MPIR_Comm_make_coll( new_comm->comm_onesided, MPIR_INTRA );
#endif

    /* Remember it for the debugger */
    MPIR_Comm_remember ( new_comm );

    TR_POP;
    return (mpi_errno);
}