/* once */
/* Execute function(param) exactly once per sync point across the threads of
   comm: every thread calls in, but only the first arrival runs the function.

   Unlike tMPI_Once_wait() below, this variant publishes no result and the
   losers do NOT wait for the winner to finish -- they return immediately.

   comm      - the communicator whose threads share the once-point
   function  - callback run by the first thread to arrive
   param     - opaque argument forwarded to function
   was_first - if non-NULL, set to TRUE on the thread that ran function
               (note: left untouched on the other threads, so callers must
               pre-initialize it to FALSE)
   Returns TMPI_SUCCESS, or an error when comm is NULL. */
int tMPI_Once(tMPI_Comm comm, void (*function)(void*), void *param,
              int *was_first)
{
    int              myrank;
    int              ret = TMPI_SUCCESS;
    struct coll_sync *csync;
    struct coll_env  *cev;
    int              syncs;

    if (!comm)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    myrank = tMPI_Comm_seek_rank(comm, tMPI_Get_current());

    /* we increase our counter, and determine which coll_env we get */
    csync = &(comm->csync[myrank]);
    csync->syncs++;
    cev = &(comm->cev[csync->syncs % N_COLL_ENV]);

    /* now do a compare-and-swap on the current_syncc */
    syncs = tMPI_Atomic_get( &(cev->coll.current_sync));
    if ((csync->syncs - syncs > 0) && /* check if sync was an earlier number.
                                         If it is a later number, we can't
                                         have been the first to arrive here.
                                         (difference, not direct comparison,
                                         so counter wrap-around is handled) */
        tMPI_Atomic_cas(&(cev->coll.current_sync), syncs, csync->syncs))
    {
        /* we're the first! */
        function(param);
        if (was_first)
        {
            *was_first = TRUE;
        }
    }
    return ret;
}
/* Retrieve the Cartesian topology information associated with comm.

   comm    - communicator with (possibly) a Cartesian topology attached
   maxdims - capacity of the dims/periods/coords output arrays
   dims    - out: number of processes per dimension
   periods - out: periodicity flag per dimension
   coords  - out: this thread's coordinates (filled by tMPI_Cart_coords)

   Returns TMPI_SUCCESS; TMPI_ERR_COMM for a NULL comm; TMPI_ERR_DIMS when
   maxdims is smaller than the topology's number of dimensions.
   A comm without a Cartesian topology is a silent no-op (TMPI_SUCCESS). */
int tMPI_Cart_get(tMPI_Comm comm, int maxdims, int *dims, int *periods,
                  int *coords)
{
    int i;
    int myrank;

#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Cart_get(%p, %d, %p, %p, %p)", comm, maxdims,
                     dims, periods, coords);
#endif
    if (!comm)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    /* fix: look up our rank only after comm is known to be non-NULL; the
       original did this in the declaration, dereferencing a NULL comm */
    myrank = tMPI_Comm_seek_rank(comm, tMPI_Get_current());

    if (!comm->cart || comm->cart->ndims == 0)
    {
        return TMPI_SUCCESS;
    }

    tMPI_Cart_coords(comm, myrank, maxdims, coords);

    for (i = 0; i < comm->cart->ndims; i++)
    {
        if (i >= maxdims)
        {
            /* the caller's arrays are too small for this topology */
            return tMPI_Error(comm, TMPI_ERR_DIMS);
        }
        dims[i]    = comm->cart->dims[i];
        periods[i] = comm->cart->periods[i];
    }
    return TMPI_SUCCESS;
}
/* Duplicate a communicator: every thread in comm gets a new communicator
   with identical membership and ranks in *newcomm.

   Implemented as a Comm_split with a single color and key == own rank, so
   all the heavy synchronization lives in tMPI_Comm_split(). */
int tMPI_Comm_dup(tMPI_Comm comm, tMPI_Comm *newcomm)
{
#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Comm_dup(%p, %p)", comm, newcomm);
#endif
    /* fix: guard against NULL before seeking our rank; the original called
       tMPI_Comm_seek_rank(comm, ...) unconditionally, so Comm_split's own
       NULL check came too late to prevent the dereference */
    if (!comm)
    {
        *newcomm = TMPI_COMM_NULL;
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    /* we just call Comm_split because it already contains all the
       necessary synchronization constructs. */
    return tMPI_Comm_split(comm, 0,
                           tMPI_Comm_seek_rank(comm, tMPI_Get_current()),
                           newcomm);
}
/* Create a new communicator with a Cartesian topology.

   comm_old  - parent communicator
   ndims     - number of Cartesian dimensions
   dims      - processes per dimension (product must fit in comm_old)
   periods   - periodicity per dimension
   reorder   - if nonzero, allow rank reordering via tMPI_Cart_map()
   comm_cart - out: the new Cartesian communicator (TMPI_COMM_NULL for
               threads that don't fit, and on failure)

   Returns TMPI_SUCCESS, TMPI_ERR_COMM for a NULL parent, or
   TMPI_ERR_CART_CREATE_NPROCS when comm_old has fewer threads than the
   requested grid. */
int tMPI_Cart_create(tMPI_Comm comm_old, int ndims, int *dims, int *periods,
                     int reorder, tMPI_Comm *comm_cart)
{
    int myrank;
    int key;
    int color = 0;
    int Ntot  = 1;
    int i;

#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Cart_create(%p, %d, %p, %p, %d, %p)", comm_old,
                     ndims, dims, periods, reorder, comm_cart);
#endif
    if (!comm_old)
    {
        /* fix: report on TMPI_COMM_WORLD -- the original passed the NULL
           comm_old itself to tMPI_Error() */
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    /* fix: look up our rank only after the NULL check above; the original
       did this in the declarations, dereferencing a NULL comm_old */
    myrank = tMPI_Comm_seek_rank(comm_old, tMPI_Get_current());
    key    = myrank;

    /* calculate the total number of procs in cartesian comm */
    for (i = 0; i < ndims; i++)
    {
        Ntot *= dims[i];
    }
    /* refuse to create if there's not enough procs */
    if (comm_old->grp.N < Ntot)
    {
        *comm_cart = TMPI_COMM_NULL;
        /* (removed a vestigial '#if 1' around this return) */
        return tMPI_Error(comm_old, TMPI_ERR_CART_CREATE_NPROCS);
    }

    /* threads beyond the grid size don't participate */
    if (key >= Ntot)
    {
        key = TMPI_UNDEFINED;
    }

    if (reorder)
    {
        tMPI_Cart_map(comm_old, ndims, dims, periods, &key);
    }

    if (key == TMPI_UNDEFINED)
    {
        color = TMPI_UNDEFINED;
    }

    tMPI_Comm_split(comm_old, color, key, comm_cart);

    tMPI_Cart_init(comm_cart, ndims, dims, periods);

    return TMPI_SUCCESS;
}
/* Create a new communicator containing exactly the threads in group.

   Threads that are members of group get color 1, all others
   TMPI_UNDEFINED; the MPI specs specifically say this is equivalent to a
   Comm_split, so all synchronization is delegated to tMPI_Comm_split(). */
int tMPI_Comm_create(tMPI_Comm comm, tMPI_Group group, tMPI_Comm *newcomm)
{
    int color = TMPI_UNDEFINED;
    int key;

#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Comm_create(%p, %p, %p)", comm, group, newcomm);
#endif
    /* fix: the original computed key = tMPI_Comm_seek_rank(comm, ...) in
       the declarations with no NULL check anywhere, dereferencing a NULL
       comm before Comm_split could reject it */
    if (!comm)
    {
        *newcomm = TMPI_COMM_NULL;
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    key = tMPI_Comm_seek_rank(comm, tMPI_Get_current());

    if (tMPI_In_group(group))
    {
        color = 1;
    }
    /* the MPI specs specifically say that this is equivalent */
    return tMPI_Comm_split(comm, color, key, newcomm);
}
/* Compute this thread's rank in a prospective Cartesian topology.

   This function doesn't actually do anything beyond returning the current
   rank (or TMPI_UNDEFINED if it doesn't fit in the new topology).

   comm    - parent communicator
   ndims   - number of dimensions
   dims    - processes per dimension
   periods - periodicity per dimension (only validated for non-NULL here)
   newrank - out: current rank, or TMPI_UNDEFINED when rank >= product(dims)

   Returns TMPI_SUCCESS, TMPI_ERR_COMM for NULL comm, TMPI_ERR_DIMS for
   NULL periods. */
int tMPI_Cart_map(tMPI_Comm comm, int ndims, int *dims, int *periods,
                  int *newrank)
{
    int myrank;
    int Ntot = 1;
    int i;

#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Cart_map(%p, %d, %p, %p, %p)", comm, ndims, dims,
                     periods, newrank);
#endif
    if (!comm)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    if (!periods)
    {
        return tMPI_Error(comm, TMPI_ERR_DIMS);
    }
    /* fix: look up our rank only after comm is known to be non-NULL; the
       original did this in the declarations, dereferencing a NULL comm */
    myrank = tMPI_Comm_seek_rank(comm, tMPI_Get_current());

    /* calculate the total number of procs in cartesian comm */
    for (i = 0; i < ndims; i++)
    {
        Ntot *= dims[i];
    }

    if (myrank >= Ntot)
    {
        *newrank = TMPI_UNDEFINED;
    }
    else
    {
        *newrank = myrank;
    }

    return TMPI_SUCCESS;
}
/* Inclusive prefix reduction: rank i receives op(sendbuf_0, ..., sendbuf_i).

   Implemented as a linear pipeline: each rank waits for its previous
   neighbor's partial result, combines it with its own send buffer, then
   signals the next neighbor. Rank 0 just copies (or leaves in place).

   sendbuf  - input data, or TMPI_IN_PLACE to use recvbuf as input
   recvbuf  - output buffer (must be non-NULL)
   count    - number of elements
   datatype - element type (datatype->size used for the rank-0 memcpy)
   op       - reduction operator, applied via tMPI_Reduce_run_op()
   comm     - communicator

   Returns TMPI_SUCCESS, TMPI_ERR_BUF for NULL recvbuf, or the error from
   tMPI_Reduce_run_op(). count==0 is a no-op. */
int tMPI_Scan(void* sendbuf, void* recvbuf, int count, tMPI_Datatype datatype,
              tMPI_Op op, tMPI_Comm comm)
{
    struct tmpi_thread *cur    = tMPI_Get_current();
    int                 myrank = tMPI_Comm_seek_rank(comm, cur);
    int                 N      = tMPI_Comm_N(comm);
    int                 prev   = myrank - 1; /* my previous neighbor */
    int                 next   = myrank + 1; /* my next neighbor */

#ifdef TMPI_PROFILE
    tMPI_Profile_count_start(cur);
#endif
#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Scan(%p, %p, %d, %p, %p, %p)", sendbuf, recvbuf,
                     count, datatype, op, comm);
#endif
    if (count == 0)
    {
        return TMPI_SUCCESS;
    }
    if (!recvbuf)
    {
        return tMPI_Error(comm, TMPI_ERR_BUF);
    }
    if (sendbuf == TMPI_IN_PLACE)
    {
        sendbuf = recvbuf;
    }

    /* we set our send and recv buffers */
    tMPI_Atomic_ptr_set(&(comm->reduce_sendbuf[myrank]), sendbuf);
    tMPI_Atomic_ptr_set(&(comm->reduce_recvbuf[myrank]), recvbuf);

    /* now wait for the previous rank to finish */
    if (myrank > 0)
    {
        void *a, *b;
        int   ret;

#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
        tMPI_Profile_wait_start(cur);
#endif
        /* wait for the previous neighbor's data to be ready */
        tMPI_Event_wait( &(comm->csync[myrank].events[prev]) );
        tMPI_Event_process( &(comm->csync[myrank].events[prev]), 1);
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
        tMPI_Profile_wait_stop(cur, TMPIWAIT_Reduce);
#endif
#ifdef TMPI_DEBUG
        /* fix: the original passed an undeclared third argument
           ('iteration') for a format string with only two conversions */
        printf("%d: scanning with %d \n", myrank, prev);
        fflush(stdout);
#endif
        /* now do the reduction: take the previous neighbor's accumulated
           result (its recvbuf), except for rank 1, which takes rank 0's
           raw send buffer (rank 0 may not have copied yet) */
        if (prev > 0)
        {
            a = (void*)tMPI_Atomic_ptr_get(&(comm->reduce_recvbuf[prev]));
        }
        else
        {
            a = (void*)tMPI_Atomic_ptr_get(&(comm->reduce_sendbuf[prev]));
        }
        b = sendbuf;

        if ((ret = tMPI_Reduce_run_op(recvbuf, a, b, datatype,
                                      count, op, comm)) != TMPI_SUCCESS)
        {
            return ret;
        }
        /* signal to my previous neighbor that I'm done with the data */
        tMPI_Event_signal( &(comm->csync[prev].events[prev]) );
    }
    else
    {
        if (sendbuf != recvbuf)
        {
            /* copy the data if this is rank 0, and not MPI_IN_PLACE */
            memcpy(recvbuf, sendbuf, count*datatype->size);
        }
    }

    if (myrank < N-1)
    {
        /* signal to my next neighbor that I have the data */
        tMPI_Event_signal( &(comm->csync[next].events[myrank]) );
        /* and wait for my next neighbor to finish */
        tMPI_Event_wait( &(comm->csync[myrank].events[myrank]) );
        tMPI_Event_process( &(comm->csync[myrank].events[myrank]), 1);
    }

#if 0
    /* fix: this barrier is disabled; the original nevertheless left its
       tMPI_Profile_wait_start() active with the matching wait_stop
       commented out, starting a wait interval that was never stopped.
       The whole profiled barrier is kept together here, disabled. */
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
    tMPI_Profile_wait_start(cur);
#endif
    tMPI_Barrier_wait( &(comm->barrier));
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
    tMPI_Profile_wait_stop(cur, TMPIWAIT_Reduce);
#endif
#endif

#if defined(TMPI_PROFILE)
    tMPI_Profile_count_stop(cur, TMPIFN_Scan);
#endif
    return TMPI_SUCCESS;
}
/* this is the main comm creation function. All other functions that create
   comms use this*/
/* Split comm into disjoint sub-communicators: threads with the same color
   end up in the same new communicator, ranked by key; color TMPI_UNDEFINED
   yields TMPI_COMM_NULL in *newcomm.

   Protocol: all threads take comm_create_lock. The first thread to arrive
   allocates the shared split state (comm->split, comm->new_comm) -- for
   small N the color/key arrays live on this first thread's stack, which is
   safe because it cannot return before broadcasting can_finish. Each thread
   deposits its color/key and decrements Ncol_init; the first thread waits
   (comm_create_prep) until all have deposited, builds the communicators,
   fills newcomm_list, and broadcasts comm_create_finish. The last thread to
   pick up its result (Ncol_destroy reaching 0) frees the shared state.

   NOTE(review): the error returns inside the locked region (cond_wait /
   cond_signal / tMPI_Comm_alloc failures) return without unlocking
   comm_create_lock -- presumably acceptable because these are treated as
   fatal, but verify against the library's error policy. */
int tMPI_Comm_split(tMPI_Comm comm, int color, int key, tMPI_Comm *newcomm)
{
    int                 i, j;
    int                 N = tMPI_Comm_N(comm);
    volatile tMPI_Comm *newcomm_list;
    volatile int        colors[MAX_PREALLOC_THREADS]; /* array with the colors
                                                         of each thread */
    volatile int        keys[MAX_PREALLOC_THREADS];   /* same for keys (only one
                                                         of the threads actually
                                                         suplies these arrays to
                                                         the comm structure) */
    tmpi_bool           i_am_first = FALSE;
    int                 myrank = tMPI_Comm_seek_rank(comm, tMPI_Get_current());
    struct tmpi_split  *spl;
    int                 ret;

#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Comm_split(%p, %d, %d, %p)", comm, color, key,
                     newcomm);
#endif
    if (!comm)
    {
        *newcomm = NULL;
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }

    ret = tMPI_Thread_mutex_lock(&(comm->comm_create_lock));
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }
    /* first get the colors */
    if (!comm->new_comm)
    {
        /* i am apparently first */
        comm->split    = (struct tmpi_split*)tMPI_Malloc(sizeof(struct tmpi_split));
        comm->new_comm = (tMPI_Comm*)tMPI_Malloc(N*sizeof(tMPI_Comm));
        if (N <= MAX_PREALLOC_THREADS)
        {
            /* small N: use this thread's stack arrays; safe because this
               thread stays in the function until the broadcast below */
            comm->split->colors = colors;
            comm->split->keys   = keys;
        }
        else
        {
            comm->split->colors = (int*)tMPI_Malloc(N*sizeof(int));
            comm->split->keys   = (int*)tMPI_Malloc(N*sizeof(int));
        }
        comm->split->Ncol_init  = tMPI_Comm_N(comm);
        comm->split->can_finish = FALSE;
        i_am_first              = TRUE;
        /* the main communicator contains a list the size of grp.N */
    }
    newcomm_list = comm->new_comm; /* we copy it to the local stacks because
                                      we can later erase comm->new_comm
                                      safely */
    spl = comm->split;             /* we do the same for spl */
    spl->colors[myrank] = color;
    spl->keys[myrank]   = key;
    spl->Ncol_init--;

    if (spl->Ncol_init == 0)
    {
        /* we were the last to deposit: wake the creator thread */
        ret = tMPI_Thread_cond_signal(&(comm->comm_create_prep));
        if (ret != 0)
        {
            return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
        }
    }

    if (!i_am_first)
    {
        /* all other threads can just wait until the creator thread is
           finished */
        while (!spl->can_finish)
        {
            ret = tMPI_Thread_cond_wait(&(comm->comm_create_finish),
                                        &(comm->comm_create_lock) );
            if (ret != 0)
            {
                return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
            }
        }
    }
    else
    {
        int        Ncomms = 0;
        int        comm_color_[MAX_PREALLOC_THREADS];
        int        comm_N_[MAX_PREALLOC_THREADS];
        int       *comm_color = comm_color_; /* there can't be more comms
                                                than N*/
        int       *comm_N = comm_N_;         /* the number of procs in a
                                                group */
        int       *comm_groups;              /* the groups */
        tMPI_Comm *comms;                    /* the communicators */

        /* wait for the colors to be done */
        /*if (N>1)*/
        while (spl->Ncol_init > 0)
        {
            ret = tMPI_Thread_cond_wait(&(comm->comm_create_prep),
                                        &(comm->comm_create_lock));
            if (ret != 0)
            {
                return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
            }
        }

        /* reset the state so that a new comm creating function can run */
        spl->Ncol_destroy = N;
        comm->new_comm    = 0;
        comm->split       = 0;

        comm_groups = (int*)tMPI_Malloc(N*N*sizeof(int));
        if (N > MAX_PREALLOC_THREADS)
        {
            comm_color = (int*)tMPI_Malloc(N*sizeof(int));
            comm_N     = (int*)tMPI_Malloc(N*sizeof(int));
        }

        /* count colors, allocate and split up communicators */
        tMPI_Split_colors(N, (int*)spl->colors, (int*)spl->keys,
                          &Ncomms, comm_N, comm_color, comm_groups);

        /* allocate a bunch of communicators */
        comms = (tMPI_Comm*)tMPI_Malloc(Ncomms*sizeof(tMPI_Comm));
        for (i = 0; i < Ncomms; i++)
        {
            ret = tMPI_Comm_alloc(&(comms[i]), comm, comm_N[i]);
            if (ret != TMPI_SUCCESS)
            {
                return ret;
            }
        }

        /* now distribute the comms */
        for (i = 0; i < Ncomms; i++)
        {
            comms[i]->grp.N = comm_N[i];
            for (j = 0; j < comm_N[i]; j++)
            {
                comms[i]->grp.peers[j] =
                    comm->grp.peers[comm_groups[i*comm->grp.N + j]];
            }
        }
        /* and put them into the newcomm_list */
        for (i = 0; i < N; i++)
        {
            newcomm_list[i] = TMPI_COMM_NULL;
            for (j = 0; j < Ncomms; j++)
            {
                if (spl->colors[i] == comm_color[j])
                {
                    newcomm_list[i] = comms[j];
                    break;
                }
            }
        }
#ifdef TMPI_DEBUG
        /* output */
        for (i = 0; i < Ncomms; i++)
        {
            printf("Group %d (color %d) has %d members: ",
                   i, comm_color[i], comm_N[i]);
            for (j = 0; j < comm_N[i]; j++)
            {
                printf(" %d ",
                       comm_groups[comm->grp.N*i + j]);
            }

            printf(" rank: ");
            for (j = 0; j < comm_N[i]; j++)
            {
                printf(" %d ",
                       spl->keys[comm_groups[N*i + j]]);
            }
            printf(" color: ");
            for (j = 0; j < comm_N[i]; j++)
            {
                printf(" %d ",
                       spl->colors[comm_groups[N*i + j]]);
            }
            printf("\n");
        }
#endif
        if (N > MAX_PREALLOC_THREADS)
        {
            /* the color/key arrays were heap-allocated in this case */
            free((int*)spl->colors);
            free((int*)spl->keys);
            free(comm_color);
            free(comm_N);
        }
        free(comm_groups);
        free(comms);
        spl->can_finish = TRUE;

        /* tell the waiting threads that there's a comm ready */
        ret = tMPI_Thread_cond_broadcast(&(comm->comm_create_finish));
        if (ret != 0)
        {
            return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
        }
    }
    /* here the individual threads get their comm object */
    *newcomm = newcomm_list[myrank];

    /* free when we have assigned them all, so we can reuse the object*/
    spl->Ncol_destroy--;
    if (spl->Ncol_destroy == 0)
    {
        free((void*)newcomm_list);
        free(spl);
    }

    ret = tMPI_Thread_mutex_unlock(&(comm->comm_create_lock));
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }

    return TMPI_SUCCESS;
}
/* Execute function(param) exactly once across the threads of comm and
   broadcast its return value: the first thread to arrive runs the function
   and publishes the result; all other threads spin until it is published
   and return the same pointer.

   comm      - the communicator whose threads share the once-point
   function  - callback run by the first thread to arrive
   param     - opaque argument forwarded to function
   was_first - if non-NULL, set to TRUE on the thread that ran function
               (left untouched on the other threads)
   Returns the winner's function(param) result on every thread, or NULL
   (after raising TMPI_ERR_COMM) when comm is NULL. */
void* tMPI_Once_wait(tMPI_Comm comm, void* (*function)(void*), void *param,
                     int *was_first)
{
    int              myrank;
    struct coll_sync *csync;
    struct coll_env  *cev;
    int              syncs;
    void             *ret;

    if (!comm)
    {
        tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
        return NULL;
    }
    myrank = tMPI_Comm_seek_rank(comm, tMPI_Get_current());

    /* we increase our counter, and determine which coll_env we get */
    csync = &(comm->csync[myrank]);
    csync->syncs++;
    cev = &(comm->cev[csync->syncs % N_COLL_ENV]);

    /* now do a compare-and-swap on the current_syncc */
    syncs = tMPI_Atomic_get( &(cev->coll.current_sync));
    tMPI_Atomic_memory_barrier_acq();
    if ((csync->syncs - syncs > 0) && /* check if sync was an earlier number.
                                         If it is a later number, we can't
                                         have been the first to arrive here.
                                         Calculating the difference instead
                                         of comparing directly avoids ABA
                                         problems. */
        tMPI_Atomic_cas(&(cev->coll.current_sync), syncs, csync->syncs))
    {
        /* we're the first! */
        ret = function(param);
        if (was_first)
        {
            *was_first = TRUE;
        }

        /* broadcast the output data */
        cev->coll.res = ret;

        tMPI_Atomic_memory_barrier_rel();
        /* signal that we're done: bumping current_sync past csync->syncs
           releases the waiters in the else-branch below */
        tMPI_Atomic_fetch_add(&(cev->coll.current_sync), 1);
        /* we need to keep being in sync */
        csync->syncs++;
    }
    else
    {
        /* we need to wait until the current_syncc gets increased again */
        csync->syncs++;
        do
        {
            /*tMPI_Atomic_memory_barrier();*/
            syncs = tMPI_Atomic_get( &(cev->coll.current_sync) );
        }
        while (csync->syncs - syncs > 0); /* difference again due to ABA
                                             problems */
        tMPI_Atomic_memory_barrier_acq();
        /* the winner published its result before the release above */
        ret = cev->coll.res;
    }
    return ret;
}
/* Gather: every thread sends sendcount elements to root, which collects
   them rank-ordered into recvbuf.

   Root copies its own contribution directly (unless sendbuf is
   TMPI_IN_PLACE), then polls the collective environment: each non-root
   thread posts its buffer via tMPI_Post_multi() and signals root's recv
   event; root pulls each posted buffer with tMPI_Mult_recv() as it becomes
   available. Non-root threads block in tMPI_Wait_for_others() until root
   has copied their data, so their send buffers stay valid throughout.

   Returns TMPI_SUCCESS, TMPI_ERR_COMM for NULL comm, TMPI_ERR_BUF for a
   NULL sendbuf on a non-root thread, or an error propagated through ret. */
int tMPI_Gather(void* sendbuf, int sendcount, tMPI_Datatype sendtype,
                void* recvbuf, int recvcount, tMPI_Datatype recvtype,
                int root, tMPI_Comm comm)
{
    int                 synct;
    struct coll_env    *cev;
    int                 myrank;
    int                 ret = TMPI_SUCCESS;
    struct tmpi_thread *cur = tMPI_Get_current();

#ifdef TMPI_PROFILE
    tMPI_Profile_count_start(cur);
#endif
#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Gather(%p, %d, %p, %p, %d, %p, %d, %p)",
                     sendbuf, sendcount, sendtype,
                     recvbuf, recvcount, recvtype, root, comm);
#endif
    if (!comm)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    myrank = tMPI_Comm_seek_rank(comm, cur);

    /* we increase our counter, and determine which coll_env we get */
    cev = tMPI_Get_cev(comm, myrank, &synct);

    if (myrank == root)
    {
        int i;
        int n_remaining = comm->grp.N-1; /* threads still to receive from */
        /* do root transfer */
        if (sendbuf != TMPI_IN_PLACE)
        {
            tMPI_Coll_root_xfer(comm, sendtype, recvtype,
                                sendtype->size*sendcount,
                                recvtype->size*recvcount,
                                sendbuf,
                                (char*)recvbuf+myrank*recvcount*recvtype->size,
                                &ret);
        }
        for (i = 0; i < comm->grp.N; i++)
        {
            cev->met[myrank].read_data[i] = FALSE;
        }
        /* our own contribution is already in place */
        cev->met[myrank].read_data[myrank] = TRUE;

        /* wait for data availability as long as there are xfers to be done */
        while (n_remaining > 0)
        {
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
            tMPI_Profile_wait_start(cur);
#endif
            tMPI_Event_wait( &(cev->met[myrank]).recv_ev );
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
            tMPI_Profile_wait_stop(cur, TMPIWAIT_Coll_recv);
#endif
            /* now check all of them */
            for (i = 0; i < comm->grp.N; i++)
            {
                /* a sender is ready when its current_sync matches our sync
                   counter and we haven't copied from it yet */
                if (!cev->met[myrank].read_data[i] &&
                    (tMPI_Atomic_get(&(cev->met[i].current_sync)) == synct))
                {
                    tMPI_Mult_recv(comm, cev, i, 0, TMPI_GATHER_TAG, recvtype,
                                   recvcount*recvtype->size,
                                   (char*)recvbuf+i*recvcount*recvtype->size,
                                   &ret);
                    tMPI_Event_process( &(cev->met[myrank]).recv_ev, 1);
                    if (ret != TMPI_SUCCESS)
                    {
                        return ret;
                    }
                    cev->met[myrank].read_data[i] = TRUE;
                    n_remaining--;
                }
            }
        }
    }
    else
    {
        if (!sendbuf) /* don't do pointer arithmetic on a NULL ptr */
        {
            return tMPI_Error(comm,
                              TMPI_ERR_BUF);
        }

        /* first set up the data just to root. */
        ret = tMPI_Post_multi(cev, myrank, 0, TMPI_GATHER_TAG, sendtype,
                              sendcount*sendtype->size, sendbuf, 1, synct,
                              root);
        if (ret != TMPI_SUCCESS)
        {
            return ret;
        }
        /* and wait until root is done copying */
        tMPI_Wait_for_others(cev, myrank);
    }
#ifdef TMPI_PROFILE
    tMPI_Profile_count_stop(cur, TMPIFN_Gather);
#endif
    return ret;
}
/* All-to-all exchange: thread i sends its j-th sendcount-sized chunk to
   thread j and receives thread j's i-th chunk into its own recvbuf.

   Every thread posts N per-destination buffer pointers in its coll_env
   slot (done manually rather than via tMPI_Post_multi because there are
   multiple posts), publishes them with a release barrier + current_sync
   store, signals all peers, copies its own diagonal chunk locally, then
   polls for the peers' posts and pulls each chunk with tMPI_Mult_recv().
   Finally tMPI_Wait_for_others() blocks until everyone has copied our data,
   so sendbuf stays valid for the whole call.

   Returns TMPI_SUCCESS, TMPI_ERR_COMM for NULL comm, TMPI_ERR_BUF for NULL
   sendbuf/recvbuf, or an error propagated through ret.
   NOTE(review): TMPI_IN_PLACE is not handled here -- verify callers never
   pass it for alltoall. */
int tMPI_Alltoall(void* sendbuf, int sendcount, tMPI_Datatype sendtype,
                  void* recvbuf, int recvcount, tMPI_Datatype recvtype,
                  tMPI_Comm comm)
{
    int                 synct;
    struct coll_env    *cev;
    int                 myrank;
    int                 ret = TMPI_SUCCESS;
    int                 i;
    size_t              sendsize = sendtype->size*sendcount;
    size_t              recvsize = recvtype->size*recvcount;
    int                 n_remaining;
    struct tmpi_thread *cur = tMPI_Get_current();

#ifdef TMPI_PROFILE
    tMPI_Profile_count_start(cur);
#endif
#ifdef TMPI_TRACE
    tMPI_Trace_print("tMPI_Alltoall(%p, %d, %p, %p, %d, %p, %p)",
                     sendbuf, sendcount, sendtype,
                     recvbuf, recvcount, recvtype, comm);
#endif
    if (!comm)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_COMM);
    }
    if (!sendbuf || !recvbuf) /* don't do pointer arithmetic on a NULL ptr */
    {
        return tMPI_Error(comm, TMPI_ERR_BUF);
    }

    myrank = tMPI_Comm_seek_rank(comm, cur);

    /* we increase our counter, and determine which coll_env we get */
    cev = tMPI_Get_cev(comm, myrank, &synct);

    /* post our pointers */
    /* we set up multiple posts, so no Post_multi */
    cev->met[myrank].tag      = TMPI_ALLTOALL_TAG;
    cev->met[myrank].datatype = sendtype;
    tMPI_Atomic_set( &(cev->met[myrank].n_remaining), cev->N-1 );
    for (i = 0; i < comm->grp.N; i++)
    {
        cev->met[myrank].bufsize[i]   = sendsize;
        cev->met[myrank].buf[i]       = (char*)sendbuf+sendsize*i;
        cev->met[myrank].read_data[i] = FALSE;
    }
    /* release barrier before the current_sync store makes the buffer
       pointers above visible to any peer that observes synct */
    tMPI_Atomic_memory_barrier_rel();
    tMPI_Atomic_set(&(cev->met[myrank].current_sync), synct);

    /* post availability */
    for (i = 0; i < cev->N; i++)
    {
        if (i != myrank)
        {
            tMPI_Event_signal( &(cev->met[i].recv_ev) );
        }
    }

    /* we don't do the copy buffer thing here because it's pointless:
       the processes have to synchronize anyway, because they all
       send and receive. */

    /* do root transfer: copy our own diagonal chunk locally */
    tMPI_Coll_root_xfer(comm, sendtype, recvtype,
                        sendsize, recvsize,
                        (char*)sendbuf+sendsize*myrank,
                        (char*)recvbuf+recvsize*myrank, &ret);
    cev->met[myrank].read_data[myrank] = TRUE;

    /* and poll data availability */
    n_remaining = cev->N-1;
    while (n_remaining > 0)
    {
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
        tMPI_Profile_wait_start(cur);
#endif
        tMPI_Event_wait( &(cev->met[myrank]).recv_ev );
#if defined(TMPI_PROFILE) && defined(TMPI_CYCLE_COUNT)
        tMPI_Profile_wait_stop(cur, TMPIWAIT_Coll_recv);
#endif
        for (i = 0; i < cev->N; i++)
        {
            /* peer i's post is visible once its current_sync equals our
               sync counter */
            if ((!cev->met[myrank].read_data[i]) &&
                (tMPI_Atomic_get(&(cev->met[i].current_sync)) == synct))
            {
                tMPI_Event_process( &(cev->met[myrank]).recv_ev, 1);
                tMPI_Mult_recv(comm, cev, i, myrank, TMPI_ALLTOALL_TAG,
                               recvtype, recvsize,
                               (char*)recvbuf+recvsize*i, &ret);
                if (ret != TMPI_SUCCESS)
                {
                    return ret;
                }
                cev->met[myrank].read_data[i] = TRUE;
                n_remaining--;
            }
        }
    }

    /* and wait until everybody is done copying our data */
    tMPI_Wait_for_others(cev, myrank);

#ifdef TMPI_PROFILE
    tMPI_Profile_count_stop(cur, TMPIFN_Alltoall);
#endif
    return ret;
}