/*
 * rescomm: Communication routine in support of the residual evaluation.
 * Performs all inter-processor communication of components of uu needed
 * at interior subgrid boundaries (ghost-cell data), loading the data
 * into the extended work array data->uext.  The message passing uses
 * non-blocking receives (BRecvPost), blocking sends (BSend), and
 * receive-waiting (BRecvWait); the local buffer stays alive across the
 * posted receives until BRecvWait completes them.
 *
 * Nlocal, tt, and up are part of the required callback signature but
 * are not used here.  Returns 0 (success).
 */
static int rescomm(long int Nlocal, realtype tt,
                   N_Vector uu, N_Vector up, void *user_data)
{
  UserData data;
  realtype *uarray, *uext, buffer[2*MYSUB];
  MPI_Comm comm;
  int thispe, ixsub, jysub, mxsub, mysub;
  MPI_Request request[4];

  /* Unused in this routine; silence compiler warnings. */
  (void)Nlocal;
  (void)tt;
  (void)up;

  data = (UserData) user_data;
  uarray = N_VGetArrayPointer_Parallel(uu);

  /* Get comm, thispe, subgrid indices, data sizes, extended array uext. */
  comm = data->comm;
  thispe = data->thispe;
  ixsub = data->ixsub;
  jysub = data->jysub;
  mxsub = data->mxsub;
  mysub = data->mysub;
  uext = data->uext;

  /* Start receiving boundary data from neighboring PEs. */
  BRecvPost(comm, request, thispe, ixsub, jysub, mxsub, mysub, uext, buffer);

  /* Send data from boundary of local grid to neighboring PEs. */
  BSend(comm, thispe, ixsub, jysub, mxsub, mysub, uarray);

  /* Finish receiving boundary data from neighboring PEs. */
  BRecvWait(request, ixsub, jysub, mxsub, uext, buffer);

  return(0);
}
/*
 * ccomm: performs all inter-processor communication of the ghost-cell
 * (interior subgrid boundary) components of cc, loading them into the
 * extended work array data->cext.  Non-blocking receives are posted
 * first, this PE's boundary data is then sent, and finally the posted
 * receives are completed.  Returns 0 (success).
 */
static int ccomm(long int Nlocal, N_Vector cc, void *userdata)
{
  UserData data = (UserData) userdata;
  realtype recvbuf[2*NUM_SPECIES*MYSUB];
  MPI_Request request[4];

  /* Extract communicator, PE id, subgrid indices and data sizes. */
  MPI_Comm comm    = data->comm;
  int my_pe        = data->my_pe;
  int isubx        = data->isubx;
  int isuby        = data->isuby;
  int nsmxsub      = data->nsmxsub;
  int nsmysub      = NUM_SPECIES*MYSUB;
  realtype *cext   = data->cext;
  realtype *cdata  = NV_DATA_P(cc);

  /* Post non-blocking receives for boundary data from neighboring PEs. */
  BRecvPost(comm, request, my_pe, isubx, isuby, nsmxsub, nsmysub, cext, recvbuf);

  /* Send data from the boundary of the local grid to neighboring PEs. */
  BSend(comm, my_pe, isubx, isuby, nsmxsub, nsmysub, cdata);

  /* Wait for all posted receives to complete. */
  BRecvWait(request, isubx, isuby, nsmxsub, cext, recvbuf);

  return(0);
}
/*
 * ucomm: performs all inter-processor communication of the ghost-cell
 * (interior subgrid boundary) components of u, loading them into the
 * extended work array data->uext.  Receives are posted non-blocking,
 * boundary data is sent, and the receives are then completed.
 */
static void ucomm(realtype t, N_Vector u, UserData data)
{
  realtype recvbuf[2*NVARS*MYSUB];
  MPI_Request request[4];

  realtype *udata = NV_DATA_P(u);

  /* Extract communicator, PE id, subgrid indices, data sizes, uext. */
  MPI_Comm comm      = data->comm;
  int my_pe          = data->my_pe;
  int isubx          = data->isubx;
  int isuby          = data->isuby;
  long int nvmxsub   = data->nvmxsub;
  long int nvmysub   = NVARS*MYSUB;
  realtype *uext     = data->uext;

  /* Post non-blocking receives for boundary data from neighboring PEs. */
  BRecvPost(comm, request, my_pe, isubx, isuby, nvmxsub, nvmysub, uext, recvbuf);

  /* Send data from the boundary of the local grid to neighboring PEs. */
  BSend(comm, my_pe, isubx, isuby, nvmxsub, nvmysub, udata);

  /* Wait for all posted receives to complete. */
  BRecvWait(request, isubx, isuby, nvmxsub, uext, recvbuf);
}
/*
 * rescomm: performs all inter-processor communication of the ghost-cell
 * (interior subgrid boundary) components of cc, loading them into the
 * extended work array webdata->cext.  Non-blocking receives are posted,
 * boundary data is sent, and the receives are then completed.
 * cp is part of the required signature and is not used here.
 * Returns 0 (success).
 */
static int rescomm(N_Vector cc, N_Vector cp, void *user_data)
{
  UserData data = (UserData) user_data;
  realtype recvbuf[2*NUM_SPECIES*MYSUB];
  MPI_Request request[4];

  realtype *cdata = NV_DATA_P(cc);

  /* Extract communicator, PE id, subgrid indices, sizes, and cext. */
  MPI_Comm comm    = data->comm;
  int thispe       = data->thispe;
  int ixsub        = data->ixsub;
  int jysub        = data->jysub;
  realtype *cext   = data->cext;
  int nsmxsub      = data->nsmxsub;
  int nsmysub      = (data->ns)*(data->mysub);

  /* Post non-blocking receives for boundary data from neighboring PEs. */
  BRecvPost(comm, request, thispe, ixsub, jysub, nsmxsub, nsmysub, cext, recvbuf);

  /* Send data from the boundary of the local grid to neighboring PEs. */
  BSend(comm, thispe, ixsub, jysub, nsmxsub, nsmysub, cdata);

  /* Wait for all posted receives to complete. */
  BRecvWait(request, ixsub, jysub, nsmxsub, cext, recvbuf);

  return(0);
}
/*
 * rescomm: Communication routine in support of resweb.
 * Performs all inter-processor communication of components of the uv
 * vector needed to calculate F — namely the components at all interior
 * subgrid boundaries (ghost-cell data) — loading them into the work
 * array gridext (the local portion of c, extended).  The message
 * passing uses blocking sends, non-blocking receives, and
 * receive-waiting, via BSend, BRecvPost, and BRecvWait.
 * Nlocal, tt, and uvp are part of the required callback signature and
 * are not used here.  Returns 0 (success).
 */
static int rescomm(long int Nlocal, realtype tt, N_Vector uv, N_Vector uvp,
                   void *user_data)
{
  UserData data = (UserData) user_data;
  realtype recvbuf[2*NUM_SPECIES*MYSUB];
  MPI_Request request[4];

  realtype *cdata = N_VGetArrayPointer_Parallel(uv);

  /* Extract communicator, PE id, subgrid indices, sizes, and gridext. */
  MPI_Comm comm      = data->comm;
  int thispe         = data->thispe;
  int ixsub          = data->ixsub;
  int jysub          = data->jysub;
  realtype *gridext  = data->gridext;
  int nsmxsub        = data->nsmxsub;
  int nsmysub        = (data->ns)*(data->mysub);

  /* Post non-blocking receives for boundary data from neighboring PEs. */
  BRecvPost(comm, request, thispe, ixsub, jysub, nsmxsub, nsmysub,
            gridext, recvbuf);

  /* Send data from the boundary of the local grid to neighboring PEs. */
  BSend(comm, thispe, ixsub, jysub, nsmxsub, nsmysub, cdata);

  /* Wait for all posted receives to complete. */
  BRecvWait(request, ixsub, jysub, nsmxsub, gridext, recvbuf);

  return(0);
}
/*
 * ccomm: performs all inter-processor communication of the ghost-cell
 * (interior subgrid boundary) species data in cdata, loading it into
 * the extended work array data->cext.  Non-blocking receives are
 * posted, boundary data is sent, and the receives are then completed.
 */
static void ccomm(realtype *cdata, UserData data)
{
  realtype recvbuf[2*NUM_SPECIES*MYSUB];
  MPI_Request request[4];

  /* Extract communicator, PE id, subgrid indices, sizes, and cext. */
  MPI_Comm comm     = data->comm;
  long int my_pe    = data->my_pe;
  long int isubx    = data->isubx;
  long int isuby    = data->isuby;
  long int nsmxsub  = data->nsmxsub;
  long int nsmysub  = NUM_SPECIES*MYSUB;
  realtype *cext    = data->cext;

  /* Post non-blocking receives for boundary data from neighboring PEs. */
  BRecvPost(comm, request, my_pe, isubx, isuby, nsmxsub, nsmysub, cext, recvbuf);

  /* Send data from the boundary of the local grid to neighboring PEs. */
  BSend(comm, my_pe, isubx, isuby, nsmxsub, nsmysub, cdata);

  /* Wait for all posted receives to complete. */
  BRecvWait(request, isubx, isuby, nsmxsub, cext, recvbuf);
}