/*
   --------------------------------------------------------------
   purpose -- to split a distributed Pencil object

   pencil   -- pointer to the local Pencil object
   mapIV    -- pointer to the map from vertices to processes
   firsttag -- first tag value, two will be used, tag and tag+1
   stats[4] -- statistics vector
      stats[0] -- # of messages sent
      stats[1] -- # of messages received
      stats[2] -- # of bytes sent
      stats[3] -- # of bytes received
   msglvl   -- local message level
   msgFile  -- local message file
   comm     -- MPI communicator

   created -- 98may20, cca
   --------------------------------------------------------------
*/
void
Pencil_MPI_split (
   Pencil     *pencil,
   IV         *mapIV,
   int        stats[],
   int        msglvl,
   FILE       *msgFile,
   int        firsttag,
   MPI_Comm   comm
) {
InpMtx   *inpmtxA, *inpmtxB, *keepmtx ;
int      tag ;
/*
   -----------------------
   check the range of tags
   -----------------------
*/
if ( firsttag < 0 || firsttag + 1 > maxTagMPI(comm) ) {
   fprintf(stderr, "\n fatal error in Pencil_MPI_split()"
           "\n range of tags is [%d,%d], tag_bound = %d",
           firsttag, firsttag + 1, maxTagMPI(comm)) ;
   exit(-1) ;
}
/*
   ------------------------------------
   split the InpMtx objects into pieces
   ------------------------------------
*/
tag = firsttag ;
if ( (inpmtxA = pencil->inpmtxA) != NULL ) {
   keepmtx = InpMtx_MPI_split(inpmtxA, mapIV, stats,
                              msglvl, msgFile, tag, comm) ;
   InpMtx_free(inpmtxA) ;
   pencil->inpmtxA = keepmtx ;
}
tag += 1 ;
if ( (inpmtxB = pencil->inpmtxB) != NULL ) {
   keepmtx = InpMtx_MPI_split(inpmtxB, mapIV, stats,
                              msglvl, msgFile, tag, comm) ;
   InpMtx_free(inpmtxB) ;
   pencil->inpmtxB = keepmtx ;
}
tag += 1 ;

return ; }
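/*
   usage sketch (hypothetical driver, not part of the library) :
   after an ownership map has been computed, each process calls
   Pencil_MPI_split() so that it keeps only the entries of A and B
   that it owns.  the names examplePencilSplit, pencil and ownersIV
   are assumptions made for illustration.
*/
static void
examplePencilSplit (
   Pencil     *pencil,
   IV         *ownersIV,
   FILE       *msgFile,
   MPI_Comm   comm
) {
int   stats[4] = { 0, 0, 0, 0 } ;
/* firsttag = 1 here ; two consecutive tags (1 and 2) are
   consumed, one per InpMtx object in the pencil */
Pencil_MPI_split(pencil, ownersIV, stats, 0, msgFile, 1, comm) ;
if ( msgFile != NULL ) {
   fprintf(msgFile, "\n pencil split : %d messages, %d bytes sent",
           stats[0], stats[2]) ;
}
}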
/*
   -----------------------------------------------------------------
   purpose -- the IV objects objIV and ownersIV are found on each
      process. the ownersIV object is identical over all the
      processes, and owners[ii] tells which process owns location ii
      of the obj[] vector. on return from this method, the obj[]
      vector is replicated over all the processes. each process
      sends the (ii,obj[ii]) pairs that it owns to all the other
      processes.

   created -- 98apr02, cca
   -----------------------------------------------------------------
*/
void
IV_MPI_allgather (
   IV         *objIV,
   IV         *ownersIV,
   int        stats[],
   int        msglvl,
   FILE       *msgFile,
   int        firsttag,
   MPI_Comm   comm
) {
int   count, destination, ii, incount, iproc, jj, lasttag, left,
      maxcount, myid, nowners, nproc, nvec, offset, outcount,
      right, source, tag, tagbound, value ;
int   *counts, *inbuffer, *outbuffer, *owners, *vec ;
MPI_Status   status ;
/*
   ---------------
   check the input
   ---------------
*/
if ( objIV == NULL || ownersIV == NULL ) {
   fprintf(stderr, "\n fatal error in IV_MPI_allgather()"
           "\n objIV = %p, ownersIV = %p\n", objIV, ownersIV) ;
   spoolesFatal() ;
}
/*
   ----------------------------------------------
   get id of self, # of processes and # of fronts
   ----------------------------------------------
*/
MPI_Comm_rank(comm, &myid) ;
MPI_Comm_size(comm, &nproc) ;
IV_sizeAndEntries(objIV, &nvec, &vec) ;
IV_sizeAndEntries(ownersIV, &nowners, &owners) ;
if ( msglvl > 1 ) {
   fprintf(msgFile, "\n\n inside IV_MPI_allgather"
           "\n nproc = %d, myid = %d, nvec = %d, nowners = %d",
           nproc, myid, nvec, nowners) ;
   fflush(msgFile) ;
}
if ( nvec != nowners || vec == NULL || owners == NULL ) {
   fprintf(stderr, "\n fatal error in IV_MPI_allgather()"
           "\n nvec = %d, nowners = %d, vec = %p, owners = %p\n",
           nvec, nowners, vec, owners) ;
   spoolesFatal() ;
}
/*
   -------------------
   check the tag range
   -------------------
*/
lasttag  = firsttag + nproc ;
tagbound = maxTagMPI(comm) ;
if ( firsttag < 0 || lasttag > tagbound ) {
   fprintf(stderr, "\n fatal error in IV_MPI_allgather()"
           "\n firsttag = %d, lasttag = %d, tagbound = %d\n",
           firsttag, lasttag, tagbound) ;
   spoolesFatal() ;
}
if ( msglvl > 2 ) {
   fprintf(msgFile, "\n\n objIV") ;
   IV_writeForHumanEye(objIV, msgFile) ;
   fprintf(msgFile, "\n\n ownersIV") ;
   IV_writeForHumanEye(ownersIV, msgFile) ;
   fflush(msgFile) ;
}
/*
   --------------------------------------------------------------
   step 1 : determine the number of entries owned by each process
   --------------------------------------------------------------
*/
counts = IVinit(nproc, 0) ;
for ( ii = 0 ; ii < nvec ; ii++ ) {
   if ( owners[ii] < 0 || owners[ii] >= nproc ) {
      fprintf(stderr, "\n owners[%d] = %d", ii, owners[ii]) ;
      spoolesFatal() ;
   }
   counts[owners[ii]]++ ;
}
if ( msglvl > 2 ) {
   fprintf(msgFile, "\n counts") ;
   IVfprintf(msgFile, nproc, counts) ;
   fflush(msgFile) ;
}
/*
   -----------------------------
   set up the in and out buffers
   -----------------------------
*/
if ( counts[myid] > 0 ) {
   outbuffer = IVinit(2*counts[myid], -1) ;
   for ( ii = jj = 0 ; ii < nvec ; ii++ ) {
      if ( owners[ii] == myid ) {
         outbuffer[jj++] = ii ;
         outbuffer[jj++] = vec[ii] ;
      }
   }
   if ( jj != 2*counts[myid] ) {
      fprintf(msgFile, "\n jj = %d, 2*counts[%d] = %d",
              jj, myid, 2*counts[myid]) ;
      fprintf(stderr, "\n jj = %d, 2*counts[%d] = %d",
              jj, myid, 2*counts[myid]) ;
      spoolesFatal() ;
   }
} else {
   outbuffer = NULL ;
}
maxcount = IVmax(nproc, counts, &iproc) ;
if ( maxcount > 0 ) {
   inbuffer = IVinit(2*maxcount, -1) ;
} else {
   inbuffer = NULL ;
}
if ( msglvl > 2 ) {
   fprintf(msgFile, "\n outbuffer %p, maxcount %d, inbuffer %p",
           outbuffer, maxcount, inbuffer) ;
   fflush(msgFile) ;
}
/*
   -------------------------------------
   step 2: loop over the other processes
           send and receive information
   -------------------------------------
*/
outcount = 2*counts[myid] ;
for ( offset = 1, tag = firsttag ; offset < nproc ; offset++, tag++ ) {
   right = (myid + offset) % nproc ;
   if ( offset <= myid ) {
      left = myid - offset ;
   } else {
      left = nproc + myid - offset ;
   }
   if ( outcount > 0 ) {
      destination = right ;
      stats[0]++ ;
      stats[2] += outcount*sizeof(int) ;
   } else {
      destination = MPI_PROC_NULL ;
   }
   incount = 2*counts[left] ;
   if ( incount > 0 ) {
      source = left ;
      stats[1]++ ;
      stats[3] += incount*sizeof(int) ;
   } else {
      source = MPI_PROC_NULL ;
   }
   if ( msglvl > 1 ) {
      fprintf(msgFile, "\n offset %d, source %d, destination %d",
              offset, source, destination) ;
      fflush(msgFile) ;
   }
/*
   -----------------
   do a send/receive
   -----------------
*/
   MPI_Sendrecv(outbuffer, outcount, MPI_INT, destination, tag,
                inbuffer,  incount,  MPI_INT, source,      tag,
                comm, &status) ;
   if ( source != MPI_PROC_NULL ) {
      MPI_Get_count(&status, MPI_INT, &count) ;
      if ( count != incount ) {
         fprintf(stderr,
                 "\n 1. fatal error in IV_MPI_allgather()"
                 "\n proc %d : source = %d, count = %d, incount = %d\n",
                 myid, source, count, incount) ;
         spoolesFatal() ;
      }
   }
/*
   ----------------------------
   set the values in the vector
   ----------------------------
*/
   for ( jj = 0 ; jj < incount ; jj += 2 ) {
      ii      = inbuffer[jj] ;
      value   = inbuffer[jj+1] ;
      vec[ii] = value ;
   }
   if ( jj != incount ) {
      fprintf(msgFile, "\n jj = %d, incount = %d", jj, incount) ;
      fprintf(stderr, "\n jj = %d, incount = %d", jj, incount) ;
      spoolesFatal() ;
   }
   if ( msglvl > 2 ) {
      fprintf(msgFile, "\n after setting values") ;
      IVfprintf(msgFile, nvec, vec) ;
      fflush(msgFile) ;
   }
}
/*
   ------------------------
   free the working storage
   ------------------------
*/
IVfree(counts) ;
if ( outbuffer != NULL ) {
   IVfree(outbuffer) ;
}
if ( inbuffer != NULL ) {
   IVfree(inbuffer) ;
}
if ( msglvl > 2 ) {
   fprintf(msgFile, "\n\n leaving IV_MPI_allgather()") ;
   fflush(msgFile) ;
}

return ; }
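/*
   usage sketch (hypothetical, all names below are assumptions) :
   each process fills in only the entries of obj[] that it owns
   according to ownersIV, then a single call to IV_MPI_allgather()
   replicates the fully assembled vector on every process.
*/
static void
exampleAllgather (
   IV         *objIV,
   IV         *ownersIV,
   FILE       *msgFile,
   MPI_Comm   comm
) {
int   ii, myid, n, nowners ;
int   stats[4] = { 0, 0, 0, 0 } ;
int   *obj, *owners ;

MPI_Comm_rank(comm, &myid) ;
IV_sizeAndEntries(objIV, &n, &obj) ;
IV_sizeAndEntries(ownersIV, &nowners, &owners) ;
for ( ii = 0 ; ii < n ; ii++ ) {
   if ( owners[ii] == myid ) {
      obj[ii] = 10*ii + myid ; /* arbitrary locally computed value */
   }
}
/* firsttag = 1 here ; tags 1, ..., 1 + nproc - 1 are reserved */
IV_MPI_allgather(objIV, ownersIV, stats, 0, msgFile, 1, comm) ;
}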
/*
   ------------------------------------------------------------------
   this method is used during the setup for matrix-vector multiplies.
   each processor has computed the vertices it needs from other
   processors ; these lists are contained in sendIVL. on return,
   recvIVL contains the lists of vertices this processor must send
   to all others.

   sendIVL -- on input, list[q] contains the vertices needed by
              this processor that are owned by q
   recvIVL -- on output, list[q] contains the vertices owned by
              this processor that are needed by q. note, if NULL
              on input, a new IVL object is allocated
   stats[] -- statistics vector
      stats[0] -- contains # of sends
      stats[1] -- contains # of receives
      stats[2] -- contains # of bytes sent
      stats[3] -- contains # of bytes received
   firsttag -- first tag for messages,
      tags in range [firsttag, firsttag+nproc-1] are used

   return value -- recvIVL

   created -- 98jul26, cca
   ------------------------------------------------------------------
*/
IVL *
IVL_MPI_alltoall (
   IVL        *sendIVL,
   IVL        *recvIVL,
   int        stats[],
   int        msglvl,
   FILE       *msgFile,
   int        firsttag,
   MPI_Comm   comm
) {
int   count, destination, lasttag, left, myid, nproc, offset, q,
      recvcount, right, sendcount, source, tag, tagbound ;
int   *incounts, *outcounts, *recvvec, *sendvec ;
MPI_Status   status ;
/*
   ---------------
   check the input
   ---------------
*/
if ( sendIVL == NULL || stats == NULL
   || (msglvl > 0 && msgFile == NULL) ) {
   fprintf(stderr, "\n fatal error in IVL_MPI_alltoall()"
           "\n bad input\n") ;
   exit(-1) ;
}
/*
   ---------------------------------------
   get id of self and number of processors
   ---------------------------------------
*/
MPI_Comm_rank(comm, &myid) ;
MPI_Comm_size(comm, &nproc) ;
if ( sendIVL->nlist != nproc ) {
   fprintf(stderr, "\n fatal error in IVL_MPI_alltoall()"
           "\n sendIVL: nproc = %d, nlist = %d\n",
           nproc, sendIVL->nlist) ;
   exit(-1) ;
}
lasttag = firsttag + nproc ;
if ( firsttag < 0 || lasttag > (tagbound = maxTagMPI(comm)) ) {
   fprintf(stderr, "\n fatal error in IVL_MPI_alltoall()"
           "\n firsttag = %d, lasttag = %d, tag_bound = %d",
           firsttag, lasttag, tagbound) ;
   exit(-1) ;
}
if ( recvIVL == NULL ) {
   recvIVL = IVL_new() ;
} else {
   IVL_clearData(recvIVL) ;
}
IVL_init1(recvIVL, IVL_CHUNKED, nproc) ;
/*
   ------------------------------------------
   outcounts[] is sendIVL->sizes[].
   incounts[] will be recvIVL->sizes[].
   fill incounts via a call to MPI_Alltoall()
   and then initialize the recvIVL lists.
   ------------------------------------------
*/
outcounts = sendIVL->sizes ;
incounts  = IVinit(nproc, 0) ;
MPI_Alltoall((void *) outcounts, 1, MPI_INT,
             (void *) incounts,  1, MPI_INT, comm) ;
for ( q = 0 ; q < nproc ; q++ ) {
   IVL_setList(recvIVL, q, incounts[q], NULL) ;
}
IVfree(incounts) ;
/*
   ---------------------------------------------------
   load list myid of sendIVL into list myid of recvIVL
   ---------------------------------------------------
*/
IVL_listAndSize(sendIVL, myid, &sendcount, &sendvec) ;
IVL_setList(recvIVL, myid, sendcount, sendvec) ;
/*
   ---------------------------------------------------------
   now loop over the processes, send and receive information
   ---------------------------------------------------------
*/
for ( offset = 1, tag = firsttag ; offset < nproc ; offset++, tag++ ) {
   right = (myid + offset) % nproc ;
   if ( offset <= myid ) {
      left = myid - offset ;
   } else {
      left = nproc + myid - offset ;
   }
   IVL_listAndSize(sendIVL, right, &sendcount, &sendvec) ;
   IVL_listAndSize(recvIVL, left,  &recvcount, &recvvec) ;
   if ( sendcount > 0 ) {
      destination = right ;
      stats[0]++ ;
      stats[2] += sendcount*sizeof(int) ;
   } else {
      destination = MPI_PROC_NULL ;
   }
   if ( recvcount > 0 ) {
      source = left ;
      stats[1]++ ;
      stats[3] += recvcount*sizeof(int) ;
   } else {
      source = MPI_PROC_NULL ;
   }
   if ( msglvl > 2 ) {
      fprintf(msgFile,
         "\n offset %d, recvcount %d, source %d, sendcount %d, destination %d",
         offset, recvcount, source, sendcount, destination) ;
      fflush(msgFile) ;
   }
/*
   -----------------
   do a send/receive
   -----------------
*/
   MPI_Sendrecv((void *) sendvec, sendcount, MPI_INT, destination, tag,
                (void *) recvvec, recvcount, MPI_INT, source,      tag,
                comm, &status) ;
   if ( source != MPI_PROC_NULL ) {
      MPI_Get_count(&status, MPI_INT, &count) ;
      if ( count != recvcount ) {
         fprintf(stderr, "\n fatal error in IVL_MPI_alltoall()"
                 "\n proc %d : source %d, count %d, recvcount %d\n",
                 myid, source, count, recvcount) ;
         exit(-1) ;
      }
   }
   if ( msglvl > 2 ) {
      fprintf(msgFile, "\n send/recv completed") ;
      fflush(msgFile) ;
   }
}
return(recvIVL) ; }
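/*
   usage sketch (hypothetical, names are assumptions) : sendIVL
   holds, for each process q, the list of vertices this process
   needs from q.  one call to IVL_MPI_alltoall() returns recvIVL,
   whose list q holds the vertices this process must send to q,
   the dual exchange needed when setting up matrix-vector multiply
   communication patterns.
*/
static IVL *
exampleExchangeLists (
   IVL        *sendIVL,
   FILE       *msgFile,
   MPI_Comm   comm
) {
int   stats[4] = { 0, 0, 0, 0 } ;
IVL   *recvIVL ;
/* recvIVL is NULL on input, so a new IVL object is allocated
   and returned ; tags 1, ..., 1 + nproc - 1 are reserved */
recvIVL = IVL_MPI_alltoall(sendIVL, NULL, stats, 0, msgFile, 1, comm) ;
return(recvIVL) ;
}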