/** Execute sync_memory */
void _XMP_mpi_sync_memory()
{
  if(_XMP_flag_multi_win){
    int num = 0;
    _XMP_coarray_t **coarrays = _XMP_coarray_get_list(&num);
    for(int i = 0; i < num; i++){
      MPI_Win win = coarrays[i]->win;
      if(win != MPI_WIN_NULL){
        XACC_DEBUG("flush_all for host a coarray (%ld)", (long)win);
        MPI_Win_flush_all(win);
        XACC_DEBUG("sync for host a coarray (%ld)", (long)win);
        MPI_Win_sync(win);
      }
#ifdef _XMP_XACC
      MPI_Win win_acc = coarrays[i]->win_acc;
      if(win_acc != MPI_WIN_NULL){
        XACC_DEBUG("flush_all for acc a coarray (%ld)", (long)win_acc);
        MPI_Win_flush_all(win_acc);
        XACC_DEBUG("sync for acc a coarray (%ld)", (long)win_acc);
        MPI_Win_sync(win_acc);
      }
#endif
    }
  }else{
    if(! _is_coarray_win_flushed){
      XACC_DEBUG("flush_all for host single coarray(%ld)", (long)_xmp_mpi_onesided_win);
      MPI_Win_flush_all(_xmp_mpi_onesided_win);
      _is_coarray_win_flushed = true;
    }
    if(! _is_distarray_win_flushed){
      XACC_DEBUG("flush_all for host single distarray(%ld)", (long)_xmp_mpi_distarray_win);
      MPI_Win_flush_all(_xmp_mpi_distarray_win);
      _is_distarray_win_flushed = true;
    }
#ifdef _XMP_XACC
    if(! _is_coarray_win_acc_flushed){
      XACC_DEBUG("flush_all for acc single coarray(%ld)", (long)_xmp_mpi_onesided_win_acc);
      MPI_Win_flush_all(_xmp_mpi_onesided_win_acc);
      _is_coarray_win_acc_flushed = true;
    }
    if(! _is_distarray_win_acc_flushed){
      XACC_DEBUG("flush_all for acc single distarray(%ld)", (long)_xmp_mpi_distarray_win_acc);
      MPI_Win_flush_all(_xmp_mpi_distarray_win_acc);
      _is_distarray_win_acc_flushed = true;
    }
#endif
    _win_sync();
  }
}
dart_ret_t dart_team_lock_init (dart_team_t teamid, dart_lock_t* lock)
{
  dart_gptr_t gptr_tail;
  dart_gptr_t gptr_list;
  dart_unit_t unitid, myid;
  int32_t *addr;
  uint16_t index;
  int result = dart_adapt_teamlist_convert (teamid, &index);
  if (result == -1)
    {
      return DART_ERR_INVAL;
    }
  dart_team_myid (teamid, &unitid);
  dart_myid (&myid);
  *lock = (dart_lock_t) malloc (sizeof (struct dart_lock_struct));

  /* Unit 0 is the process holding the gptr_tail by default. */
  if (unitid == 0)
    {
      dart_memalloc (sizeof (int32_t), &gptr_tail);
      dart_gptr_getaddr (gptr_tail, (void*)&addr);

      /* The local store is safe; the sync call that follows makes it visible. */
      *addr = -1;
      MPI_Win_sync (dart_win_local_alloc);
    }
  dart_bcast (&gptr_tail, sizeof (dart_gptr_t), 0, teamid);

  /* Create a global memory region across the team; the local memory segment
   * of each unit holds the rank of the next unit blocked on the lock. */
  dart_team_memalloc_aligned (teamid, sizeof(int32_t), /* number of bytes */
                              &gptr_list);

  MPI_Win win;
  win = dart_win_lists[index]; /* this window object is used for atomic operations */

  dart_gptr_setunit (&gptr_list, myid);
  dart_gptr_getaddr (gptr_list, (void*)&addr);
  *addr = -1;
  MPI_Win_sync (win);

  DART_GPTR_COPY((*lock) -> gptr_tail, gptr_tail);
  DART_GPTR_COPY((*lock) -> gptr_list, gptr_list);
  (*lock) -> teamid = teamid;
  (*lock) -> is_acquired = 0;

  DART_LOG_DEBUG ("%2d: INIT	- done", unitid);
  return DART_OK;
}
static void _win_sync()
{
  XACC_DEBUG("sync for host single coarray");
  XACC_DEBUG("sync for host single distarray");
  MPI_Win_sync(_xmp_mpi_onesided_win);
  MPI_Win_sync(_xmp_mpi_distarray_win);
#ifdef _XMP_XACC
  XACC_DEBUG("sync for acc single coarray");
  XACC_DEBUG("sync for acc single distarray");
  MPI_Win_sync(_xmp_mpi_onesided_win_acc);
  MPI_Win_sync(_xmp_mpi_distarray_win_acc);
#endif
}
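/* A minimal, self-contained sketch (not part of the XMP runtime) of the
 * flush/sync pattern the routines above implement: under a passive-target
 * epoch, MPI_Win_flush_all completes the RMA operations this process has
 * issued, and MPI_Win_sync orders the window memory against local loads
 * and stores.  All names below are illustrative. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, *base, one = 1;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Win_allocate(sizeof(int), sizeof(int), MPI_INFO_NULL,
                     MPI_COMM_WORLD, &base, &win);
    *base = -1;
    MPI_Barrier(MPI_COMM_WORLD);  /* initialization visible before any RMA */

    MPI_Win_lock_all(0, win);
    MPI_Put(&one, 1, MPI_INT, (rank + 1) % size, 0, 1, MPI_INT, win);
    MPI_Win_flush_all(win);       /* complete my outstanding RMA */
    MPI_Barrier(MPI_COMM_WORLD);  /* everyone has flushed */
    MPI_Win_sync(win);            /* make remote updates locally visible */
    printf("rank %d sees base[0] = %d\n", rank, *base);
    MPI_Win_unlock_all(win);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}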
/** Create an MCS mutex.  Collective on comm.
 *
 * @param[in]  tail_rank rank of the process in comm that holds the tail
 *                       pointer
 * @param[in]  comm      communicator containing all processes that will use
 *                       the mutex
 * @param[out] hdl_out   handle to the mutex
 * @return MPI status
 */
int MCS_Mutex_create(int tail_rank, MPI_Comm comm, MCS_Mutex * hdl_out)
{
    MCS_Mutex hdl;

    hdl = malloc(sizeof(struct mcs_mutex_s));
    assert(hdl != NULL);

    MPI_Comm_dup(comm, &hdl->comm);
    hdl->tail_rank = tail_rank;

    MPI_Win_allocate(2*sizeof(int), sizeof(int), MPI_INFO_NULL, hdl->comm,
                     &hdl->base, &hdl->window);
    MPI_Win_lock_all(0, hdl->window);

    hdl->base[0] = -1;
    hdl->base[1] = -1;

    MPI_Win_sync(hdl->window);
    MPI_Barrier(hdl->comm);

    *hdl_out = hdl;
    return MPI_SUCCESS;
}
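/* For completeness, a plausible matching destructor (not shown in this
 * collection); it simply releases what MCS_Mutex_create acquired, assuming
 * the same handle layout.  Treat it as a sketch, not the original source. */
int MCS_Mutex_free(MCS_Mutex * hdl_ptr)
{
    MCS_Mutex hdl = *hdl_ptr;

    MPI_Win_unlock_all(hdl->window);  /* end the shared lock_all epoch */
    MPI_Win_free(&hdl->window);
    MPI_Comm_free(&hdl->comm);

    free(hdl);
    *hdl_ptr = NULL;
    return MPI_SUCCESS;
}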
/** Lock a mutex.
 *
 * @param[in] hdl Handle to the mutex
 * @return MPI status
 */
int MCS_Mutex_lock(MCS_Mutex hdl)
{
    int prev;

    /* This store is safe, since it cannot happen concurrently with a remote
     * write */
    hdl->base[MCS_MTX_ELEM_DISP] = -1;
    MPI_Win_sync(hdl->window);

    MPI_Fetch_and_op(&shmem_world_rank, &prev, MPI_INT, hdl->tail_rank,
                     MCS_MTX_TAIL_DISP, MPI_REPLACE, hdl->window);
    MPI_Win_flush(hdl->tail_rank, hdl->window);

    /* If there was a previous tail, update their next pointer and wait for
     * notification.  Otherwise, the mutex was successfully acquired. */
    if (prev != -1) {
        /* Wait for notification */
        MPI_Status status;

        MPI_Accumulate(&shmem_world_rank, 1, MPI_INT, prev, MCS_MTX_ELEM_DISP,
                       1, MPI_INT, MPI_REPLACE, hdl->window);
        MPI_Win_flush(prev, hdl->window);

        debug_print("%2d: LOCK   - waiting for notification from %d\n",
                    shmem_world_rank, prev);
        MPI_Recv(NULL, 0, MPI_BYTE, prev, MCS_MUTEX_TAG, hdl->comm, &status);
    }

    debug_print("%2d: LOCK   - lock acquired\n", shmem_world_rank);
    return MPI_SUCCESS;
}
int oshmpi_trylock(long * lockp)
{
  int is_locked = -1, nil = -1;
  oshmpi_lock_t *lock = (oshmpi_lock_t *) lockp;

  lock->prev = -1;

  /* Get the last tail; if it is -1, replace it with me */
  MPI_Compare_and_swap (&shmem_world_rank, &nil, &(lock->prev), MPI_INT,
                        TAIL, TAIL_DISP, oshmpi_lock_win);
  MPI_Win_flush (TAIL, oshmpi_lock_win);

  /* Check whether the last proc is holding the lock */
  if (lock->prev != -1)
    {
      MPI_Fetch_and_op (NULL, &is_locked, MPI_INT, lock->prev, LOCK_DISP,
                        MPI_NO_OP, oshmpi_lock_win);
      MPI_Win_flush (lock->prev, oshmpi_lock_win);

      if (is_locked)
        return 0;
    }

  /* Add myself at the tail */
  MPI_Fetch_and_op (&shmem_world_rank, &(lock->prev), MPI_INT, TAIL,
                    TAIL_DISP, MPI_REPLACE, oshmpi_lock_win);
  MPI_Win_flush (TAIL, oshmpi_lock_win);

  /* Hold lock */
  oshmpi_lock_base[LOCK_DISP] = 1;
  MPI_Win_sync (oshmpi_lock_win);

  return 1;
}
void oshmpi_unlock(long * lockp)
{
  oshmpi_lock_t *lock = (oshmpi_lock_t *) lockp;

  /* Determine my next process */
  MPI_Fetch_and_op (NULL, &(lock->next), MPI_INT, shmem_world_rank,
                    NEXT_DISP, MPI_NO_OP, oshmpi_lock_win);
  MPI_Win_flush (shmem_world_rank, oshmpi_lock_win);

  if (lock->next != -1)
    {
      MPI_Send (&shmem_world_rank, 1, MPI_INT, lock->next, 999, MPI_COMM_WORLD);
    }

  /* Release lock */
  oshmpi_lock_base[LOCK_DISP] = -1;
  MPI_Win_sync (oshmpi_lock_win);

  return;
}
int main(int argc, char * argv[])
{
    MPI_Init(&argc, &argv);

    int wrank, wsize;
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    int nrank, nsize;
    MPI_Comm MPI_COMM_NODE;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0 /* key */,
                        MPI_INFO_NULL, &MPI_COMM_NODE);
    MPI_Comm_rank(MPI_COMM_NODE, &nrank);
    MPI_Comm_size(MPI_COMM_NODE, &nsize);

    int * shptr = NULL;
    MPI_Win shwin;
    MPI_Info win_info;
    MPI_Info_create(&win_info);
    MPI_Info_set(win_info, "alloc_shared_noncontig", "true");
    MPI_Win_allocate_shared(sizeof(int), sizeof(int), win_info,
                            MPI_COMM_NODE, &shptr, &shwin);
    MPI_Info_free(&win_info);

    MPI_Win_lock_all(0 /* assertion */, shwin);
    MPI_Win_sync(shwin);
    MPI_Barrier(MPI_COMM_NODE);

    MPI_Aint rsize[nsize];
    int rdisp[nsize];
    int * rptr[nsize];
    for (int i=0; i<nsize; i++) {
        MPI_Win_shared_query(shwin, i, &(rsize[i]), &(rdisp[i]), &(rptr[i]));
        printf("rank=%d target=%d rptr=%p rsize=%zu rdisp=%d \n",
               nrank, i, rptr[i], (size_t)rsize[i], rdisp[i]);
    }

    MPI_Win_unlock_all(shwin);

    MPI_Win_free(&shwin);
    MPI_Comm_free(&MPI_COMM_NODE);
    MPI_Finalize();
    return 0;
}
/** Attempt to acquire a mutex.
 *
 * @param[in]  hdl     Handle to the mutex
 * @param[out] success Indicates whether the mutex was acquired
 * @return MPI status
 */
int MCS_Mutex_trylock(MCS_Mutex hdl, int *success)
{
    int tail, nil = -1;

    /* This store is safe, since it cannot happen concurrently with a remote
     * write */
    hdl->base[MCS_MTX_ELEM_DISP] = -1;
    MPI_Win_sync(hdl->window);

    /* Check if the lock is available and claim it if it is. */
    MPI_Compare_and_swap(&shmem_world_rank, &nil, &tail, MPI_INT,
                         hdl->tail_rank, MCS_MTX_TAIL_DISP, hdl->window);
    MPI_Win_flush(hdl->tail_rank, hdl->window);

    /* If the old tail was -1, we have claimed the mutex */
    *success = (tail == nil);

    debug_print("%2d: TRYLOCK - %s\n", shmem_world_rank,
                (*success) ? "Success" : "Non-success");

    return MPI_SUCCESS;
}
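/* The release side of this MCS mutex is not shown above.  The following
 * sketch reconstructs it from the protocol (compare dart_lock_release below,
 * which follows the same scheme); details may differ from the original
 * source. */
int MCS_Mutex_unlock(MCS_Mutex hdl)
{
    int next, tail, nil = -1;

    MPI_Win_sync(hdl->window);

    /* Read my next pointer.  An FOP is used since a remote process may write
     * to this location concurrently. */
    MPI_Fetch_and_op(NULL, &next, MPI_INT, shmem_world_rank,
                     MCS_MTX_ELEM_DISP, MPI_NO_OP, hdl->window);
    MPI_Win_flush(shmem_world_rank, hdl->window);

    if (next == -1) {
        /* No known successor: if I am still the tail, clear it and we are
         * done; otherwise a successor is in the process of enqueueing. */
        MPI_Compare_and_swap(&nil, &shmem_world_rank, &tail, MPI_INT,
                             hdl->tail_rank, MCS_MTX_TAIL_DISP, hdl->window);
        MPI_Win_flush(hdl->tail_rank, hdl->window);

        if (tail == shmem_world_rank)
            return MPI_SUCCESS;

        /* Spin until the successor has installed its rank in my next slot. */
        do {
            MPI_Fetch_and_op(NULL, &next, MPI_INT, shmem_world_rank,
                             MCS_MTX_ELEM_DISP, MPI_NO_OP, hdl->window);
            MPI_Win_flush(shmem_world_rank, hdl->window);
        } while (next == -1);
    }

    /* Notify the successor that it now holds the lock. */
    MPI_Send(NULL, 0, MPI_BYTE, next, MCS_MUTEX_TAG, hdl->comm);
    return MPI_SUCCESS;
}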
int SMP_Bcast(void* buffer, int count, MPI_Datatype datatype, int root,
              MPI_Comm comm)
{
    int nrank = -1;
    MPI_Comm_rank(comm, &nrank);

#ifndef DEBUG
    int nsize = 0;
    MPI_Comm_size(comm, &nsize);
    /* fast path for trivial case */
    if (nsize==1) return MPI_SUCCESS;
#endif

    /* Type_size only works for types without holes. */
    int ts = 0;
    MPI_Type_size(datatype, &ts);

    MPI_Aint winsize = (nrank==0) ? count * ts : 0;
    void * local = NULL;
    MPI_Win wintemp = MPI_WIN_NULL;
    MPI_Win_allocate_shared(winsize, ts, MPI_INFO_NULL, comm, &local, &wintemp);

    void * remote = NULL;
    int disp; /* unused */
    MPI_Win_shared_query(wintemp, 0, &winsize, &disp, &remote);

    MPI_Win_lock_all(0, wintemp);
    if (nrank==0) {
        memcpy(local, buffer, (size_t)count*ts);
    }
    MPI_Win_sync(wintemp);
    /* A time barrier is needed between the root's store and the other ranks'
     * loads; MPI_Win_sync alone is only a memory barrier. */
    MPI_Barrier(comm);
    MPI_Win_sync(wintemp);
    if (nrank!=0) {
        memcpy(buffer, remote, (size_t)count*ts);
    }
    MPI_Win_unlock_all(wintemp);

    MPI_Win_free(&wintemp);
    return MPI_SUCCESS;
}
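/* A hypothetical driver for SMP_Bcast above.  The routine assumes that comm
 * is a node-local (shared-memory) communicator and that the data lives at
 * node rank 0, so the root argument is effectively 0 here. */
#include <mpi.h>
#include <stdio.h>

int SMP_Bcast(void* buffer, int count, MPI_Datatype datatype, int root,
              MPI_Comm comm); /* defined above */

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    MPI_Comm node_comm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &node_comm);
    int nrank;
    MPI_Comm_rank(node_comm, &nrank);

    int data[4] = {0};
    if (nrank == 0)
        for (int i = 0; i < 4; i++) data[i] = 42 + i;

    SMP_Bcast(data, 4, MPI_INT, 0, node_comm);
    printf("node rank %d: data = {%d,%d,%d,%d}\n",
           nrank, data[0], data[1], data[2], data[3]);

    MPI_Comm_free(&node_comm);
    MPI_Finalize();
    return 0;
}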
void oshmpi_lock(long * lockp)
{
  MPI_Status status;
  oshmpi_lock_t *lock = (oshmpi_lock_t *) lockp;

  /* Replace myself with the last tail */
  MPI_Fetch_and_op (&shmem_world_rank, &(lock->prev), MPI_INT, TAIL,
                    TAIL_DISP, MPI_REPLACE, oshmpi_lock_win);
  MPI_Win_flush (TAIL, oshmpi_lock_win);

  /* Previous proc holding lock will eventually notify */
  if (lock->prev != -1)
    {
      /* Send my shmem_world_rank to previous proc's next */
      MPI_Accumulate (&shmem_world_rank, 1, MPI_INT, lock->prev, NEXT_DISP,
                      1, MPI_INT, MPI_REPLACE, oshmpi_lock_win);
      MPI_Win_flush (lock->prev, oshmpi_lock_win);
      MPI_Probe (lock->prev, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    }

  /* Hold lock */
  oshmpi_lock_base[LOCK_DISP] = 1;
  MPI_Win_sync (oshmpi_lock_win);

  return;
}
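/* The three oshmpi_* routines above assume roughly this per-process state;
 * the exact layout in OSHMPI may differ, so treat this as an illustrative
 * reconstruction.  Each process exposes a NEXT slot and a LOCK flag in
 * oshmpi_lock_win; one designated rank (TAIL) additionally holds the queue
 * tail.  TAIL_DISP/LOCK_DISP/NEXT_DISP are displacements into that window. */
#include <mpi.h>

typedef struct {
    int prev;  /* rank of my predecessor in the lock queue */
    int next;  /* rank of my successor in the lock queue */
} oshmpi_lock_t;

enum { NEXT_DISP = 0, LOCK_DISP = 1, TAIL_DISP = 2 };
#define TAIL 0                  /* rank holding the global tail pointer */

static int *oshmpi_lock_base;   /* local window memory */
static MPI_Win oshmpi_lock_win;

static void oshmpi_lock_init(MPI_Comm comm)
{
    /* three ints per rank: NEXT, LOCK, and (used only on rank TAIL) the
     * queue tail; allocating all three everywhere keeps the layout uniform */
    MPI_Win_allocate(3 * sizeof(int), sizeof(int), MPI_INFO_NULL, comm,
                     &oshmpi_lock_base, &oshmpi_lock_win);
    oshmpi_lock_base[NEXT_DISP] = -1;
    oshmpi_lock_base[LOCK_DISP] = -1;
    oshmpi_lock_base[TAIL_DISP] = -1;
    MPI_Win_lock_all(MPI_MODE_NOCHECK, oshmpi_lock_win);
    MPI_Win_sync(oshmpi_lock_win);
    MPI_Barrier(comm);
}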
dart_ret_t dart_lock_release (dart_lock_t lock)
{
  dart_unit_t unitid;
  dart_team_myid (lock -> teamid, &unitid);
  if (lock -> is_acquired == 0)
    {
      printf ("Warning: RELEASE	- %2d has not yet acquired the lock\n", unitid);
      return DART_OK;
    }

  dart_gptr_t gptr_tail;
  dart_gptr_t gptr_list;
  MPI_Win win;
  int32_t *addr2, next, result[1];
  MPI_Aint disp_list;
  int32_t origin[1] = {-1};

  DART_GPTR_COPY(gptr_tail, lock -> gptr_tail);
  DART_GPTR_COPY(gptr_list, lock -> gptr_list);

  uint64_t offset_tail = gptr_tail.addr_or_offs.offset;
  int16_t seg_id = gptr_list.segid;
  dart_unit_t tail = gptr_tail.unitid;
  uint16_t index = gptr_list.flags;
  dart_gptr_getaddr (gptr_list, (void*)&addr2);
  win = dart_win_lists[index];

  /* Atomicity: check whether we are at the tail of this lock queue.  If so,
   * we are done; otherwise, we still need to send a notification. */
  MPI_Compare_and_swap (origin, &unitid, result, MPI_INT32_T, tail,
                        offset_tail, dart_win_local_alloc);
  MPI_Win_flush (tail, dart_win_local_alloc);

  /* We are not at the tail of this lock queue. */
  if (*result != unitid)
    {
      DART_LOG_DEBUG ("%2d: UNLOCK	- waiting for next pointer (tail = %d) in team %d",
                      unitid, *result, (lock -> teamid));
      if (dart_adapt_transtable_get_disp (seg_id, unitid, &disp_list) == -1)
        {
          return DART_ERR_INVAL;
        }

      /* Wait until the update of my next pointer has finished. */
      while (1)
        {
          MPI_Fetch_and_op (NULL, &next, MPI_INT, unitid, disp_list,
                            MPI_NO_OP, win);
          MPI_Win_flush (unitid, win);
          if (next != -1) break;
        }
      DART_LOG_DEBUG ("%2d: UNLOCK	- notifying %d in team %d",
                      unitid, next, (lock -> teamid));

      /* Notify the next unit waiting on the lock queue. */
      MPI_Send (NULL, 0, MPI_INT, next, 0, dart_teams[index]);
      *addr2 = -1;
      MPI_Win_sync (win);
    }
  lock -> is_acquired = 0;
  DART_LOG_DEBUG ("%2d: UNLOCK	- release lock in team %d", unitid, (lock -> teamid));
  return DART_OK;
}
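/* For context: a sketch of the matching acquire operation, reconstructed
 * from dart_team_lock_init and dart_lock_release above.  The actual DART-MPI
 * implementation may differ in details; only helpers already used above are
 * assumed to exist. */
dart_ret_t dart_lock_acquire (dart_lock_t lock)
{
  dart_unit_t unitid;
  dart_team_myid (lock -> teamid, &unitid);
  if (lock -> is_acquired == 1)
    {
      printf ("Warning: LOCK	- %2d has already acquired the lock\n", unitid);
      return DART_OK;
    }

  dart_gptr_t gptr_tail, gptr_list;
  DART_GPTR_COPY(gptr_tail, lock -> gptr_tail);
  DART_GPTR_COPY(gptr_list, lock -> gptr_list);

  uint64_t offset_tail = gptr_tail.addr_or_offs.offset;
  dart_unit_t tail = gptr_tail.unitid;
  int16_t seg_id = gptr_list.segid;
  uint16_t index = gptr_list.flags;
  MPI_Win win = dart_win_lists[index];
  int32_t predecessor;

  /* Atomically make myself the new tail, fetching the old tail. */
  MPI_Fetch_and_op (&unitid, &predecessor, MPI_INT32_T, tail, offset_tail,
                    MPI_REPLACE, dart_win_local_alloc);
  MPI_Win_flush (tail, dart_win_local_alloc);

  if (predecessor != -1)
    {
      MPI_Aint disp_list;
      MPI_Status status;
      if (dart_adapt_transtable_get_disp (seg_id, predecessor, &disp_list) == -1)
        {
          return DART_ERR_INVAL;
        }
      /* Register as the predecessor's successor, then block until the
       * predecessor's release sends a zero-byte notification. */
      MPI_Accumulate (&unitid, 1, MPI_INT32_T, predecessor, disp_list, 1,
                      MPI_INT32_T, MPI_REPLACE, win);
      MPI_Win_flush (predecessor, win);
      MPI_Recv (NULL, 0, MPI_INT, predecessor, 0, dart_teams[index], &status);
    }

  lock -> is_acquired = 1;
  return DART_OK;
}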
int main(int argc, char ** argv)
{
  int Block_order;
  size_t Block_size;
  size_t Colblock_size;
  int Tile_order=32;
  int tiling;
  int Num_procs;     /* Number of ranks */
  int order;         /* overall matrix order */
  int send_to, recv_from; /* communicating ranks */
  size_t bytes;      /* total amount of data to be moved */
  int my_ID;         /* rank */
  int root=0;        /* root rank of a communicator */
  int iterations;    /* number of times to run the pipeline algorithm */
  int i, j, it, jt, ID; /* dummies */
  int iter;          /* index of iteration */
  int phase;         /* phase in the staged communication */
  size_t colstart;   /* sequence number of first column owned by calling rank */
  int error=0;       /* error flag */
  double *A_p;       /* original matrix column block */
  double *B_p;       /* transposed matrix column block */
  double *Work_in_p; /* workspace for the transpose function */
  double *Work_out_p;/* workspace for the transpose function */
  double abserr, abserr_tot; /* computed error */
  double epsilon = 1.e-8; /* error tolerance */
  double local_trans_time, /* timing parameters */
         trans_time, avgtime;
  MPI_Status status; /* completion status of message */
  MPI_Win shm_win_A; /* Shared Memory window object */
  MPI_Win shm_win_B; /* Shared Memory window object */
  MPI_Win shm_win_Work_in;  /* Shared Memory window object */
  MPI_Win shm_win_Work_out; /* Shared Memory window object */
  MPI_Info rma_winfo; /* info for window */
  MPI_Comm shm_comm_prep; /* Shared Memory prep Communicator */
  MPI_Comm shm_comm; /* Shared Memory Communicator */
  int shm_procs;     /* # of ranks in shared domain */
  int shm_ID;        /* MPI rank within coherence domain */
  int group_size;    /* number of ranks per shared memory group */
  int Num_groups;    /* number of shared memory groups */
  int group_ID;      /* sequence number of shared memory group */
  int size_mul;      /* size multiplier; 0 for non-root ranks in coherence domain */
  int istart;
  MPI_Request send_req, recv_req;

/*********************************************************************************
** Initialize the MPI environment
**********************************************************************************/
  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);
  MPI_Comm_size(MPI_COMM_WORLD, &Num_procs);

  root = 0;

/*********************************************************************
** process, test and broadcast input parameters
*********************************************************************/
  if (my_ID == root){
    if (argc != 4 && argc != 5){
      printf("Usage: %s <#ranks per coherence domain> <# iterations> <matrix order> [tile size]\n", *argv);
      error = 1; goto ENDOFTESTS;
    }

    group_size = atoi(*++argv);
    if (group_size < 1) {
      printf("ERROR: # ranks per coherence domain must be >= 1 : %d \n", group_size);
      error = 1; goto ENDOFTESTS;
    }
    if (Num_procs%group_size) {
      printf("ERROR: total # %d ranks not divisible by ranks per coherence domain %d\n",
             Num_procs, group_size);
      error = 1; goto ENDOFTESTS;
    }

    iterations = atoi(*++argv);
    if (iterations < 1){
      printf("ERROR: iterations must be >= 1 : %d \n", iterations);
      error = 1; goto ENDOFTESTS;
    }

    order = atoi(*++argv);
    if (order < Num_procs) {
      printf("ERROR: matrix order %d should be at least # procs %d\n", order, Num_procs);
      error = 1; goto ENDOFTESTS;
    }
    if (order%Num_procs) {
      printf("ERROR: matrix order %d should be divisible by # procs %d\n", order, Num_procs);
      error = 1; goto ENDOFTESTS;
    }

    if (argc == 5) Tile_order = atoi(*++argv);

    ENDOFTESTS:;
  }
  bail_out(error);

  /* Broadcast input data to all ranks */
  MPI_Bcast(&order,      1, MPI_INT, root, MPI_COMM_WORLD);
  MPI_Bcast(&iterations, 1, MPI_INT, root, MPI_COMM_WORLD);
  MPI_Bcast(&Tile_order, 1, MPI_INT, root, MPI_COMM_WORLD);
  MPI_Bcast(&group_size, 1, MPI_INT, root, MPI_COMM_WORLD);

  if (my_ID == root) {
    printf("Parallel Research Kernels version %s\n", PRKVERSION);
    printf("MPI+SHM Matrix transpose: B = A^T\n");
    printf("Number of ranks      = %d\n", Num_procs);
    printf("Rank group size      = %d\n", group_size);
    printf("Matrix order         = %d\n", order);
    printf("Number of iterations = %d\n", iterations);
    if ((Tile_order > 0) && (Tile_order < order))
      printf("Tile size            = %d\n", Tile_order);
    else
      printf("Untiled\n");
#ifndef SYNCHRONOUS
    printf("Non-");
#endif
    printf("Blocking messages\n");
  }

  /* Setup for Shared memory regions */

  /* first divide WORLD in groups of size group_size */
  MPI_Comm_split(MPI_COMM_WORLD, my_ID/group_size, my_ID%group_size, &shm_comm_prep);
  /* derive from that a SHM communicator */
  MPI_Comm_split_type(shm_comm_prep, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shm_comm);
  MPI_Comm_rank(shm_comm, &shm_ID);
  MPI_Comm_size(shm_comm, &shm_procs);
  /* do sanity check, making sure groups did not shrink in second comm split */
  if (shm_procs != group_size) MPI_Abort(MPI_COMM_WORLD, 666);

  /* a non-positive tile size means no tiling of the local transpose */
  tiling = (Tile_order > 0) && (Tile_order < order);
  bytes = 2 * sizeof(double) * order * order;

/*********************************************************************
** The matrix is broken up into column blocks that are mapped one to a
** rank.  Each column block is made up of Num_procs smaller square
** blocks of order block_order.
*********************************************************************/

  Num_groups = Num_procs/group_size;
  Block_order = order/Num_groups;

  group_ID = my_ID/group_size;
  colstart = Block_order * group_ID;
  Colblock_size = order * Block_order;
  Block_size = Block_order * Block_order;

/*********************************************************************
** Create the column block of the test matrix, the column block of the
** transposed matrix, and workspace (workspace only if #procs>1)
*********************************************************************/

  /* RMA win info */
  MPI_Info_create(&rma_winfo);
  /* This key indicates that passive target RMA will not be used.
   * It is the one info key that MPICH actually uses for optimization. */
  MPI_Info_set(rma_winfo, "no_locks", "true");

  /* only the root of each SHM domain specifies window of nonzero size */
  size_mul = (shm_ID==0);
  int offset = 32;
  MPI_Aint size = (Colblock_size+offset)*sizeof(double)*size_mul;
  int disp_unit;
  MPI_Win_allocate_shared(size, sizeof(double), rma_winfo, shm_comm,
                          (void *) &A_p, &shm_win_A);
  MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_A);
  MPI_Win_shared_query(shm_win_A, MPI_PROC_NULL, &size, &disp_unit, (void *)&A_p);
  if (A_p == NULL){
    printf(" Error allocating space for original matrix on node %d\n", my_ID);
    error = 1;
  }
  bail_out(error);
  A_p += offset;

  /* recompute memory size (overwritten by prior query) */
  size = (Colblock_size+offset)*sizeof(double)*size_mul;
  MPI_Win_allocate_shared(size, sizeof(double), rma_winfo, shm_comm,
                          (void *) &B_p, &shm_win_B);
  MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_B);
  MPI_Win_shared_query(shm_win_B, MPI_PROC_NULL, &size, &disp_unit, (void *)&B_p);
  if (B_p == NULL){
    printf(" Error allocating space for transposed matrix by group %d\n", group_ID);
    error = 1;
  }
  bail_out(error);
  B_p += offset;

  if (Num_groups>1) {
    size = Block_size*sizeof(double)*size_mul;
    MPI_Win_allocate_shared(size, sizeof(double), rma_winfo, shm_comm,
                            (void *) &Work_in_p, &shm_win_Work_in);
    MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_Work_in);
    MPI_Win_shared_query(shm_win_Work_in, MPI_PROC_NULL, &size, &disp_unit,
                         (void *)&Work_in_p);
    if (Work_in_p == NULL){
      printf(" Error allocating space for in block by group %d\n", group_ID);
      error = 1;
    }
    bail_out(error);

    /* recompute memory size (overwritten by prior query) */
    size = Block_size*sizeof(double)*size_mul;
    MPI_Win_allocate_shared(size, sizeof(double), rma_winfo, shm_comm,
                            (void *) &Work_out_p, &shm_win_Work_out);
    MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_Work_out);
    MPI_Win_shared_query(shm_win_Work_out, MPI_PROC_NULL, &size, &disp_unit,
                         (void *)&Work_out_p);
    if (Work_out_p == NULL){
      printf(" Error allocating space for out block by group %d\n", group_ID);
      error = 1;
    }
    bail_out(error);
  }

  /* Fill the original column matrix */
  istart = 0;
  int chunk_size = Block_order/group_size;
  if (tiling) {
    for (j=shm_ID*chunk_size; j<(shm_ID+1)*chunk_size; j+=Tile_order) {
      for (i=0; i<order; i+=Tile_order)
        for (jt=j; jt<MIN((shm_ID+1)*chunk_size, j+Tile_order); jt++)
          for (it=i; it<MIN(order, i+Tile_order); it++) {
            A(it,jt) = (double) ((double)order*(jt+colstart) + it);
            B(it,jt) = -1.0;
          }
    }
  }
  else {
    for (j=shm_ID*chunk_size; j<(shm_ID+1)*chunk_size; j++)
      for (i=0; i<order; i++) {
        A(i,j) = (double)((double)order*(j+colstart) + i);
        B(i,j) = -1.0;
      }
  }

  /* NEED A STORE FENCE HERE */
  MPI_Win_sync(shm_win_A);
  MPI_Win_sync(shm_win_B);
  MPI_Barrier(shm_comm);

  for (iter=0; iter<=iterations; iter++) {

    /* start timer after a warmup iteration */
    if (iter == 1) {
      MPI_Barrier(MPI_COMM_WORLD);
      local_trans_time = wtime();
    }

    /* do the local transpose */
    istart = colstart;
    if (!tiling) {
      for (i=shm_ID*chunk_size; i<(shm_ID+1)*chunk_size; i++) {
        for (j=0; j<Block_order; j++)
          B(j,i) = A(i,j);
      }
    }
    else {
      for (i=shm_ID*chunk_size; i<(shm_ID+1)*chunk_size; i+=Tile_order) {
        for (j=0; j<Block_order; j+=Tile_order)
          for (it=i; it<MIN(Block_order, i+Tile_order); it++)
            for (jt=j; jt<MIN(Block_order, j+Tile_order); jt++) {
              B(jt,it) = A(it,jt);
            }
      }
    }

    for (phase=1; phase<Num_groups; phase++){
      recv_from = ((group_ID + phase             )%Num_groups);
      send_to   = ((group_ID - phase + Num_groups)%Num_groups);

      istart = send_to*Block_order;
      if (!tiling) {
        for (i=shm_ID*chunk_size; i<(shm_ID+1)*chunk_size; i++)
          for (j=0; j<Block_order; j++){
            Work_out(j,i) = A(i,j);
          }
      }
      else {
        for (i=shm_ID*chunk_size; i<(shm_ID+1)*chunk_size; i+=Tile_order)
          for (j=0; j<Block_order; j+=Tile_order)
            for (it=i; it<MIN(Block_order, i+Tile_order); it++)
              for (jt=j; jt<MIN(Block_order, j+Tile_order); jt++) {
                Work_out(jt,it) = A(it,jt);
              }
      }

      /* NEED A LOAD/STORE FENCE HERE */
      MPI_Win_sync(shm_win_Work_in);
      MPI_Win_sync(shm_win_Work_out);
      MPI_Barrier(shm_comm);

      if (shm_ID==0) {
#ifndef SYNCHRONOUS
        /* if we place the Irecv outside this block, it would not be
           protected by a local barrier, which creates a race */
        MPI_Irecv(Work_in_p, Block_size, MPI_DOUBLE, recv_from*group_size,
                  phase, MPI_COMM_WORLD, &recv_req);
        MPI_Isend(Work_out_p, Block_size, MPI_DOUBLE, send_to*group_size,
                  phase, MPI_COMM_WORLD, &send_req);
        MPI_Wait(&recv_req, &status);
        MPI_Wait(&send_req, &status);
#else
        MPI_Sendrecv(Work_out_p, Block_size, MPI_DOUBLE, send_to*group_size,
                     phase, Work_in_p, Block_size, MPI_DOUBLE,
                     recv_from*group_size, phase, MPI_COMM_WORLD, &status);
#endif
      }

      /* NEED A LOAD FENCE HERE */
      MPI_Win_sync(shm_win_Work_in);
      MPI_Win_sync(shm_win_Work_out);
      MPI_Barrier(shm_comm);

      istart = recv_from*Block_order;
      /* scatter received block to transposed matrix; no need to tile */
      for (j=shm_ID*chunk_size; j<(shm_ID+1)*chunk_size; j++)
        for (i=0; i<Block_order; i++)
          B(i,j) = Work_in(i,j);
    } /* end of phase loop */
  } /* end of iterations */

  local_trans_time = wtime() - local_trans_time;
  MPI_Reduce(&local_trans_time, &trans_time, 1, MPI_DOUBLE, MPI_MAX, root,
             MPI_COMM_WORLD);

  abserr = 0.0;
  istart = 0;
  /* for (j=shm_ID; j<Block_order; j+=group_size)
       for (i=0; i<order; i++) { */
  for (j=shm_ID*chunk_size; j<(shm_ID+1)*chunk_size; j++)
    for (i=0; i<order; i++) {
      abserr += ABS(B(i,j) - (double)((double)order*i + j+colstart));
    }

  MPI_Reduce(&abserr, &abserr_tot, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);

  if (my_ID == root) {
    if (abserr_tot < epsilon) {
      printf("Solution validates\n");
      avgtime = trans_time/(double)iterations;
      printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.0E-06*bytes/avgtime, avgtime);
#ifdef VERBOSE
      printf("Summed errors: %f \n", abserr_tot);
#endif
    }
    else {
      printf("ERROR: Aggregate absolute error %e exceeds threshold %e\n",
             abserr_tot, epsilon);
      error = 1;
    }
  }
  bail_out(error);

  MPI_Win_unlock_all(shm_win_A);
  MPI_Win_unlock_all(shm_win_B);
  MPI_Win_free(&shm_win_A);
  MPI_Win_free(&shm_win_B);
  if (Num_groups>1) {
    MPI_Win_unlock_all(shm_win_Work_in);
    MPI_Win_unlock_all(shm_win_Work_out);
    MPI_Win_free(&shm_win_Work_in);
    MPI_Win_free(&shm_win_Work_out);
  }
  MPI_Info_free(&rma_winfo);

  MPI_Finalize();
  exit(EXIT_SUCCESS);

} /* end of main */
int main(int argc, char **argv)
{
    int i, j, rank, nproc;
    int shm_rank, shm_nproc;
    MPI_Aint size;
    int errors = 0, all_errors = 0;
    int *base, *my_base;
    int disp_unit;
    MPI_Win shm_win;
    MPI_Comm shm_comm;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank,
                        MPI_INFO_NULL, &shm_comm);

    MPI_Comm_rank(shm_comm, &shm_rank);
    MPI_Comm_size(shm_comm, &shm_nproc);

    /* Allocate ELEM_PER_PROC integers for each process */
    MPI_Win_allocate_shared(sizeof(int)*ELEM_PER_PROC, sizeof(int),
                            MPI_INFO_NULL, shm_comm, &my_base, &shm_win);

    /* Locate absolute base */
    MPI_Win_shared_query(shm_win, MPI_PROC_NULL, &size, &disp_unit, &base);

    /* make sure the query returned the right values */
    if (disp_unit != sizeof(int)) errors++;
    if (size != ELEM_PER_PROC * sizeof(int)) errors++;
    if ((shm_rank == 0) && (base != my_base)) errors++;
    if (shm_rank && (base == my_base)) errors++;

    if (verbose)
        printf("%d -- size = %d baseptr = %p my_baseptr = %p\n",
               shm_rank, (int) size, (void*) base, (void*) my_base);

    MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win);

    /* Write to all my data */
    for (i = 0; i < ELEM_PER_PROC; i++) {
        my_base[i] = i;
    }

    MPI_Win_sync(shm_win);
    MPI_Barrier(shm_comm);
    MPI_Win_sync(shm_win);

    /* Read and verify everyone's data */
    for (i = 0; i < shm_nproc; i++) {
        for (j = 0; j < ELEM_PER_PROC; j++) {
            if (base[i*ELEM_PER_PROC + j] != j) {
                errors++;
                printf("%d -- Got %d at rank %d index %d, expected %d\n",
                       shm_rank, base[i*ELEM_PER_PROC + j], i, j, j);
            }
        }
    }

    MPI_Win_unlock_all(shm_win);
    MPI_Win_free(&shm_win);
    MPI_Comm_free(&shm_comm);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    MPI_Finalize();
    return 0;
}
int main(int argc, char ** argv)
{
  int    Num_procs;        /* number of ranks */
  int    Num_procsx, Num_procsy; /* number of ranks in each coord direction */
  int    Num_groupsx, Num_groupsy; /* number of blocks in each coord direction */
  int    my_group;         /* sequence number of shared memory block */
  int    my_group_IDx, my_group_IDy; /* coordinates of block within block grid */
  int    group_size;       /* number of ranks in shared memory group */
  int    group_sizex, group_sizey; /* number of ranks in block in each coord direction */
  int    my_ID;            /* MPI rank */
  int    my_global_IDx, my_global_IDy; /* coordinates of rank in overall rank grid */
  int    my_local_IDx, my_local_IDy; /* coordinates of rank within shared memory block */
  int    right_nbr;        /* global rank of right neighboring tile */
  int    left_nbr;         /* global rank of left neighboring tile */
  int    top_nbr;          /* global rank of top neighboring tile */
  int    bottom_nbr;       /* global rank of bottom neighboring tile */
  int    local_nbr[4];     /* list of synchronizing local neighbors */
  int    num_local_nbrs;   /* number of synchronizing local neighbors */
  int    dummy;
  DTYPE *top_buf_out;      /* communication buffer */
  DTYPE *top_buf_in;       /*       "         "    */
  DTYPE *bottom_buf_out;   /*       "         "    */
  DTYPE *bottom_buf_in;    /*       "         "    */
  DTYPE *right_buf_out;    /*       "         "    */
  DTYPE *right_buf_in;     /*       "         "    */
  DTYPE *left_buf_out;     /*       "         "    */
  DTYPE *left_buf_in;      /*       "         "    */
  int    root = 0;
  long   n, width, height; /* linear global and block grid dimension */
  int    width_rank, height_rank; /* linear local dimension */
  int    iter, leftover;   /* dummies */
  int    istart_rank, iend_rank; /* bounds of grid tile assigned to calling rank */
  int    jstart_rank, jend_rank; /* bounds of grid tile assigned to calling rank */
  int    istart, iend;     /* bounds of grid block containing tile */
  int    jstart, jend;     /* bounds of grid block containing tile */
  DTYPE  norm,             /* L1 norm of solution */
         local_norm,       /* contribution of calling rank to L1 norm */
         reference_norm;   /* value to be matched by computed norm */
  DTYPE  f_active_points;  /* interior of grid with respect to stencil */
  DTYPE  flops;            /* floating point ops per iteration */
  int    iterations;       /* number of times to run the algorithm */
  double local_stencil_time, /* timing parameters */
         stencil_time, avgtime;
  int    stencil_size;     /* number of points in stencil */
  DTYPE  * RESTRICT in;    /* input grid values */
  DTYPE  * RESTRICT out;   /* output grid values */
  long   total_length_in;  /* total required length to store input array */
  long   total_length_out; /* total required length to store output array */
  int    error=0;          /* error flag */
  DTYPE  weight[2*RADIUS+1][2*RADIUS+1]; /* weights of points in the stencil */
  MPI_Request request[8];  /* requests for sends & receives in 4 coord directions */
  MPI_Win shm_win_in;      /* shared memory window object for IN array */
  MPI_Win shm_win_out;     /* shared memory window object for OUT array */
  MPI_Comm shm_comm_prep;  /* preparatory shared memory communicator */
  MPI_Comm shm_comm;       /* Shared Memory Communicator */
  int shm_procs;           /* # of ranks in shared domain */
  int shm_ID;              /* MPI rank in shared memory domain */
  MPI_Aint size_in;        /* size of the IN array in shared memory window */
  MPI_Aint size_out;       /* size of the OUT array in shared memory window */
  int size_mul;            /* one for shm_comm root, zero for the other ranks */
  int disp_unit;           /* ignored */

/*******************************************************************************
** Initialize the MPI environment
********************************************************************************/
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);
  MPI_Comm_size(MPI_COMM_WORLD, &Num_procs);
/*******************************************************************************
** process, test, and broadcast input parameters
********************************************************************************/
  if (my_ID == root) {
    printf("Parallel Research Kernels version %s\n", PRKVERSION);
    printf("MPI+SHM stencil execution on 2D grid\n");

#if !STAR
    printf("ERROR: Compact stencil not supported\n");
    error = 1;
    goto ENDOFTESTS;
#endif

    if (argc != 4){
      printf("Usage: %s <#ranks per coherence domain> <# iterations> <array dimension> \n", *argv);
      error = 1;
      goto ENDOFTESTS;
    }

    group_size = atoi(*++argv);
    if (group_size < 1) {
      printf("ERROR: # ranks per coherence domain must be >= 1 : %d \n", group_size);
      error = 1;
      goto ENDOFTESTS;
    }
    if (Num_procs%group_size) {
      printf("ERROR: total # %d ranks not divisible by ranks per coherence domain %d\n",
             Num_procs, group_size);
      error = 1;
      goto ENDOFTESTS;
    }

    iterations = atoi(*++argv);
    if (iterations < 0){
      printf("ERROR: iterations must be >= 0 : %d \n", iterations);
      error = 1;
      goto ENDOFTESTS;
    }

    n = atol(*++argv);
    long nsquare = n * n;
    if (nsquare < Num_procs){
      printf("ERROR: grid size must be at least # ranks: %ld\n", nsquare);
      error = 1;
      goto ENDOFTESTS;
    }

    if (RADIUS < 0) {
      printf("ERROR: Stencil radius %d should be non-negative\n", RADIUS);
      error = 1;
      goto ENDOFTESTS;
    }
    if (2*RADIUS+1 > n) {
      printf("ERROR: Stencil radius %d exceeds grid size %ld\n", RADIUS, n);
      error = 1;
      goto ENDOFTESTS;
    }

    ENDOFTESTS:;
  }
  bail_out(error);

  MPI_Bcast(&n,          1, MPI_LONG, root, MPI_COMM_WORLD);
  MPI_Bcast(&iterations, 1, MPI_INT,  root, MPI_COMM_WORLD);
  MPI_Bcast(&group_size, 1, MPI_INT,  root, MPI_COMM_WORLD);

  /* determine best way to create a 2D grid of ranks (closest to square, for
     best surface/volume ratio); we do this brute force for now.
     The decomposition needs to be such that shared memory groups can evenly
     tessellate the rank grid */
  for (Num_procsx=(int) (sqrt(Num_procs+1)); Num_procsx>0; Num_procsx--) {
    if (!(Num_procs%Num_procsx)) {
      Num_procsy = Num_procs/Num_procsx;
      for (group_sizex=(int)(sqrt(group_size+1)); group_sizex>0; group_sizex--) {
        if (!(group_size%group_sizex) && !(Num_procsx%group_sizex)) {
          group_sizey = group_size/group_sizex;
          break;
        }
      }
      if (!(Num_procsy%group_sizey)) break;
    }
  }

  if (my_ID == root) {
    printf("Number of ranks                 = %d\n", Num_procs);
    printf("Grid size                       = %ld\n", n);
    printf("Radius of stencil               = %d\n", RADIUS);
    printf("Tiles in x/y-direction          = %d/%d\n", Num_procsx, Num_procsy);
    printf("Tiles per shared memory domain  = %d\n", group_size);
    printf("Tiles in x/y-direction in group = %d/%d\n", group_sizex, group_sizey);
    printf("Type of stencil                 = star\n");
#if LOCAL_BARRIER_SYNCH
    printf("Local synchronization           = barrier\n");
#else
    printf("Local synchronization           = point to point\n");
#endif
#if DOUBLE
    printf("Data type                       = double precision\n");
#else
    printf("Data type                       = single precision\n");
#endif
#if LOOPGEN
    printf("Script used to expand stencil loop body\n");
#else
    printf("Compact representation of stencil loop body\n");
#endif
    printf("Number of iterations            = %d\n", iterations);
  }

  /* Setup for Shared memory regions */

  /* first divide WORLD in groups of size group_size */
  MPI_Comm_split(MPI_COMM_WORLD, my_ID/group_size, my_ID%group_size, &shm_comm_prep);
  /* derive from that an SHM communicator */
  MPI_Comm_split_type(shm_comm_prep, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shm_comm);
  MPI_Comm_rank(shm_comm, &shm_ID);
  MPI_Comm_size(shm_comm, &shm_procs);
  /* do sanity check, making sure groups did not shrink in second comm split */
  if (shm_procs != group_size) MPI_Abort(MPI_COMM_WORLD, 666);

  Num_groupsx = Num_procsx/group_sizex;
  Num_groupsy = Num_procsy/group_sizey;

  my_group = my_ID/group_size;
  my_group_IDx = my_group%Num_groupsx;
  my_group_IDy = my_group/Num_groupsx;
  my_local_IDx = my_ID%group_sizex;
  my_local_IDy = (my_ID%group_size)/group_sizex;
  my_global_IDx = my_group_IDx*group_sizex+my_local_IDx;
  my_global_IDy = my_group_IDy*group_sizey+my_local_IDy;

  /* set all neighboring ranks to -1 (no communication with those ranks) */
  left_nbr = right_nbr = top_nbr = bottom_nbr = -1;
  /* keep track of local neighbors for local synchronization */
  num_local_nbrs = 0;

  if (my_local_IDx == group_sizex-1 && my_group_IDx != (Num_groupsx-1)) {
    right_nbr = (my_group+1)*group_size+shm_ID-group_sizex+1;
  }
  if (my_local_IDx != group_sizex-1) {
    local_nbr[num_local_nbrs++] = shm_ID + 1;
  }

  if (my_local_IDx == 0 && my_group_IDx != 0) {
    left_nbr = (my_group-1)*group_size+shm_ID+group_sizex-1;
  }
  if (my_local_IDx != 0) {
    local_nbr[num_local_nbrs++] = shm_ID - 1;
  }

  if (my_local_IDy == group_sizey-1 && my_group_IDy != (Num_groupsy-1)) {
    top_nbr = (my_group+Num_groupsx)*group_size + my_local_IDx;
  }
  if (my_local_IDy != group_sizey-1) {
    local_nbr[num_local_nbrs++] = shm_ID + group_sizex;
  }

  if (my_local_IDy == 0 && my_group_IDy != 0) {
    bottom_nbr = (my_group-Num_groupsx)*group_size + group_sizex*(group_sizey-1)+my_local_IDx;
  }
  if (my_local_IDy != 0) {
    local_nbr[num_local_nbrs++] = shm_ID - group_sizex;
  }

  /* compute amount of space required for input and solution arrays for the
     block, and also compute index sets */
  width = n/Num_groupsx;
  leftover = n%Num_groupsx;
  if (my_group_IDx<leftover) {
    istart = (width+1) * my_group_IDx;
    iend = istart + width;
  }
  else {
    istart = (width+1) * leftover + width * (my_group_IDx-leftover);
    iend = istart + width - 1;
  }
  width = iend - istart + 1;
  if (width == 0) {
    printf("ERROR: rank %d has no work to do\n", my_ID);
    error = 1;
  }
  bail_out(error);

  height = n/Num_groupsy;
  leftover = n%Num_groupsy;
  if (my_group_IDy<leftover) {
    jstart = (height+1) * my_group_IDy;
    jend = jstart + height;
  }
  else {
    jstart = (height+1) * leftover + height * (my_group_IDy-leftover);
    jend = jstart + height - 1;
  }
  height = jend - jstart + 1;
  if (height == 0) {
    printf("ERROR: rank %d has no work to do\n", my_ID);
    error = 1;
  }
  bail_out(error);

  if (width < RADIUS || height < RADIUS) {
    printf("ERROR: rank %d has work tile smaller than stencil radius; w=%ld,h=%ld\n",
           my_ID, width, height);
    error = 1;
  }
  bail_out(error);

  total_length_in  = (width+2*RADIUS)*(height+2*RADIUS)*sizeof(DTYPE);
  total_length_out = width*height*sizeof(DTYPE);

  /* only the root of each SHM domain specifies window of nonzero size */
  size_mul = (shm_ID==0);
  size_in = total_length_in*size_mul;
  MPI_Win_allocate_shared(size_in, sizeof(double), MPI_INFO_NULL, shm_comm,
                          (void *) &in, &shm_win_in);
  MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_in);
  MPI_Win_shared_query(shm_win_in, MPI_PROC_NULL, &size_in, &disp_unit, (void *)&in);
  if (in == NULL){
    printf("Error allocating space for input array by group %d\n", my_group);
    error = 1;
  }
  bail_out(error);

  size_out = total_length_out*size_mul;
  MPI_Win_allocate_shared(size_out, sizeof(double), MPI_INFO_NULL, shm_comm,
                          (void *) &out, &shm_win_out);
  MPI_Win_lock_all(MPI_MODE_NOCHECK, shm_win_out);
  MPI_Win_shared_query(shm_win_out, MPI_PROC_NULL, &size_out, &disp_unit, (void *)&out);
  if (out == NULL){
    printf("Error allocating space for output array by group %d\n", my_group);
    error = 1;
  }
  bail_out(error);

  /* determine index set assigned to each rank */
  width_rank = width/group_sizex;
  leftover = width%group_sizex;
  if (my_local_IDx<leftover) {
    istart_rank = (width_rank+1) * my_local_IDx;
    iend_rank = istart_rank + width_rank;
  }
  else {
    istart_rank = (width_rank+1) * leftover + width_rank * (my_local_IDx-leftover);
    iend_rank = istart_rank + width_rank - 1;
  }
  istart_rank += istart;
  iend_rank += istart;
  width_rank = iend_rank - istart_rank + 1;

  height_rank = height/group_sizey;
  leftover = height%group_sizey;
  if (my_local_IDy<leftover) {
    jstart_rank = (height_rank+1) * my_local_IDy;
    jend_rank = jstart_rank + height_rank;
  }
  else {
    jstart_rank = (height_rank+1) * leftover + height_rank * (my_local_IDy-leftover);
    jend_rank = jstart_rank + height_rank - 1;
  }
  jstart_rank += jstart;
  jend_rank += jstart;
  height_rank = jend_rank - jstart_rank + 1;

  if (height_rank*width_rank == 0) {
    error = 1;
    printf("Rank %d has no work to do\n", my_ID);
  }
  bail_out(error);

  /* allocate communication buffers for halo values */
  top_buf_out = (DTYPE *) prk_malloc(4*sizeof(DTYPE)*RADIUS*width_rank);
  if (!top_buf_out) {
    printf("ERROR: Rank %d could not allocate comm buffers for y-direction\n", my_ID);
    error = 1;
  }
  bail_out(error);
  top_buf_in     = top_buf_out +   RADIUS*width_rank;
  bottom_buf_out = top_buf_out + 2*RADIUS*width_rank;
  bottom_buf_in  = top_buf_out + 3*RADIUS*width_rank;

  right_buf_out = (DTYPE *) prk_malloc(4*sizeof(DTYPE)*RADIUS*height_rank);
  if (!right_buf_out) {
    printf("ERROR: Rank %d could not allocate comm buffers for x-direction\n", my_ID);
    error = 1;
  }
  bail_out(error);
  right_buf_in = right_buf_out +   RADIUS*height_rank;
  left_buf_out = right_buf_out + 2*RADIUS*height_rank;
  left_buf_in  = right_buf_out + 3*RADIUS*height_rank;

  /* fill the stencil weights to reflect a discrete divergence operator */
  for (int jj=-RADIUS; jj<=RADIUS; jj++)
    for (int ii=-RADIUS; ii<=RADIUS; ii++)
      WEIGHT(ii,jj) = (DTYPE) 0.0;

  stencil_size = 4*RADIUS+1;
  for (int ii=1; ii<=RADIUS; ii++) {
    WEIGHT(0, ii) = WEIGHT( ii,0) =  (DTYPE) (1.0/(2.0*ii*RADIUS));
    WEIGHT(0,-ii) = WEIGHT(-ii,0) = -(DTYPE) (1.0/(2.0*ii*RADIUS));
  }

  norm = (DTYPE) 0.0;
  f_active_points = (DTYPE) (n-2*RADIUS)*(DTYPE) (n-2*RADIUS);

  /* initialize the input and output arrays */
  for (int j=jstart_rank; j<=jend_rank; j++)
    for (int i=istart_rank; i<=iend_rank; i++) {
      IN(i,j)  = COEFX*i+COEFY*j;
      OUT(i,j) = (DTYPE)0.0;
    }

  /* LOAD/STORE FENCE */
  MPI_Win_sync(shm_win_in);
  MPI_Win_sync(shm_win_out);
  MPI_Barrier(shm_comm);

  for (iter = 0; iter<=iterations; iter++){

    /* start timer after a warmup iteration */
    if (iter == 1) {
      MPI_Barrier(MPI_COMM_WORLD);
      local_stencil_time = wtime();
    }

    /* need to fetch ghost point data from neighbors in y-direction */
    if (top_nbr != -1) {
      MPI_Irecv(top_buf_in, RADIUS*width_rank, MPI_DTYPE, top_nbr, 101,
                MPI_COMM_WORLD, &(request[1]));
      for (int kk=0,j=jend_rank-RADIUS+1; j<=jend_rank; j++)
        for (int i=istart_rank; i<=iend_rank; i++) {
          top_buf_out[kk++] = IN(i,j);
        }
      MPI_Isend(top_buf_out, RADIUS*width_rank, MPI_DTYPE, top_nbr, 99,
                MPI_COMM_WORLD, &(request[0]));
    }

    if (bottom_nbr != -1) {
      MPI_Irecv(bottom_buf_in, RADIUS*width_rank, MPI_DTYPE, bottom_nbr, 99,
                MPI_COMM_WORLD, &(request[3]));
      for (int kk=0,j=jstart_rank; j<=jstart_rank+RADIUS-1; j++)
        for (int i=istart_rank; i<=iend_rank; i++) {
          bottom_buf_out[kk++] = IN(i,j);
        }
      MPI_Isend(bottom_buf_out, RADIUS*width_rank, MPI_DTYPE, bottom_nbr, 101,
                MPI_COMM_WORLD, &(request[2]));
    }

    if (top_nbr != -1) {
      MPI_Wait(&(request[0]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jend_rank+1; j<=jend_rank+RADIUS; j++)
        for (int i=istart_rank; i<=iend_rank; i++) {
          IN(i,j) = top_buf_in[kk++];
        }
    }

    if (bottom_nbr != -1) {
      MPI_Wait(&(request[2]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jstart_rank-RADIUS; j<=jstart_rank-1; j++)
        for (int i=istart_rank; i<=iend_rank; i++) {
          IN(i,j) = bottom_buf_in[kk++];
        }
    }

    /* LOAD/STORE FENCE */
    MPI_Win_sync(shm_win_in);

    /* need to fetch ghost point data from neighbors in x-direction */
    if (right_nbr != -1) {
      MPI_Irecv(right_buf_in, RADIUS*height_rank, MPI_DTYPE, right_nbr, 1010,
                MPI_COMM_WORLD, &(request[1+4]));
      for (int kk=0,j=jstart_rank; j<=jend_rank; j++)
        for (int i=iend_rank-RADIUS+1; i<=iend_rank; i++) {
          right_buf_out[kk++] = IN(i,j);
        }
      MPI_Isend(right_buf_out, RADIUS*height_rank, MPI_DTYPE, right_nbr, 990,
                MPI_COMM_WORLD, &(request[0+4]));
    }

    if (left_nbr != -1) {
      MPI_Irecv(left_buf_in, RADIUS*height_rank, MPI_DTYPE, left_nbr, 990,
                MPI_COMM_WORLD, &(request[3+4]));
      for (int kk=0,j=jstart_rank; j<=jend_rank; j++)
        for (int i=istart_rank; i<=istart_rank+RADIUS-1; i++) {
          left_buf_out[kk++] = IN(i,j);
        }
      MPI_Isend(left_buf_out, RADIUS*height_rank, MPI_DTYPE, left_nbr, 1010,
                MPI_COMM_WORLD, &(request[2+4]));
    }

    if (right_nbr != -1) {
      MPI_Wait(&(request[0+4]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1+4]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jstart_rank; j<=jend_rank; j++)
        for (int i=iend_rank+1; i<=iend_rank+RADIUS; i++) {
          IN(i,j) = right_buf_in[kk++];
        }
    }

    if (left_nbr != -1) {
      MPI_Wait(&(request[2+4]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3+4]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jstart_rank; j<=jend_rank; j++)
        for (int i=istart_rank-RADIUS; i<=istart_rank-1; i++) {
          IN(i,j) = left_buf_in[kk++];
        }
    }

    /* LOAD/STORE FENCE */
    MPI_Win_sync(shm_win_in);

    /* Apply the stencil operator */
    for (int j=MAX(jstart_rank,RADIUS); j<=MIN(n-RADIUS-1,jend_rank); j++) {
      for (int i=MAX(istart_rank,RADIUS); i<=MIN(n-RADIUS-1,iend_rank); i++) {
#if LOOPGEN
        #include "loop_body_star.incl"
#else
        for (int jj=-RADIUS; jj<=RADIUS; jj++)
          OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj);
        for (int ii=-RADIUS; ii<0; ii++)
          OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
        for (int ii=1; ii<=RADIUS; ii++)
          OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
#endif
      }
    }

    /* LOAD/STORE FENCE */
    MPI_Win_sync(shm_win_out);
#if LOCAL_BARRIER_SYNCH
    MPI_Barrier(shm_comm); /* needed to avoid writing IN while other ranks are reading it */
#else
    for (int i=0; i<num_local_nbrs; i++) {
      MPI_Irecv(&dummy, 0, MPI_INT, local_nbr[i], 666, shm_comm, &(request[i]));
      MPI_Send(&dummy, 0, MPI_INT, local_nbr[i], 666, shm_comm);
    }
    MPI_Waitall(num_local_nbrs, request, MPI_STATUSES_IGNORE);
#endif

    /* add constant to solution to force refresh of neighbor data, if any */
    for (int j=jstart_rank; j<=jend_rank; j++)
      for (int i=istart_rank; i<=iend_rank; i++)
        IN(i,j) += 1.0;

    /* LOAD/STORE FENCE */
    MPI_Win_sync(shm_win_in);
#if LOCAL_BARRIER_SYNCH
    MPI_Barrier(shm_comm); /* needed to avoid reading IN while other ranks are writing it */
#else
    for (int i=0; i<num_local_nbrs; i++) {
      MPI_Irecv(&dummy, 0, MPI_INT, local_nbr[i], 666, shm_comm, &(request[i]));
      MPI_Send(&dummy, 0, MPI_INT, local_nbr[i], 666, shm_comm);
    }
    MPI_Waitall(num_local_nbrs, request, MPI_STATUSES_IGNORE);
#endif

  } /* end of iterations */

  local_stencil_time = wtime() - local_stencil_time;
  MPI_Reduce(&local_stencil_time, &stencil_time, 1, MPI_DOUBLE, MPI_MAX, root,
             MPI_COMM_WORLD);

  /* compute L1 norm in parallel */
  local_norm = (DTYPE) 0.0;
  for (int j=MAX(jstart_rank,RADIUS); j<=MIN(n-RADIUS-1,jend_rank); j++) {
    for (int i=MAX(istart_rank,RADIUS); i<=MIN(n-RADIUS-1,iend_rank); i++) {
      local_norm += (DTYPE)ABS(OUT(i,j));
    }
  }
  MPI_Reduce(&local_norm, &norm, 1, MPI_DTYPE, MPI_SUM, root, MPI_COMM_WORLD);

/*******************************************************************************
** Analyze and output results.
********************************************************************************/

  /* verify correctness */
  if (my_ID == root) {
    norm /= f_active_points;
    if (RADIUS > 0) {
      reference_norm = (DTYPE) (iterations+1) * (COEFX + COEFY);
    }
    else {
      reference_norm = (DTYPE) 0.0;
    }
    if (ABS(norm-reference_norm) > EPSILON) {
      printf("ERROR: L1 norm = "FSTR", Reference L1 norm = "FSTR"\n",
             norm, reference_norm);
      error = 1;
    }
    else {
      printf("Solution validates\n");
#if VERBOSE
      printf("Reference L1 norm = "FSTR", L1 norm = "FSTR"\n",
             reference_norm, norm);
#endif
    }
  }
  bail_out(error);

  MPI_Win_unlock_all(shm_win_in);
  MPI_Win_unlock_all(shm_win_out);
  MPI_Win_free(&shm_win_in);
  MPI_Win_free(&shm_win_out);

  if (my_ID == root) {
    /* flops/stencil: 2 flops (fma) for each point in the stencil,
       plus one flop for the update of the input array */
    flops = (DTYPE) (2*stencil_size+1) * f_active_points;
    avgtime = stencil_time/iterations;
    printf("Rate (MFlops/s): "FSTR"  Avg time (s): %lf\n",
           1.0E-06 * flops/avgtime, avgtime);
  }

  MPI_Finalize();
  exit(EXIT_SUCCESS);
}
int main( int argc, char *argv[] )
{
    int rank, nproc, i;
    int errors = 0, all_errors = 0;
    int *buf;
    MPI_Win window;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if (nproc < 2) {
        if (rank == 0) printf("Error: must be run with two or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /** Create using MPI_Win_create() **/

    if (rank == 0) {
        MPI_Alloc_mem(4*sizeof(int), MPI_INFO_NULL, &buf);
        *buf = nproc-1;
    } else {
        buf = NULL;
    }

    MPI_Win_create(buf, 4*sizeof(int)*(rank == 0), 1, MPI_INFO_NULL,
                   MPI_COMM_WORLD, &window);

    /* PROC_NULL Communication */
    {
        MPI_Request pn_req[4];
        int val[4], res;

        MPI_Win_lock_all(0, window);

        MPI_Rget_accumulate(&val[0], 1, MPI_INT, &res, 1, MPI_INT,
                            MPI_PROC_NULL, 0, 1, MPI_INT, MPI_REPLACE,
                            window, &pn_req[0]);
        MPI_Rget(&val[1], 1, MPI_INT, MPI_PROC_NULL, 1, 1, MPI_INT,
                 window, &pn_req[1]);
        MPI_Rput(&val[2], 1, MPI_INT, MPI_PROC_NULL, 2, 1, MPI_INT,
                 window, &pn_req[2]);
        MPI_Raccumulate(&val[3], 1, MPI_INT, MPI_PROC_NULL, 3, 1, MPI_INT,
                        MPI_REPLACE, window, &pn_req[3]);

        assert(pn_req[0] != MPI_REQUEST_NULL);
        assert(pn_req[1] != MPI_REQUEST_NULL);
        assert(pn_req[2] != MPI_REQUEST_NULL);
        assert(pn_req[3] != MPI_REQUEST_NULL);

        MPI_Win_unlock_all(window);

        MPI_Waitall(4, pn_req, MPI_STATUSES_IGNORE);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, window);

    /* GET-ACC: Test third-party communication, through rank 0. */
    for (i = 0; i < ITER; i++) {
        MPI_Request gacc_req;
        int val = -1, exp = -1;

        /* Processes form a ring.  Process 0 starts first, then passes a token
         * to the right.  Each process, in turn, performs third-party
         * communication via process 0's window. */
        if (rank > 0) {
            MPI_Recv(NULL, 0, MPI_BYTE, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        MPI_Rget_accumulate(&rank, 1, MPI_INT, &val, 1, MPI_INT, 0, 0, 1,
                            MPI_INT, MPI_REPLACE, window, &gacc_req);
        assert(gacc_req != MPI_REQUEST_NULL);
        MPI_Wait(&gacc_req, MPI_STATUS_IGNORE);

        MPI_Win_flush(0, window);

        exp = (rank + nproc-1) % nproc;

        if (val != exp) {
            printf("%d - Got %d, expected %d\n", rank, val, exp);
            errors++;
        }

        if (rank < nproc-1) {
            MPI_Send(NULL, 0, MPI_BYTE, rank+1, 0, MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) *buf = nproc-1;
    MPI_Win_sync(window);

    /* GET+PUT: Test third-party communication, through rank 0. */
    for (i = 0; i < ITER; i++) {
        MPI_Request req;
        int val = -1, exp = -1;

        /* Processes form a ring.  Process 0 starts first, then passes a token
         * to the right.  Each process, in turn, performs third-party
         * communication via process 0's window. */
        if (rank > 0) {
            MPI_Recv(NULL, 0, MPI_BYTE, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        MPI_Rget(&val, 1, MPI_INT, 0, 0, 1, MPI_INT, window, &req);
        assert(req != MPI_REQUEST_NULL);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Rput(&rank, 1, MPI_INT, 0, 0, 1, MPI_INT, window, &req);
        assert(req != MPI_REQUEST_NULL);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Win_flush(0, window);

        exp = (rank + nproc-1) % nproc;

        if (val != exp) {
            printf("%d - Got %d, expected %d\n", rank, val, exp);
            errors++;
        }

        if (rank < nproc-1) {
            MPI_Send(NULL, 0, MPI_BYTE, rank+1, 0, MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) *buf = nproc-1;
    MPI_Win_sync(window);

    /* GET+ACC: Test third-party communication, through rank 0. */
    for (i = 0; i < ITER; i++) {
        MPI_Request req;
        int val = -1, exp = -1;

        /* Processes form a ring.  Process 0 starts first, then passes a token
         * to the right.
         * Each process, in turn, performs third-party communication via
         * process 0's window. */
        if (rank > 0) {
            MPI_Recv(NULL, 0, MPI_BYTE, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        MPI_Rget(&val, 1, MPI_INT, 0, 0, 1, MPI_INT, window, &req);
        assert(req != MPI_REQUEST_NULL);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Raccumulate(&rank, 1, MPI_INT, 0, 0, 1, MPI_INT, MPI_REPLACE,
                        window, &req);
        assert(req != MPI_REQUEST_NULL);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Win_flush(0, window);

        exp = (rank + nproc-1) % nproc;

        if (val != exp) {
            printf("%d - Got %d, expected %d\n", rank, val, exp);
            errors++;
        }

        if (rank < nproc-1) {
            MPI_Send(NULL, 0, MPI_BYTE, rank+1, 0, MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Win_unlock(0, window);

    MPI_Barrier(MPI_COMM_WORLD);

    /* Wait inside of an epoch */
    {
        MPI_Request pn_req[4];
        int val[4], res;
        const int target = 0;

        MPI_Win_lock_all(0, window);

        MPI_Rget_accumulate(&val[0], 1, MPI_INT, &res, 1, MPI_INT, target,
                            0, 1, MPI_INT, MPI_REPLACE, window, &pn_req[0]);
        MPI_Rget(&val[1], 1, MPI_INT, target, 1, 1, MPI_INT, window, &pn_req[1]);
        MPI_Rput(&val[2], 1, MPI_INT, target, 2, 1, MPI_INT, window, &pn_req[2]);
        MPI_Raccumulate(&val[3], 1, MPI_INT, target, 3, 1, MPI_INT,
                        MPI_REPLACE, window, &pn_req[3]);

        assert(pn_req[0] != MPI_REQUEST_NULL);
        assert(pn_req[1] != MPI_REQUEST_NULL);
        assert(pn_req[2] != MPI_REQUEST_NULL);
        assert(pn_req[3] != MPI_REQUEST_NULL);

        MPI_Waitall(4, pn_req, MPI_STATUSES_IGNORE);

        MPI_Win_unlock_all(window);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Wait outside of an epoch */
    {
        MPI_Request pn_req[4];
        int val[4], res;
        const int target = 0;

        MPI_Win_lock_all(0, window);

        MPI_Rget_accumulate(&val[0], 1, MPI_INT, &res, 1, MPI_INT, target,
                            0, 1, MPI_INT, MPI_REPLACE, window, &pn_req[0]);
        MPI_Rget(&val[1], 1, MPI_INT, target, 1, 1, MPI_INT, window, &pn_req[1]);
        MPI_Rput(&val[2], 1, MPI_INT, target, 2, 1, MPI_INT, window, &pn_req[2]);
        MPI_Raccumulate(&val[3], 1, MPI_INT, target, 3, 1, MPI_INT,
                        MPI_REPLACE, window, &pn_req[3]);

        assert(pn_req[0] != MPI_REQUEST_NULL);
        assert(pn_req[1] != MPI_REQUEST_NULL);
        assert(pn_req[2] != MPI_REQUEST_NULL);
        assert(pn_req[3] != MPI_REQUEST_NULL);

        MPI_Win_unlock_all(window);

        MPI_Waitall(4, pn_req, MPI_STATUSES_IGNORE);
    }

    /* Wait in a different epoch */
    {
        MPI_Request pn_req[4];
        int val[4], res;
        const int target = 0;

        MPI_Win_lock_all(0, window);

        MPI_Rget_accumulate(&val[0], 1, MPI_INT, &res, 1, MPI_INT, target,
                            0, 1, MPI_INT, MPI_REPLACE, window, &pn_req[0]);
        MPI_Rget(&val[1], 1, MPI_INT, target, 1, 1, MPI_INT, window, &pn_req[1]);
        MPI_Rput(&val[2], 1, MPI_INT, target, 2, 1, MPI_INT, window, &pn_req[2]);
        MPI_Raccumulate(&val[3], 1, MPI_INT, target, 3, 1, MPI_INT,
                        MPI_REPLACE, window, &pn_req[3]);

        assert(pn_req[0] != MPI_REQUEST_NULL);
        assert(pn_req[1] != MPI_REQUEST_NULL);
        assert(pn_req[2] != MPI_REQUEST_NULL);
        assert(pn_req[3] != MPI_REQUEST_NULL);

        MPI_Win_unlock_all(window);

        MPI_Win_lock_all(0, window);
        MPI_Waitall(4, pn_req, MPI_STATUSES_IGNORE);
        MPI_Win_unlock_all(window);
    }

    /* Wait in a fence epoch */
    {
        MPI_Request pn_req[4];
        int val[4], res;
        const int target = 0;

        MPI_Win_lock_all(0, window);

        MPI_Rget_accumulate(&val[0], 1, MPI_INT, &res, 1, MPI_INT, target,
                            0, 1, MPI_INT, MPI_REPLACE, window, &pn_req[0]);
        MPI_Rget(&val[1], 1, MPI_INT, target, 1, 1, MPI_INT, window, &pn_req[1]);
        MPI_Rput(&val[2], 1, MPI_INT, target, 2, 1, MPI_INT, window, &pn_req[2]);
        MPI_Raccumulate(&val[3], 1, MPI_INT, target, 3, 1, MPI_INT,
                        MPI_REPLACE, window, &pn_req[3]);

        assert(pn_req[0] != MPI_REQUEST_NULL);
        assert(pn_req[1] != MPI_REQUEST_NULL);
        assert(pn_req[2] != MPI_REQUEST_NULL);
        assert(pn_req[3] != MPI_REQUEST_NULL);

        MPI_Win_unlock_all(window);

        MPI_Win_fence(0, window);
        MPI_Waitall(4, pn_req, MPI_STATUSES_IGNORE);
        MPI_Win_fence(0, window);
    }

    MPI_Win_free(&window);
    if (buf) MPI_Free_mem(buf);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    MPI_Finalize();

    return 0;
}
/* MAIN */
int main (int argc, char *argv[])
{
  /* to be used for hello world exchanges */
  int rank, numtasks, namelen;
  char name[MPI_MAX_PROCESSOR_NAME];

  /* related to MPI-3 shm */
  MPI_Comm shmcomm;    /* shm communicator */
  MPI_Win win;         /* shm window object */
  int *mem;            /* shm memory to be allocated on each node */
  int i0, i1;
  int* i2;             /* for reading back from shm */

  /* current rank exchanges hello world info with partners */
  int partners[n_partners];
  int *partners_map;   /* mapping in shm communicator */
  int **partners_ptrs; /* ptrs to shared mem window for each partner */
  int j, partner, alloc_len;
  int n_node_partners = 0, n_inter_partners = 0;

  /* non-blocking inter-node */
  MPI_Request *reqs, *rq;
  int rbuf[n_partners]; /* recv buffer */
  int req_num = 2;      /* each inter-node exchange needs a pair of MPI_Irecv and MPI_Isend */

  if (getenv("1DRING_VERBOSE")) verbose = 1; /* switch printfs on/off via env */

  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &numtasks);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (name, &namelen);

  /* if (verbose) printf ("Hello world from COMM_WORLD: rank %d of %d is running on %s\n",
                          rank, numtasks, name); */

  /* The 1D ring is defined in the partners array.  It can easily be expanded
     to higher-order stencils.  The current rank has 2 neighbours: previous
     and next, i.e., prev-rank-next */
  partners[0] = rank-1; /* prev */
  partners[1] = rank+1; /* next */
  /* We will use periodic boundary conditions here */
  if (rank == 0) partners[0] = numtasks - 1;
  if (rank == (numtasks - 1)) partners[1] = 0;

  /* MPI-3 SHM collective creates shm communicator */
  MPI_Comm_split_type (MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shmcomm);

  /* mapping: global rank -> shmcomm rank is in partners_map */
  partners_map = (int*)malloc(n_partners*sizeof(int)); /* allocate partners_map */
  translate_ranks(shmcomm, partners, partners_map);

  /* number of inter and intra node partners */
  get_n_partners (rank, partners, partners_map, &n_node_partners, &n_inter_partners);
  if (verbose)
    print_n_partners (rank, partners, partners_map, n_node_partners, n_inter_partners);

  alloc_len = 2*sizeof(int) + namelen+1; /* the size of hello world info: 2 ints and a string; +1 for '\n' */
  if (n_node_partners > 0)
    {
      /* allocate shared memory windows on each node for intra-node partners */
      MPI_Win_allocate_shared (alloc_len, 1, MPI_INFO_NULL, shmcomm, /* inputs to MPI-3 SHM collective */
                               &mem, &win); /* outputs: mem - initial address of window; win - window object */

      /* pointers to mem windows */
      partners_ptrs = (int **)malloc(n_partners*sizeof(int*));
      get_partners_ptrs (win, partners, partners_map, partners_ptrs);
    }
  else
    {
      mem = (int *)malloc(alloc_len);
    }

  /* allocate MPI Request resources for inter-node comm. */
  if (n_inter_partners > 0)
    {
      reqs = (MPI_Request*)malloc(req_num*n_inter_partners*sizeof(MPI_Request));
      rq = reqs;
    }

  /* start halo exchange */

  if (n_node_partners > 0)
    {
      /* Entering MPI-3 RMA access epoch required for MPI-3 shm */
      MPI_Win_lock_all (MPI_MODE_NOCHECK, win);
      /* alternatively, MPI_Win_lock_all, MPI_Win_sync and MPI_Barrier can be
         replaced with two MPI_Win_fence calls surrounding the update of
         shared memory. */
      /* MPI_Win_fence(0, win); */ /* -- alternative */
    }

  /* update MPI-3 shared memory (or local memory in case of lack of node
   * partners) by writing hello_world info into mem */
  mem[0] = rank;
  mem[1] = numtasks;
  memcpy(mem+2, name, namelen);

  if (n_node_partners > 0)
    {
      /* MPI_Win_fence (0, win); */ /* -- alternative end */
      MPI_Win_sync (win);    /* memory fence to sync node exchanges */
      MPI_Barrier (shmcomm); /* time barrier to make sure all ranks have updated their info */
    }

  for (j=0; j<n_partners; j++)
    {
      if (partners_map[j] != MPI_UNDEFINED) /* partner j is on the same node */
        {
          i0 = partners_ptrs[j][0]; /* load from MPI-3/SHM ops! */
          i1 = partners_ptrs[j][1];
          i2 = partners_ptrs[j]+2;
          if (verbose)
            printf ("load MPI/SHM values from neighbour => rank %d, numtasks %d on %s\n",
                    i0, i1, i2);
        }
      else /* inter-node non-blocking MPI-1 */
        {
          MPI_Irecv (&rbuf[j], 1, MPI_INT, partners[j], 1, MPI_COMM_WORLD, rq++);
          MPI_Isend (&rank, 1, MPI_INT, partners[j], 1, MPI_COMM_WORLD, rq++);
        }
    }

  /* sync inter-node exchanges and print out receive buffer rbuf */
  if (n_inter_partners > 0)
    {
      MPI_Waitall (req_num*n_inter_partners, reqs, MPI_STATUSES_IGNORE);
      if (verbose){
        for (j=0; j<n_partners; j++)
          if (partners_map[j] == MPI_UNDEFINED)
            printf("Received from my inter-node partner %d\n", rbuf[j]);
      }
    }

  if (n_node_partners > 0)
    {
      MPI_Win_unlock_all (win); /* close RMA epoch */

      /* free resources */
      MPI_Win_free (&win);
      free (partners_ptrs);
    }
  if (n_inter_partners) free (reqs);

  free (partners_map);

  MPI_Finalize ();
  return (0);
}
int main(int argc, char **argv)
{
    int i, rank, nproc;
    int shm_rank, shm_nproc;
    MPI_Aint size;
    int errors = 0, all_errors = 0;
    int **bases = NULL, *my_base = NULL;
    int disp_unit;
    MPI_Win shm_win = MPI_WIN_NULL, win = MPI_WIN_NULL;
    MPI_Comm shm_comm = MPI_COMM_NULL;
    MPI_Group shm_group = MPI_GROUP_NULL, world_group = MPI_GROUP_NULL;
    int dst_shm_rank, dst_world_rank;
    MPI_Info create_info = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank,
                        MPI_INFO_NULL, &shm_comm);

    MPI_Comm_rank(shm_comm, &shm_rank);
    MPI_Comm_size(shm_comm, &shm_nproc);

    /* Platform does not support shared memory, just return. */
    if (shm_nproc < 2) {
        goto exit;
    }

    /* Specify the last process in the node as the target process */
    dst_shm_rank = shm_nproc - 1;
    MPI_Comm_group(shm_comm, &shm_group);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_translate_ranks(shm_group, 1, &dst_shm_rank, world_group,
                              &dst_world_rank);

    bases = calloc(shm_nproc, sizeof(int *));

    /* Allocate shm window among local processes, then create a global window
     * with those shm window buffers */
    MPI_Win_allocate_shared(sizeof(int) * ELEM_PER_PROC, sizeof(int),
                            MPI_INFO_NULL, shm_comm, &my_base, &shm_win);
    if (verbose)
        printf("%d -- allocate shared: my_base = %p, absolute base\n",
               shm_rank, my_base);

    for (i = 0; i < shm_nproc; i++) {
        MPI_Win_shared_query(shm_win, i, &size, &disp_unit, &bases[i]);
        if (verbose)
            printf("%d --    shared query: base[%d]=%p, size %ld, unit %d\n",
                   shm_rank, i, bases[i], size, disp_unit);
    }

#ifdef USE_INFO_ALLOC_SHM
    MPI_Info_create(&create_info);
    MPI_Info_set(create_info, "alloc_shm", "true");
#else
    create_info = MPI_INFO_NULL;
#endif

    MPI_Win_create(my_base, sizeof(int) * ELEM_PER_PROC, sizeof(int),
                   create_info, MPI_COMM_WORLD, &win);

    /* Reset data */
    for (i = 0; i < ELEM_PER_PROC; i++) {
        my_base[i] = 0;
        local_buf[i] = i + 1;
    }

    /* Do RMA through the global window, then check the value through the
     * shared window */
    MPI_Win_lock_all(0, win);
    MPI_Win_lock_all(0, shm_win);

    if (shm_rank == 0) {
        MPI_Put(&local_buf[0], 1, MPI_INT, dst_world_rank, 0, 1, MPI_INT, win);
        MPI_Put(&local_buf[ELEM_PER_PROC - 1], 1, MPI_INT, dst_world_rank,
                ELEM_PER_PROC - 1, 1, MPI_INT, win);
        MPI_Win_flush(dst_world_rank, win);
    }

    MPI_Win_sync(shm_win);
    MPI_Barrier(shm_comm);
    MPI_Win_sync(shm_win);

    if (bases[dst_shm_rank][0] != local_buf[0]) {
        errors++;
        printf("%d -- Got %d at rank %d index %d, expected %d\n", rank,
               bases[dst_shm_rank][0], dst_shm_rank, 0, local_buf[0]);
    }
    if (bases[dst_shm_rank][ELEM_PER_PROC - 1] != local_buf[ELEM_PER_PROC - 1]) {
        errors++;
        printf("%d -- Got %d at rank %d index %d, expected %d\n", rank,
               bases[dst_shm_rank][ELEM_PER_PROC - 1], dst_shm_rank,
               ELEM_PER_PROC - 1, local_buf[ELEM_PER_PROC - 1]);
    }

    MPI_Win_unlock_all(shm_win);
    MPI_Win_unlock_all(win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    MPI_Win_free(&win);
    MPI_Win_free(&shm_win);

  exit:
    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    if (create_info != MPI_INFO_NULL)
        MPI_Info_free(&create_info);
    if (shm_comm != MPI_COMM_NULL)
        MPI_Comm_free(&shm_comm);
    if (shm_group != MPI_GROUP_NULL)
        MPI_Group_free(&shm_group);
    if (world_group != MPI_GROUP_NULL)
        MPI_Group_free(&world_group);

    MPI_Finalize();

    if (bases)
        free(bases);

    return 0;
}
int main(int argc, char **argv)
{
    int r, p;
    int n, energy, niters, px, py;
    int rx, ry;
    int north, south, west, east;
    int bx, by, offx, offy;

    /* three heat sources */
    const int nsources = 3;
    int sources[nsources][2];
    int locnsources;             /* number of sources in my area */
    int locsources[nsources][2]; /* sources local to my rank */

    double t1, t2;
    int iter, i, j;
    double heat, rheat;
    int final_flag;

    /* initialize MPI environment */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &r);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    /* create shared memory communicator */
    MPI_Comm shmcomm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &shmcomm);
    int sr, sp; /* rank and size in shmem comm */
    MPI_Comm_size(shmcomm, &sp);
    MPI_Comm_rank(shmcomm, &sr);

    /* this code works only on comm world! */
    if (sp != p) MPI_Abort(MPI_COMM_WORLD, 1);

    /* argument checking and setting */
    setup(r, p, argc, argv, &n, &energy, &niters, &px, &py, &final_flag);

    if (final_flag == 1) {
        MPI_Finalize();
        exit(0);
    }

    /* determine my coordinates (x,y) -- r=x*a+y in the 2d processor array */
    rx = r % px;
    ry = r / px;

    /* determine my four neighbors */
    north = (ry - 1) * px + rx; if (ry-1 < 0)   north = MPI_PROC_NULL;
    south = (ry + 1) * px + rx; if (ry+1 >= py) south = MPI_PROC_NULL;
    west  = ry * px + rx - 1;   if (rx-1 < 0)   west  = MPI_PROC_NULL;
    east  = ry * px + rx + 1;   if (rx+1 >= px) east  = MPI_PROC_NULL;

    /* decompose the domain */
    bx = n / px;    /* block size in x */
    by = n / py;    /* block size in y */
    offx = rx * bx; /* offset in x */
    offy = ry * by; /* offset in y */

    /* printf("%i (%i,%i) - w: %i, e: %i, n: %i, s: %i\n", r, ry,rx,west,east,north,south); */

    int size = (bx+2)*(by+2); /* process-local grid (including halos (thus +2)) */
    double *mem;
    MPI_Win win;
    MPI_Win_allocate_shared(2*size*sizeof(double), 1, MPI_INFO_NULL, shmcomm,
                            &mem, &win);

    double *tmp;
    double *anew = mem;      /* each rank's offset */
    double *aold = mem+size; /* second half is aold! */
    double *northptr, *southptr, *eastptr, *westptr;
    double *northptr2, *southptr2, *eastptr2, *westptr2;
    MPI_Aint sz;
    int dsp_unit;

    /* locate the shared memory region for each neighbor */
    MPI_Win_shared_query(win, north, &sz, &dsp_unit, &northptr);
    MPI_Win_shared_query(win, south, &sz, &dsp_unit, &southptr);
    MPI_Win_shared_query(win, east,  &sz, &dsp_unit, &eastptr);
    MPI_Win_shared_query(win, west,  &sz, &dsp_unit, &westptr);
    northptr2 = northptr+size;
    southptr2 = southptr+size;
    eastptr2  = eastptr+size;
    westptr2  = westptr+size;

    /* initialize three heat sources */
    init_sources(bx, by, offx, offy, n, nsources, sources, &locnsources, locsources);

    t1 = MPI_Wtime(); /* take time */

    MPI_Win_lock_all(0, win);

    for (iter = 0; iter < niters; ++iter) {

        /* refresh heat sources */
        for (i = 0; i < locnsources; ++i) {
            aold[ind(locsources[i][0], locsources[i][1])] += energy; /* heat source */
        }

        MPI_Win_sync(win);
        MPI_Barrier(shmcomm);

        /* exchange data with neighbors */
        if (north != MPI_PROC_NULL) {
            for (i=0; i<bx; ++i) aold[ind(i+1,0)]    = northptr2[ind(i+1,by)]; /* pack loop - last valid region */
        }
        if (south != MPI_PROC_NULL) {
            for (i=0; i<bx; ++i) aold[ind(i+1,by+1)] = southptr2[ind(i+1,1)];  /* pack loop */
        }
        if (east != MPI_PROC_NULL) {
            for (i=0; i<by; ++i) aold[ind(bx+1,i+1)] = eastptr2[ind(1,i+1)];   /* pack loop */
        }
        if (west != MPI_PROC_NULL) {
            for (i=0; i<by; ++i) aold[ind(0,i+1)]    = westptr2[ind(bx,i+1)];  /* pack loop */
        }

        /* update grid points */
        update_grid(bx, by, aold, anew, &heat);

        /* swap working arrays */
        tmp = anew; anew = aold; aold = tmp;

        /* optional - print image */
        if (iter == niters-1)
            printarr_par(iter, anew, n, px, py, rx, ry, bx, by, offx, offy, shmcomm);
    }

    MPI_Win_unlock_all(win);

    t2 = MPI_Wtime();

    /* get final heat in the system */
    MPI_Allreduce(&heat, &rheat, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    if (!r) printf("[%i] last heat: %f time: %f\n", r, rheat, t2-t1);

    /* free working arrays and communication buffers */
    MPI_Win_free(&win);
    MPI_Comm_free(&shmcomm);

    MPI_Finalize();
}