/* MPIC_Sendrecv_replace - internal (collectives) send-and-receive using a
 * single buffer over an MPI_Comm handle.
 *
 * The visible portion packs the outgoing data into a temporary buffer so the
 * user buffer 'buf' is free to receive into.  NOTE(review): this chunk is
 * truncated -- the irecv/isend/progress phase and the fn_exit/fn_fail
 * epilogue are not visible here. */
int MPIC_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
                          int dest, int sendtag,
                          int source, int recvtag,
                          MPI_Comm comm, MPI_Status *status)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t context_id_offset;
    MPID_Request *sreq;
    MPID_Request *rreq;
    void *tmpbuf = NULL;
    int tmpbuf_size = 0;   /* bytes needed to pack 'count' elements of 'datatype' */
    int tmpbuf_count = 0;  /* actual number of packed bytes produced */
    MPID_Comm *comm_ptr;
    MPIU_CHKLMEM_DECL(1);  /* bookkeeping for one checked local allocation */
    MPIDI_STATE_DECL(MPID_STATE_MPIC_SENDRECV_REPLACE);
#ifdef MPID_LOG_ARROWS
    /* The logging macros log sendcount and recvcount */
    int sendcount = count, recvcount = count;
#endif
    MPIDI_PT2PT_FUNC_ENTER_BOTH(MPID_STATE_MPIC_SENDRECV_REPLACE);
    MPID_Comm_get_ptr( comm, comm_ptr );
    /* Collective helpers communicate on the collective context; choose the
     * intra- vs. inter-communicator offset accordingly. */
    context_id_offset = (comm_ptr->comm_kind == MPID_INTRACOMM) ?
        MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL;
    if (count > 0 && dest != MPI_PROC_NULL)
    {
        /* Pack the send data into scratch space so 'buf' can be overwritten
         * by the receive.  MPIU_CHKLMEM_MALLOC presumably jumps to fn_fail on
         * allocation failure (standard MPICH convention -- the label is in
         * the non-visible part of this function). */
        MPIR_Pack_size_impl(count, datatype, &tmpbuf_size);
        MPIU_CHKLMEM_MALLOC(tmpbuf, void *, tmpbuf_size, mpi_errno, "temporary send buffer");
        mpi_errno = MPIR_Pack_impl(buf, count, datatype, tmpbuf, tmpbuf_size, &tmpbuf_count);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
/* MPIR_Ibcast_binomial - build a nonblocking binomial-tree broadcast schedule
 * into 's'.  The visible portion computes sizes and, for noncontiguous or
 * heterogeneous data, schedules packing of the root's buffer into a scratch
 * buffer.  NOTE(review): this chunk is truncated -- the actual binomial
 * send/recv scheduling loop is not visible here. */
int MPIR_Ibcast_binomial(void *buffer, int count, MPI_Datatype datatype, int root,
                         MPID_Comm *comm_ptr, MPID_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int mask;
    int comm_size, rank;
    int is_contig, is_homogeneous;
    MPI_Aint nbytes, type_size;
    int relative_rank;
    int src, dst;
    void *tmp_buf = NULL;
    MPIR_SCHED_CHKPMEM_DECL(1);  /* bookkeeping for one schedule-lifetime allocation */
    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;
    if (comm_size == 1) {
        /* nothing to add, this is a useless broadcast */
        goto fn_exit;
    }
    MPID_Datatype_is_contig(datatype, &is_contig);
    is_homogeneous = 1;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = 0;
#endif
    /* MPI_Type_size() might not give the accurate size of the packed
     * datatype for heterogeneous systems (because of padding, encoding,
     * etc). On the other hand, MPI_Pack_size() can become very
     * expensive, depending on the implementation, especially for
     * heterogeneous systems. We want to use MPI_Type_size() wherever
     * possible, and MPI_Pack_size() in other places. */
    if (is_homogeneous)
        MPID_Datatype_get_size_macro(datatype, type_size);
    else
        MPIR_Pack_size_impl(1, datatype, &type_size);
    nbytes = type_size * count;
    if (!is_contig || !is_homogeneous)
    {
        /* Communicate out of a packed scratch buffer instead of the user
         * buffer; schedule the root's pack as a copy-to-MPI_PACKED. */
        MPIR_SCHED_CHKPMEM_MALLOC(tmp_buf, void *, nbytes, mpi_errno, "tmp_buf");
        /* TODO: Pipeline the packing and communication */
        if (rank == root) {
            mpi_errno = MPID_Sched_copy(buffer, count, datatype, tmp_buf, nbytes, MPI_PACKED, s);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
            /* no later operation may start until the pack completes */
            MPID_SCHED_BARRIER(s);
        }
    }
/* MPIC_Sendrecv_replace (errflag variant) - internal send-and-receive using a
 * single buffer, taking a communicator pointer and a collective error flag.
 * On a pre-existing error condition the send tag is marked so peers can
 * detect the failure.  NOTE(review): this chunk is truncated -- the actual
 * communication phase and the fn_exit/fn_fail epilogue are not visible. */
int MPIC_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
                          int dest, int sendtag,
                          int source, int recvtag,
                          MPID_Comm *comm_ptr, MPI_Status *status, mpir_errflag_t *errflag)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Status mystatus;           /* local stand-in when caller passes MPI_STATUS_IGNORE */
    MPIR_Context_id_t context_id_offset;
    MPID_Request *sreq = NULL;
    MPID_Request *rreq = NULL;
    void *tmpbuf = NULL;
    MPI_Aint tmpbuf_size = 0;      /* packed size in bytes */
    MPI_Aint tmpbuf_count = 0;     /* bytes actually packed */
    MPIU_CHKLMEM_DECL(1);
    MPIDI_STATE_DECL(MPID_STATE_MPIC_SENDRECV_REPLACE);
#ifdef MPID_LOG_ARROWS
    /* The logging macros log sendcount and recvcount */
    int sendcount = count, recvcount = count;
#endif
    MPIDI_FUNC_ENTER(MPID_STATE_MPIC_SENDRECV_REPLACE);
    MPIU_DBG_MSG_D(PT2PT, TYPICAL, "IN: errflag = %d", *errflag);
    MPIU_ERR_CHKANDJUMP1((count < 0), mpi_errno, MPI_ERR_COUNT,
                         "**countneg", "**countneg %d", count);
    /* Internally we always need a real status object to inspect. */
    if (status == MPI_STATUS_IGNORE) status = &mystatus;
    /* Propagate an already-raised error condition in-band via tag bits. */
    switch (*errflag) {
        case MPIR_ERR_NONE:
            break;
        case MPIR_ERR_PROC_FAILED:
            MPIR_TAG_SET_PROC_FAILURE_BIT(sendtag);
            /* fallthrough -- presumably intentional so the generic error bit
             * is also set on process failure; TODO confirm against the
             * MPIR_TAG_* macro definitions */
        default:
            MPIR_TAG_SET_ERROR_BIT(sendtag);
    }
    /* Collective helpers use the collective context id offset. */
    context_id_offset = (comm_ptr->comm_kind == MPID_INTRACOMM) ?
        MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL;
    if (count > 0 && dest != MPI_PROC_NULL)
    {
        /* Pack outgoing data to scratch so 'buf' can receive in place. */
        MPIR_Pack_size_impl(count, datatype, &tmpbuf_size);
        MPIU_CHKLMEM_MALLOC(tmpbuf, void *, tmpbuf_size, mpi_errno, "temporary send buffer");
        mpi_errno = MPIR_Pack_impl(buf, count, datatype, tmpbuf, tmpbuf_size, &tmpbuf_count);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
/* MPID_nem_mxm_issend - start a synchronous-mode nonblocking send (MPI_Issend
 * semantics) through the MXM netmod.  Creates the send request, describes the
 * data as a single iov (packing noncontiguous data into a temporary buffer),
 * and hands it to _mxm_isend with MXM_MPICH_ISEND_SYNC.
 *
 * Returns an MPI error code; on success *sreq_ptr holds the new request. */
int MPID_nem_mxm_issend(MPIDI_VC_t * vc, const void *buf, int count, MPI_Datatype datatype,
                        int rank, int tag, MPID_Comm * comm, int context_offset,
                        MPID_Request ** sreq_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request *sreq = NULL;
    MPID_Datatype *dt_ptr;
    int dt_contig;
    MPIDI_msg_sz_t data_sz;
    MPI_Aint dt_true_lb;
    MPID_nem_mxm_vc_area *vc_area = NULL;
    MPID_nem_mxm_req_area *req_area = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISSEND);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISSEND);
    MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
    /* create a request (macro executes 'goto fn_exit' on failure) */
    MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
    MPIU_Assert(sreq != NULL);
    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
    /* NOTE: 'seqnum' is not declared in this function body -- it is
     * presumably introduced by one of the MPIDI_* sequence-number macros;
     * confirm against the MPID_USE_SEQUENCE_NUMBERS configuration. */
    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
    MPIDI_Request_set_seqnum(sreq, seqnum);
    /* Derived datatypes must stay alive until the send completes. */
    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
        MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
        MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
    }
    sreq->partner_request = NULL;
    sreq->dev.OnDataAvail = NULL;
    sreq->dev.tmpbuf = NULL;
    sreq->ch.vc = vc;
    sreq->ch.noncontig = FALSE;
    _dbg_mxm_output(5, "isSend ========> Sending USER msg for req %p (context %d to %d tag %d size %d) \n",
                    sreq, comm->context_id + context_offset, rank, tag, data_sz);
    vc_area = VC_BASE(vc);
    req_area = REQ_BASE(sreq);
    /* Initialize the netmod-private request area with an empty iov. */
    req_area->ctx = sreq;
    req_area->iov_buf = req_area->tmp_buf;
    req_area->iov_count = 0;
    req_area->iov_buf[0].ptr = NULL;
    req_area->iov_buf[0].length = 0;
    if (data_sz) {
        if (dt_contig) {
            /* Contiguous data: point the iov straight at the user buffer
             * (adjusted by the datatype's true lower bound). */
            req_area->iov_count = 1;
            req_area->iov_buf[0].ptr = (char *) (buf) + dt_true_lb;
            req_area->iov_buf[0].length = data_sz;
        }
        else {
            /* Noncontiguous data: pack into a request-owned temporary buffer
             * and send that as one contiguous region. */
            MPIDI_msg_sz_t last;
            MPI_Aint packsize = 0;
            sreq->ch.noncontig = TRUE;
            sreq->dev.segment_ptr = MPID_Segment_alloc();
            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
                                 "**nomem", "**nomem %s", "MPID_Segment_alloc");
            MPIR_Pack_size_impl(count, datatype, &packsize);
            last = data_sz;
            if (packsize > 0) {
                sreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
                MPIU_Assert(sreq->dev.tmpbuf);
                MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
                /* 'last' is in/out: on return it holds the bytes actually packed */
                MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
                req_area->iov_count = 1;
                req_area->iov_buf[0].ptr = sreq->dev.tmpbuf;
                req_area->iov_buf[0].length = last;
            }
        }
    }
    vc_area->pending_sends += 1;
    /* Synchronous-mode send: MXM_MPICH_ISEND_SYNC makes completion wait for
     * the matching receive to start. */
    mpi_errno = _mxm_isend(vc_area->mxm_ep, req_area, MXM_MPICH_ISEND_SYNC,
                           (mxm_mq_h) comm->dev.ch.netmod_priv, comm->rank, tag,
                           _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset), 0);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    _dbg_mxm_out_req(sreq);
  fn_exit:
    *sreq_ptr = sreq;
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISSEND);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Alltoall_intra - intracommunicator MPI_Alltoall.  Algorithm selection
 * (visible here): pairwise sendrecv_replace for MPI_IN_PLACE; the Bruck
 * index algorithm for short messages on >= 8 processes; plus an older
 * recursive-doubling variant kept under MPIR_OLD_SHORT_ALLTOALL_ALG.
 * NOTE(review): this chunk is truncated -- the medium/long-message branches
 * and the fn_exit/fn_fail epilogue are not visible here. */
int MPIR_Alltoall_intra(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                        void *recvbuf, int recvcount, MPI_Datatype recvtype,
                        MPID_Comm *comm_ptr, int *errflag)
{
    int comm_size, i, j, pof2;
    MPI_Aint sendtype_extent, recvtype_extent;
    MPI_Aint recvtype_true_extent, recvbuf_extent, recvtype_true_lb;
    int mpi_errno=MPI_SUCCESS, src, dst, rank, nbytes;
    int mpi_errno_ret = MPI_SUCCESS;   /* first error recorded; loop continues on error */
    MPI_Status status;
    int sendtype_size, pack_size, block, position, *displs, count;
    MPI_Datatype newtype = MPI_DATATYPE_NULL;
    void *tmp_buf;
    MPI_Comm comm;
    MPI_Request *reqarray;
    MPI_Status *starray;
    MPIU_CHKLMEM_DECL(6);
#ifdef MPIR_OLD_SHORT_ALLTOALL_ALG
    MPI_Aint sendtype_true_extent, sendbuf_extent, sendtype_true_lb;
    int k, p, curr_cnt, dst_tree_root, my_tree_root;
    int last_recv_cnt, mask, tmp_mask, tree_root, nprocs_completed;
#endif
    if (recvcount == 0) return MPI_SUCCESS;
    comm = comm_ptr->handle;
    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;
    /* Get extent of send and recv types */
    MPID_Datatype_get_extent_macro(recvtype, recvtype_extent);
    MPID_Datatype_get_extent_macro(sendtype, sendtype_extent);
    MPID_Datatype_get_size_macro(sendtype, sendtype_size);
    nbytes = sendtype_size * sendcount;
    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_ENTER( comm_ptr );
    if (sendbuf == MPI_IN_PLACE) {
        /* We use pair-wise sendrecv_replace in order to conserve memory usage,
         * which is keeping with the spirit of the MPI-2.2 Standard.  But
         * because of this approach all processes must agree on the global
         * schedule of sendrecv_replace operations to avoid deadlock.
         *
         * Note that this is not an especially efficient algorithm in terms of
         * time and there will be multiple repeated malloc/free's rather than
         * maintaining a single buffer across the whole loop.  Something like
         * MADRE is probably the best solution for the MPI_IN_PLACE scenario. */
        for (i = 0; i < comm_size; ++i) {
            /* start inner loop at i to avoid re-exchanging data */
            for (j = i; j < comm_size; ++j) {
                if (rank == i) {
                    /* also covers the (rank == i && rank == j) case */
                    mpi_errno = MPIC_Sendrecv_replace_ft(((char *)recvbuf + j*recvcount*recvtype_extent),
                                                         recvcount, recvtype,
                                                         j, MPIR_ALLTOALL_TAG,
                                                         j, MPIR_ALLTOALL_TAG,
                                                         comm, &status, errflag);
                    if (mpi_errno) {
                        /* for communication errors, just record the error but continue */
                        *errflag = TRUE;
                        MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                        MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    }
                }
                else if (rank == j) {
                    /* same as above with i/j args reversed */
                    mpi_errno = MPIC_Sendrecv_replace_ft(((char *)recvbuf + i*recvcount*recvtype_extent),
                                                         recvcount, recvtype,
                                                         i, MPIR_ALLTOALL_TAG,
                                                         i, MPIR_ALLTOALL_TAG,
                                                         comm, &status, errflag);
                    if (mpi_errno) {
                        /* for communication errors, just record the error but continue */
                        *errflag = TRUE;
                        MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                        MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    }
                }
            }
        }
    }
    else if ((nbytes <= MPIR_PARAM_ALLTOALL_SHORT_MSG_SIZE) && (comm_size >= 8)) {
        /* use the indexing algorithm by Jehoshua Bruck et al,
         * IEEE TPDS, Nov. 97 */
        /* allocate temporary buffer */
        MPIR_Pack_size_impl(recvcount*comm_size, recvtype, &pack_size);
        MPIU_CHKLMEM_MALLOC(tmp_buf, void *, pack_size, mpi_errno, "tmp_buf");
        /* Do Phase 1 of the algorithm.  Shift the data blocks on process i
         * upwards by a distance of i blocks.  Store the result in recvbuf. */
        mpi_errno = MPIR_Localcopy((char *) sendbuf + rank*sendcount*sendtype_extent,
                                   (comm_size - rank)*sendcount, sendtype,
                                   recvbuf,
                                   (comm_size - rank)*recvcount, recvtype);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        mpi_errno = MPIR_Localcopy(sendbuf, rank*sendcount, sendtype,
                                   (char *) recvbuf + (comm_size-rank)*recvcount*recvtype_extent,
                                   rank*recvcount, recvtype);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        /* Input data is now stored in recvbuf with datatype recvtype */
        /* Now do Phase 2, the communication phase.  It takes
           ceiling(lg p) steps.  In each step i, each process sends to
           rank+2^i and receives from rank-2^i, and exchanges all data
           blocks whose ith bit is 1. */
        /* allocate displacements array for indexed datatype used in
           communication */
        MPIU_CHKLMEM_MALLOC(displs, int *, comm_size * sizeof(int), mpi_errno, "displs");
        pof2 = 1;
        while (pof2 < comm_size) {
            dst = (rank + pof2) % comm_size;
            src = (rank - pof2 + comm_size) % comm_size;
            /* Exchange all data blocks whose ith bit is 1 */
            /* Create an indexed datatype for the purpose */
            count = 0;
            for (block=1; block<comm_size; block++) {
                if (block & pof2) {
                    displs[count] = block * recvcount;
                    count++;
                }
            }
            mpi_errno = MPIR_Type_create_indexed_block_impl(count, recvcount,
                                                            displs, recvtype, &newtype);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
            mpi_errno = MPIR_Type_commit_impl(&newtype);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
            /* Pack the selected blocks, send them, and receive the peer's
             * selection directly into the selected positions of recvbuf. */
            position = 0;
            mpi_errno = MPIR_Pack_impl(recvbuf, 1, newtype, tmp_buf, pack_size, &position);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
            mpi_errno = MPIC_Sendrecv_ft(tmp_buf, position, MPI_PACKED, dst,
                                         MPIR_ALLTOALL_TAG, recvbuf, 1, newtype,
                                         src, MPIR_ALLTOALL_TAG, comm,
                                         MPI_STATUS_IGNORE, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = TRUE;
                MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
            }
            MPIR_Type_free_impl(&newtype);
            pof2 *= 2;
        }
        /* Rotate blocks in recvbuf upwards by (rank + 1) blocks.  Need
         * a temporary buffer of the same size as recvbuf. */
        /* get true extent of recvtype */
        MPIR_Type_get_true_extent_impl(recvtype, &recvtype_true_lb, &recvtype_true_extent);
        recvbuf_extent = recvcount * comm_size * (MPIR_MAX(recvtype_true_extent, recvtype_extent));
        MPIU_CHKLMEM_MALLOC(tmp_buf, void *, recvbuf_extent, mpi_errno, "tmp_buf");
        /* adjust for potential negative lower bound in datatype */
        tmp_buf = (void *)((char*)tmp_buf - recvtype_true_lb);
        mpi_errno = MPIR_Localcopy((char *) recvbuf + (rank+1)*recvcount*recvtype_extent,
                                   (comm_size - rank - 1)*recvcount, recvtype, tmp_buf,
                                   (comm_size - rank - 1)*recvcount, recvtype);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        mpi_errno = MPIR_Localcopy(recvbuf, (rank+1)*recvcount, recvtype,
                                   (char *) tmp_buf + (comm_size-rank-1)*recvcount*recvtype_extent,
                                   (rank+1)*recvcount, recvtype);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        /* Blocks are in the reverse order now (comm_size-1 to 0).
         * Reorder them to (0 to comm_size-1) and store them in recvbuf. */
        for (i=0; i<comm_size; i++){
            mpi_errno = MPIR_Localcopy((char *) tmp_buf + i*recvcount*recvtype_extent,
                                       recvcount, recvtype,
                                       (char *) recvbuf + (comm_size-i-1)*recvcount*recvtype_extent,
                                       recvcount, recvtype);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }
#ifdef MPIR_OLD_SHORT_ALLTOALL_ALG
        /* Short message.  Use recursive doubling.  Each process sends all
           its data at each step along with all data it received in
           previous steps. */
        /* need to allocate temporary buffer of size
           sendbuf_extent*comm_size */
        /* get true extent of sendtype */
        MPIR_Type_get_true_extent_impl(sendtype, &sendtype_true_lb, &sendtype_true_extent);
        sendbuf_extent = sendcount * comm_size * (MPIR_MAX(sendtype_true_extent, sendtype_extent));
        MPIU_CHKLMEM_MALLOC(tmp_buf, void *, sendbuf_extent*comm_size, mpi_errno, "tmp_buf");
        /* adjust for potential negative lower bound in datatype */
        tmp_buf = (void *)((char*)tmp_buf - sendtype_true_lb);
        /* copy local sendbuf into tmp_buf at location indexed by rank */
        curr_cnt = sendcount*comm_size;
        mpi_errno = MPIR_Localcopy(sendbuf, curr_cnt, sendtype,
                                   ((char *)tmp_buf + rank*sendbuf_extent),
                                   curr_cnt, sendtype);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno);}
        mask = 0x1;
        i = 0;
        while (mask < comm_size) {
            dst = rank ^ mask;
            dst_tree_root = dst >> i;
            dst_tree_root <<= i;
            my_tree_root = rank >> i;
            my_tree_root <<= i;
            if (dst < comm_size) {
                mpi_errno = MPIC_Sendrecv_ft(((char *)tmp_buf + my_tree_root*sendbuf_extent),
                                             curr_cnt, sendtype,
                                             dst, MPIR_ALLTOALL_TAG,
                                             ((char *)tmp_buf + dst_tree_root*sendbuf_extent),
                                             sendbuf_extent*(comm_size-dst_tree_root),
                                             sendtype, dst, MPIR_ALLTOALL_TAG,
                                             comm, &status, errflag);
                if (mpi_errno) {
                    /* for communication errors, just record the error but continue */
                    *errflag = TRUE;
                    MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                    MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    last_recv_cnt = 0;
                } else
                    /* in case of non-power-of-two nodes, less data may be
                       received than specified */
                    MPIR_Get_count_impl(&status, sendtype, &last_recv_cnt);
                curr_cnt += last_recv_cnt;
            }
            /* if some processes in this process's subtree in this step
               did not have any destination process to communicate with
               because of non-power-of-two, we need to send them the
               result.  We use a logarithmic recursive-halfing algorithm
               for this. */
            if (dst_tree_root + mask > comm_size) {
                nprocs_completed = comm_size - my_tree_root - mask;
                /* nprocs_completed is the number of processes in this
                   subtree that have all the data.  Send data to others
                   in a tree fashion.  First find root of current tree
                   that is being divided into two.  k is the number of
                   least-significant bits in this process's rank that
                   must be zeroed out to find the rank of the root */
                j = mask;
                k = 0;
                while (j) {
                    j >>= 1;
                    k++;
                }
                k--;
                tmp_mask = mask >> 1;
                while (tmp_mask) {
                    dst = rank ^ tmp_mask;
                    tree_root = rank >> k;
                    tree_root <<= k;
                    /* send only if this proc has data and destination
                       doesn't have data.  at any step, multiple processes
                       can send if they have the data */
                    if ((dst > rank) &&
                        (rank < tree_root + nprocs_completed) &&
                        (dst >= tree_root + nprocs_completed)) {
                        /* send the data received in this step above */
                        mpi_errno = MPIC_Send_ft(((char *)tmp_buf + dst_tree_root*sendbuf_extent),
                                                 last_recv_cnt, sendtype,
                                                 dst, MPIR_ALLTOALL_TAG,
                                                 comm, errflag);
                        if (mpi_errno) {
                            /* for communication errors, just record the error but continue */
                            *errflag = TRUE;
                            MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                            MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                        }
                    }
                    /* recv only if this proc. doesn't have data and sender
                       has data */
                    else if ((dst < rank) &&
                             (dst < tree_root + nprocs_completed) &&
                             (rank >= tree_root + nprocs_completed)) {
                        mpi_errno = MPIC_Recv_ft(((char *)tmp_buf + dst_tree_root*sendbuf_extent),
                                                 sendbuf_extent*(comm_size-dst_tree_root),
                                                 sendtype, dst,
                                                 MPIR_ALLTOALL_TAG, comm, &status, errflag);
                        if (mpi_errno) {
                            /* for communication errors, just record the error but continue */
                            *errflag = TRUE;
                            MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                            MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                            last_recv_cnt = 0;
                        } else
                            MPIR_Get_count_impl(&status, sendtype, &last_recv_cnt);
                        curr_cnt += last_recv_cnt;
                    }
                    tmp_mask >>= 1;
                    k--;
                }
            }
            mask <<= 1;
            i++;
        }
/* MPID_nem_newmad_process_rdtype - describe a receive of a (possibly
 * noncontiguous) datatype to the NewMadeleine netmod as an iovec.
 *
 * If the segment decomposes into at most NMAD_IOV_MAX_DEPTH contiguous
 * blocks, those blocks are copied into the caller-provided *newmad_iov.
 * Otherwise a single temporary buffer of the full packed size is allocated
 * (rreq->dev.tmpbuf, to be unpacked after arrival) and exposed as one iov
 * entry.  *num_iov receives the resulting entry count.
 *
 * Returns an MPI error code. */
int MPID_nem_newmad_process_rdtype(MPID_Request **rreq_p, MPID_Datatype * dt_ptr,
                                   MPIDI_msg_sz_t data_sz, struct iovec *newmad_iov[],
                                   int *num_iov)
{
    MPID_Request *rreq = *rreq_p;
    MPIDI_msg_sz_t last;
    MPID_IOV *iov;
    int n_iov = 0;
    int mpi_errno = MPI_SUCCESS;
    int index;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_RDTYPE);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_RDTYPE);
    if (rreq->dev.segment_ptr == NULL) {
        rreq->dev.segment_ptr = MPID_Segment_alloc( );
        MPIU_ERR_CHKANDJUMP1((rreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
                             "**nomem", "**nomem %s", "MPID_Segment_alloc");
    }
    MPID_Segment_init(rreq->dev.user_buf, rreq->dev.user_count, rreq->dev.datatype,
                      rreq->dev.segment_ptr, 0);
    rreq->dev.segment_first = 0;
    rreq->dev.segment_size = data_sz;
    last = rreq->dev.segment_size;
    /* NOTE(review): the mxm counterpart passes an MPI_Aint* for the count
     * argument here; passing &n_iov (int) may be a width mismatch on
     * platforms where MPI_Aint is wider than int -- confirm against the
     * MPID_Segment_count_contig_blocks prototype. */
    MPID_Segment_count_contig_blocks(rreq->dev.segment_ptr,rreq->dev.segment_first,&last,&n_iov);
    MPIU_Assert(n_iov > 0);
    /* NOTE(review): this allocation is not checked for NULL (the mxm
     * counterpart asserts it), and 'last' is not reset to segment_size
     * before unpack_vector as the mxm counterpart does -- confirm whether
     * count_contig_blocks can modify 'last'. */
    iov = MPIU_Malloc(n_iov*sizeof(MPID_IOV));
    MPID_Segment_unpack_vector(rreq->dev.segment_ptr, rreq->dev.segment_first, &last,iov, &n_iov);
    MPIU_Assert(last == rreq->dev.segment_size);
#ifdef DEBUG
    for(index = 0; index < n_iov ; index++)
    {
        fprintf(stdout,"======================\n");
        fprintf(stdout,"RECV iov[%i]: [base %p][len %i]\n",index,
                iov[index].MPID_IOV_BUF,iov[index].MPID_IOV_LEN);
    }
#endif
    if(n_iov <= NMAD_IOV_MAX_DEPTH)
    {
        /* Fits: expose each contiguous block directly to NewMadeleine. */
        for(index=0; index < n_iov ; index++)
        {
            (*newmad_iov)[index].iov_base = iov[index].MPID_IOV_BUF;
            (*newmad_iov)[index].iov_len = iov[index].MPID_IOV_LEN;
        }
        rreq->dev.tmpbuf = NULL;
        *num_iov = n_iov;
    }
    else
    {
        /* Too many blocks: fall back to receiving into one packed buffer. */
        int packsize = 0;
        MPIR_Pack_size_impl(rreq->dev.user_count, rreq->dev.datatype, &packsize);
        rreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
        MPIU_Assert(rreq->dev.tmpbuf);
        rreq->dev.tmpbuf_sz = packsize;
        (*newmad_iov)[0].iov_base = (char *) rreq->dev.tmpbuf;
        (*newmad_iov)[0].iov_len = (uint32_t) packsize;
        *num_iov = 1 ;
    }
    MPIU_Free(iov);
  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_RDTYPE);
    return mpi_errno;
  fn_fail: ATTRIBUTE((unused))
    goto fn_exit;
}
/* MPIR_Bcast_binomial_MV2 - MVAPICH2 binomial-tree broadcast.  The visible
 * portion computes message size and, for noncontiguous or heterogeneous
 * data, packs the root's buffer into a scratch buffer before communicating.
 * NOTE(review): this chunk is truncated -- the binomial send/recv loop and
 * the fn_exit/fn_fail epilogue are not visible here. */
static int MPIR_Bcast_binomial_MV2(void *buffer, int count, MPI_Datatype datatype,
                                   int root, MPID_Comm *comm_ptr, int *errflag)
{
    int rank, comm_size, src, dst;
    int relative_rank, mask;
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int nbytes=0;
    int type_size, is_contig, is_homogeneous;
    int position;
    void *tmp_buf=NULL;
    MPI_Comm comm;
    MPID_Datatype *dtp;
    MPIU_CHKLMEM_DECL(1);
    comm = comm_ptr->handle;
    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;
    /* If there is only one process, return */
    if (comm_size == 1) goto fn_exit;
    /* Built-in datatypes are always contiguous; otherwise ask the datatype. */
    if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)
        is_contig = 1;
    else {
        MPID_Datatype_get_ptr(datatype, dtp);
        is_contig = dtp->is_contig;
    }
    is_homogeneous = 1;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = 0;
#endif
    /* MPI_Type_size() might not give the accurate size of the packed
     * datatype for heterogeneous systems (because of padding, encoding,
     * etc). On the other hand, MPI_Pack_size() can become very
     * expensive, depending on the implementation, especially for
     * heterogeneous systems. We want to use MPI_Type_size() wherever
     * possible, and MPI_Pack_size() in other places. */
    if (is_homogeneous)
        MPID_Datatype_get_size_macro(datatype, type_size);
    else
        MPIR_Pack_size_impl(1, datatype, &type_size);
    nbytes = type_size * count;
    if (!is_contig || !is_homogeneous)
    {
        /* Communicate out of a packed scratch buffer; only the root packs
         * (non-roots will unpack after receiving). */
        MPIU_CHKLMEM_MALLOC(tmp_buf, void *, nbytes, mpi_errno, "tmp_buf");
        /* TODO: Pipeline the packing and communication */
        position = 0;
        if (rank == root) {
            mpi_errno = MPIR_Pack_impl(buffer, count, datatype, tmp_buf, nbytes, &position);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }
    }
/* _mxm_process_rdtype - describe a receive of a noncontiguous datatype to
 * the MXM netmod as an iov list.
 *
 * If the segment decomposes into at most MXM_REQ_DATA_MAX_IOV contiguous
 * blocks, the blocks are written into *iov_buf (heap-allocating a larger
 * array when the count exceeds the inline MXM_MPICH_MAX_IOV capacity).
 * Otherwise a single temporary buffer of the full packed size is allocated
 * (rreq->dev.tmpbuf, unpacked after arrival) and exposed as one entry.
 * *iov_count receives the number of entries.
 *
 * Returns an MPI error code. */
static int _mxm_process_rdtype(MPID_Request ** rreq_p, MPI_Datatype datatype,
                               MPID_Datatype * dt_ptr, MPIDI_msg_sz_t data_sz,
                               const void *buf, int count,
                               mxm_req_buffer_t ** iov_buf, int *iov_count)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request *rreq = *rreq_p;
    MPIDI_msg_sz_t last;
    MPL_IOV *iov;
    int n_iov = 0;
    int index;
    if (rreq->dev.segment_ptr == NULL) {
        rreq->dev.segment_ptr = MPID_Segment_alloc();
        MPIR_ERR_CHKANDJUMP1((rreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
                             "**nomem", "**nomem %s", "MPID_Segment_alloc");
    }
    MPID_Segment_init(buf, count, datatype, rreq->dev.segment_ptr, 0);
    rreq->dev.segment_first = 0;
    rreq->dev.segment_size = data_sz;
    last = rreq->dev.segment_size;
    /* NOTE(review): '(MPI_Aint *) & n_iov' reinterprets an int as MPI_Aint;
     * on platforms where MPI_Aint is wider than int this writes past n_iov
     * -- confirm against the MPID_Segment_count_contig_blocks prototype. */
    MPID_Segment_count_contig_blocks(rreq->dev.segment_ptr, rreq->dev.segment_first, &last,
                                     (MPI_Aint *) & n_iov);
    MPIU_Assert(n_iov > 0);
    iov = MPIU_Malloc(n_iov * sizeof(*iov));
    MPIU_Assert(iov);
    /* 'last' is in/out; reset to the full segment size before unpacking. */
    last = rreq->dev.segment_size;
    MPID_Segment_unpack_vector(rreq->dev.segment_ptr, rreq->dev.segment_first, &last, iov, &n_iov);
    MPIU_Assert(last == rreq->dev.segment_size);
#if defined(MXM_DEBUG) && (MXM_DEBUG > 0)
    _dbg_mxm_output(7, "Recv Noncontiguous data vector %i entries (free slots : %i)\n",
                    n_iov, MXM_REQ_DATA_MAX_IOV);
    for (index = 0; index < n_iov; index++) {
        _dbg_mxm_output(7, "======= Recv iov[%i] = ptr : %p, len : %i \n",
                        index, iov[index].MPL_IOV_BUF, iov[index].MPL_IOV_LEN);
    }
#endif
    if (n_iov <= MXM_REQ_DATA_MAX_IOV) {
        /* Fits in MXM's iov limit; grow the iov array if it exceeds the
         * caller's inline capacity. */
        if (n_iov > MXM_MPICH_MAX_IOV) {
            *iov_buf = (mxm_req_buffer_t *) MPIU_Malloc(n_iov * sizeof(**iov_buf));
            MPIU_Assert(*iov_buf);
        }
        for (index = 0; index < n_iov; index++) {
            (*iov_buf)[index].ptr = iov[index].MPL_IOV_BUF;
            (*iov_buf)[index].length = iov[index].MPL_IOV_LEN;
        }
        rreq->dev.tmpbuf = NULL;
        rreq->dev.tmpbuf_sz = 0;
        *iov_count = n_iov;
    }
    else {
        /* Too many blocks: fall back to one packed temporary buffer. */
        MPI_Aint packsize = 0;
        MPIR_Pack_size_impl(rreq->dev.user_count, rreq->dev.datatype, &packsize);
        rreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
        MPIU_Assert(rreq->dev.tmpbuf);
        rreq->dev.tmpbuf_sz = packsize;
        (*iov_buf)[0].ptr = rreq->dev.tmpbuf;
        (*iov_buf)[0].length = (size_t) packsize;
        *iov_count = 1;
    }
    MPIU_Free(iov);
  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@ MPI_Sendrecv_replace - Sends and receives using a single buffer Input Parameters: + count - number of elements in send and receive buffer (integer) . datatype - type of elements in send and receive buffer (handle) . dest - rank of destination (integer) . sendtag - send message tag (integer) . source - rank of source (integer) . recvtag - receive message tag (integer) - comm - communicator (handle) Output Parameters: + buf - initial address of send and receive buffer (choice) - status - status object (Status) .N ThreadSafe .N Fortran .N FortranStatus .N Errors .N MPI_SUCCESS .N MPI_ERR_COMM .N MPI_ERR_COUNT .N MPI_ERR_TYPE .N MPI_ERR_TAG .N MPI_ERR_RANK .N MPI_ERR_TRUNCATE .N MPI_ERR_EXHAUSTED @*/ int MPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype, int dest, int sendtag, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { static const char FCNAME[] = "MPI_Sendrecv_replace"; int mpi_errno = MPI_SUCCESS; MPIR_Comm *comm_ptr = NULL; MPIR_CHKLMEM_DECL(1); MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_SENDRECV_REPLACE); MPIR_ERRTEST_INITIALIZED_ORDIE(); MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX); MPIR_FUNC_TERSE_PT2PT_ENTER_BOTH(MPID_STATE_MPI_SENDRECV_REPLACE); /* Convert handles to MPI objects. 
*/ MPIR_Comm_get_ptr(comm, comm_ptr); # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { /* Validate communicator */ MPIR_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE ); if (mpi_errno) goto fn_fail; /* Validate count */ MPIR_ERRTEST_COUNT(count, mpi_errno); /* Validate status (status_ignore is not the same as null) */ MPIR_ERRTEST_ARGNULL(status, "status", mpi_errno); /* Validate tags */ MPIR_ERRTEST_SEND_TAG(sendtag, mpi_errno); MPIR_ERRTEST_RECV_TAG(recvtag, mpi_errno); /* Validate source and destination */ MPIR_ERRTEST_SEND_RANK(comm_ptr, dest, mpi_errno); MPIR_ERRTEST_RECV_RANK(comm_ptr, source, mpi_errno); /* Validate datatype handle */ MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno); /* Validate datatype object */ if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) { MPIR_Datatype *datatype_ptr = NULL; MPIR_Datatype_get_ptr(datatype, datatype_ptr); MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno); if (mpi_errno) goto fn_fail; MPIR_Datatype_committed_ptr(datatype_ptr, mpi_errno); if (mpi_errno) goto fn_fail; } /* Validate buffer */ MPIR_ERRTEST_USERBUFFER(buf,count,datatype,mpi_errno); } MPID_END_ERROR_CHECKS; } # endif /* HAVE_ERROR_CHECKING */ /* ... body of routine ... 
*/ # if defined(MPID_Sendrecv_replace) { mpi_errno = MPID_Sendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag, comm_ptr, status) } # else { MPIR_Request * sreq; MPIR_Request * rreq; void * tmpbuf = NULL; MPI_Aint tmpbuf_size = 0; MPI_Aint tmpbuf_count = 0; if (count > 0 && dest != MPI_PROC_NULL) { MPIR_Pack_size_impl(count, datatype, &tmpbuf_size); MPIR_CHKLMEM_MALLOC_ORJUMP(tmpbuf, void *, tmpbuf_size, mpi_errno, "temporary send buffer", MPL_MEM_BUFFER); mpi_errno = MPIR_Pack_impl(buf, count, datatype, tmpbuf, tmpbuf_size, &tmpbuf_count); if (mpi_errno != MPI_SUCCESS) goto fn_fail; } mpi_errno = MPID_Irecv(buf, count, datatype, source, recvtag, comm_ptr, MPIR_CONTEXT_INTRA_PT2PT, &rreq); if (mpi_errno != MPI_SUCCESS) goto fn_fail; mpi_errno = MPID_Isend(tmpbuf, tmpbuf_count, MPI_PACKED, dest, sendtag, comm_ptr, MPIR_CONTEXT_INTRA_PT2PT, &sreq); if (mpi_errno != MPI_SUCCESS) { /* --BEGIN ERROR HANDLING-- */ /* FIXME: should we cancel the pending (possibly completed) receive request or wait for it to complete? */ MPIR_Request_free(rreq); goto fn_fail; /* --END ERROR HANDLING-- */ } if (!MPIR_Request_is_complete(sreq) || !MPIR_Request_is_complete(rreq)) { MPID_Progress_state progress_state; MPID_Progress_start(&progress_state); while (!MPIR_Request_is_complete(sreq) || !MPIR_Request_is_complete(rreq)) { mpi_errno = MPID_Progress_wait(&progress_state); if (mpi_errno != MPI_SUCCESS) { /* --BEGIN ERROR HANDLING-- */ MPID_Progress_end(&progress_state); goto fn_fail; /* --END ERROR HANDLING-- */ } } MPID_Progress_end(&progress_state); } if (status != MPI_STATUS_IGNORE) { *status = rreq->status; } if (mpi_errno == MPI_SUCCESS) { mpi_errno = rreq->status.MPI_ERROR; if (mpi_errno == MPI_SUCCESS) { mpi_errno = sreq->status.MPI_ERROR; } } MPIR_Request_free(sreq); MPIR_Request_free(rreq); }
/*@
    MPI_Pack_size - Returns the upper bound on the amount of space needed to
                    pack a message

Input Parameters:
+ incount - count argument to packing call (integer)
. datatype - datatype argument to packing call (handle)
- comm - communicator argument to packing call (handle)

Output Parameter:
. size - upper bound on size of packed message, in bytes (integer)

Notes:
The MPI standard document describes this in terms of 'MPI_Pack', but it
applies to both 'MPI_Pack' and 'MPI_Unpack'.  That is, the value 'size' is
the maximum that is needed by either 'MPI_Pack' or 'MPI_Unpack'.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int *size)
{
    MPID_Comm *comm_ptr = NULL;
    int mpi_errno = MPI_SUCCESS;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_PACK_SIZE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_PACK_SIZE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Second round of checks needs the communicator pointer. */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Datatype *datatype_ptr = NULL;

            MPIR_ERRTEST_COUNT(incount, mpi_errno);
            MPIR_ERRTEST_ARGNULL(size, "size", mpi_errno);
            if (mpi_errno) goto fn_fail;
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
            MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
            if (mpi_errno) goto fn_fail;
            if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN)
            {
                MPID_Datatype_get_ptr(datatype, datatype_ptr);
                MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
                MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
            }
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    MPIR_Pack_size_impl(incount, datatype, size);

    /* ... end of body of routine ... */

#ifdef HAVE_ERROR_CHECKING
  fn_exit:
#endif
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_PACK_SIZE);
    return mpi_errno;

    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
  fn_fail:
    {
        /* NOTE(review): FCNAME is not defined in this function body --
         * presumably supplied by a file-level macro/definition outside this
         * chunk; confirm. */
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_pack_size",
            "**mpi_pack_size %d %D %C %p", incount, datatype, comm, size);
    }
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
#   endif
    /* --END ERROR HANDLING-- */
}
/* MPIR_Bsend_isend - initiate a buffered-mode send out of the attached bsend
 * buffer.  Finds (or makes, by retiring completed sends) a free segment of
 * at least the packed message size, packs the user data into it, and starts
 * an MPID_Isend of MPI_PACKED data.  Fails with MPI_ERR_BUFFER if no segment
 * can be found after two passes. */
int MPIR_Bsend_isend(const void *buf, int count, MPI_Datatype dtype,
                     int dest, int tag, MPID_Comm *comm_ptr,
                     MPIR_Bsend_kind_t kind, MPID_Request **request )
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Bsend_data_t *p;
    MPIR_Bsend_msg_t *msg;
    int packsize, pass;

    /* Find a free segment and copy the data into it.  If we could have,
       we would already have used tBsend to send the message with no
       copying.

       We may want to decide here whether we need to pack at all
       or if we can just use (a MPIU_Memcpy) of the buffer.
    */

    /* We check the active buffer first.  This helps avoid storage
       fragmentation */
    mpi_errno = MPIR_Bsend_check_active();
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    /* MPI_PACKED counts are already in bytes; everything else is converted
     * via the pack size. */
    if (dtype != MPI_PACKED)
        MPIR_Pack_size_impl( count, dtype, &packsize );
    else
        packsize = count;

    MPIU_DBG_MSG_D(BSEND,TYPICAL,"looking for buffer of size %d", packsize);
    /*
     * Use two passes.  Each pass is the same; between the two passes,
     * attempt to complete any active requests, and start any pending
     * ones.  If the message can be initiated in the first pass,
     * do not perform the second pass.
     */
    for (pass = 0; pass < 2; pass++) {
        p = MPIR_Bsend_find_buffer( packsize );
        if (p) {
            MPIU_DBG_MSG_FMT(BSEND,TYPICAL,(MPIU_DBG_FDEST,
                             "found buffer of size %d with address %p",packsize,p));
            /* Found a segment */
            msg = &p->msg;
            /* Pack the data into the buffer */
            /* We may want to optimize for the special case of
               either primitive or contiguous types, and just
               use MPIU_Memcpy and the provided datatype */
            msg->count = 0;
            if (dtype != MPI_PACKED)
            {
                mpi_errno = MPIR_Pack_impl( buf, count, dtype, p->msg.msgbuf,
                                            packsize, &p->msg.count);
                if (mpi_errno) MPIU_ERR_POP(mpi_errno);
            }
            else
            {
                MPIU_Memcpy(p->msg.msgbuf, buf, count);
                p->msg.count = count;
            }
            /* Try to send the message.  We must use MPID_Isend
               because this call must not block */
            mpi_errno = MPID_Isend(msg->msgbuf, msg->count, MPI_PACKED,
                                   dest, tag, comm_ptr,
                                   MPID_CONTEXT_INTRA_PT2PT, &p->request );
            MPIU_ERR_CHKINTERNAL(mpi_errno, mpi_errno, "Bsend internal error: isend returned err");
            /* If the error is "request not available", we should
               put this on the pending list.  This will depend on
               how we signal failure to send. */
            if (p->request) {
                MPIU_DBG_MSG_FMT(BSEND,TYPICAL,
                    (MPIU_DBG_FDEST,"saving request %p in %p",p->request,p));
                /* An optimization is to check to see if the
                   data has already been sent.  The original code
                   to do this was commented out and probably did not match
                   the current request internals */
                MPIR_Bsend_take_buffer( p, p->msg.count );
                p->kind = kind;
                *request = p->request;
            }
            break;
        }
        /* If we found a buffer or we're in the second pass, then break.
           Note that the test on p here is redundant, as the code breaks
           out of the loop in the test above if a block p is found. */
        if (p || pass == 1) break;
        MPIU_DBG_MSG(BSEND,TYPICAL,"Could not find storage, checking active");
        /* Try to complete some pending bsends */
        MPIR_Bsend_check_active( );
        /* Give priority to any pending operations */
        MPIR_Bsend_retry_pending( );
    }

    if (!p) {
        /* Return error for no buffer space found */
        /* Generate a traceback of the allocated space, explaining why
           packsize could not be found */
        MPIU_DBG_MSG(BSEND,TYPICAL,"Could not find space; dumping arena" );
        MPIU_DBG_STMT(BSEND,TYPICAL,MPIR_Bsend_dump());
        MPIU_ERR_SETANDJUMP2(mpi_errno, MPI_ERR_BUFFER, "**bufbsend",
                             "**bufbsend %d %d", packsize, BsendBuffer.buffer_size);
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPIR_Alltoall_intra( const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPIR_Comm *comm_ptr, MPIR_Errflag_t *errflag ) { int comm_size, i, j, pof2; MPI_Aint sendtype_extent, recvtype_extent; MPI_Aint recvtype_true_extent, recvbuf_extent, recvtype_true_lb; int mpi_errno=MPI_SUCCESS, src, dst, rank, nbytes; int mpi_errno_ret = MPI_SUCCESS; MPI_Status status; int sendtype_size, block, *displs, count; MPI_Aint pack_size, position; MPI_Datatype newtype = MPI_DATATYPE_NULL; void *tmp_buf; MPIR_Request **reqarray; MPI_Status *starray; MPIR_CHKLMEM_DECL(6); if (recvcount == 0) return MPI_SUCCESS; comm_size = comm_ptr->local_size; rank = comm_ptr->rank; /* Get extent of send and recv types */ MPID_Datatype_get_extent_macro(recvtype, recvtype_extent); MPID_Datatype_get_extent_macro(sendtype, sendtype_extent); MPID_Datatype_get_size_macro(sendtype, sendtype_size); nbytes = sendtype_size * sendcount; if (sendbuf == MPI_IN_PLACE) { /* We use pair-wise sendrecv_replace in order to conserve memory usage, * which is keeping with the spirit of the MPI-2.2 Standard. But * because of this approach all processes must agree on the global * schedule of sendrecv_replace operations to avoid deadlock. * * Note that this is not an especially efficient algorithm in terms of * time and there will be multiple repeated malloc/free's rather than * maintaining a single buffer across the whole loop. Something like * MADRE is probably the best solution for the MPI_IN_PLACE scenario. 
*/ for (i = 0; i < comm_size; ++i) { /* start inner loop at i to avoid re-exchanging data */ for (j = i; j < comm_size; ++j) { if (rank == i) { /* also covers the (rank == i && rank == j) case */ mpi_errno = MPIC_Sendrecv_replace(((char *)recvbuf + j*recvcount*recvtype_extent), recvcount, recvtype, j, MPIR_ALLTOALL_TAG, j, MPIR_ALLTOALL_TAG, comm_ptr, &status, errflag); if (mpi_errno) { /* for communication errors, just record the error but continue */ *errflag = MPIR_ERR_GET_CLASS(mpi_errno); MPIR_ERR_SET(mpi_errno, *errflag, "**fail"); MPIR_ERR_ADD(mpi_errno_ret, mpi_errno); } } else if (rank == j) { /* same as above with i/j args reversed */ mpi_errno = MPIC_Sendrecv_replace(((char *)recvbuf + i*recvcount*recvtype_extent), recvcount, recvtype, i, MPIR_ALLTOALL_TAG, i, MPIR_ALLTOALL_TAG, comm_ptr, &status, errflag); if (mpi_errno) { /* for communication errors, just record the error but continue */ *errflag = MPIR_ERR_GET_CLASS(mpi_errno); MPIR_ERR_SET(mpi_errno, *errflag, "**fail"); MPIR_ERR_ADD(mpi_errno_ret, mpi_errno); } } } } } else if ((nbytes <= MPIR_CVAR_ALLTOALL_SHORT_MSG_SIZE) && (comm_size >= 8)) { /* use the indexing algorithm by Jehoshua Bruck et al, * IEEE TPDS, Nov. 97 */ /* allocate temporary buffer */ MPIR_Pack_size_impl(recvcount*comm_size, recvtype, &pack_size); MPIR_CHKLMEM_MALLOC(tmp_buf, void *, pack_size, mpi_errno, "tmp_buf"); /* Do Phase 1 of the algorithim. Shift the data blocks on process i * upwards by a distance of i blocks. Store the result in recvbuf. 
*/ mpi_errno = MPIR_Localcopy((char *) sendbuf + rank*sendcount*sendtype_extent, (comm_size - rank)*sendcount, sendtype, recvbuf, (comm_size - rank)*recvcount, recvtype); if (mpi_errno) { MPIR_ERR_POP(mpi_errno); } mpi_errno = MPIR_Localcopy(sendbuf, rank*sendcount, sendtype, (char *) recvbuf + (comm_size-rank)*recvcount*recvtype_extent, rank*recvcount, recvtype); if (mpi_errno) { MPIR_ERR_POP(mpi_errno); } /* Input data is now stored in recvbuf with datatype recvtype */ /* Now do Phase 2, the communication phase. It takes ceiling(lg p) steps. In each step i, each process sends to rank+2^i and receives from rank-2^i, and exchanges all data blocks whose ith bit is 1. */ /* allocate displacements array for indexed datatype used in communication */ MPIR_CHKLMEM_MALLOC(displs, int *, comm_size * sizeof(int), mpi_errno, "displs"); pof2 = 1; while (pof2 < comm_size) { dst = (rank + pof2) % comm_size; src = (rank - pof2 + comm_size) % comm_size; /* Exchange all data blocks whose ith bit is 1 */ /* Create an indexed datatype for the purpose */ count = 0; for (block=1; block<comm_size; block++) { if (block & pof2) { displs[count] = block * recvcount; count++; } } mpi_errno = MPIR_Type_create_indexed_block_impl(count, recvcount, displs, recvtype, &newtype); if (mpi_errno) MPIR_ERR_POP(mpi_errno); mpi_errno = MPIR_Type_commit_impl(&newtype); if (mpi_errno) MPIR_ERR_POP(mpi_errno); position = 0; mpi_errno = MPIR_Pack_impl(recvbuf, 1, newtype, tmp_buf, pack_size, &position); if (mpi_errno) MPIR_ERR_POP(mpi_errno); mpi_errno = MPIC_Sendrecv(tmp_buf, position, MPI_PACKED, dst, MPIR_ALLTOALL_TAG, recvbuf, 1, newtype, src, MPIR_ALLTOALL_TAG, comm_ptr, MPI_STATUS_IGNORE, errflag); if (mpi_errno) { /* for communication errors, just record the error but continue */ *errflag = MPIR_ERR_GET_CLASS(mpi_errno); MPIR_ERR_SET(mpi_errno, *errflag, "**fail"); MPIR_ERR_ADD(mpi_errno_ret, mpi_errno); } MPIR_Type_free_impl(&newtype); pof2 *= 2; } /* Rotate blocks in recvbuf upwards by (rank + 1) 
blocks. Need * a temporary buffer of the same size as recvbuf. */ /* get true extent of recvtype */ MPIR_Type_get_true_extent_impl(recvtype, &recvtype_true_lb, &recvtype_true_extent); recvbuf_extent = recvcount * comm_size * (MPL_MAX(recvtype_true_extent, recvtype_extent)); MPIR_CHKLMEM_MALLOC(tmp_buf, void *, recvbuf_extent, mpi_errno, "tmp_buf"); /* adjust for potential negative lower bound in datatype */ tmp_buf = (void *)((char*)tmp_buf - recvtype_true_lb); mpi_errno = MPIR_Localcopy((char *) recvbuf + (rank+1)*recvcount*recvtype_extent, (comm_size - rank - 1)*recvcount, recvtype, tmp_buf, (comm_size - rank - 1)*recvcount, recvtype); if (mpi_errno) { MPIR_ERR_POP(mpi_errno); } mpi_errno = MPIR_Localcopy(recvbuf, (rank+1)*recvcount, recvtype, (char *) tmp_buf + (comm_size-rank-1)*recvcount*recvtype_extent, (rank+1)*recvcount, recvtype); if (mpi_errno) { MPIR_ERR_POP(mpi_errno); } /* Blocks are in the reverse order now (comm_size-1 to 0). * Reorder them to (0 to comm_size-1) and store them in recvbuf. */ for (i=0; i<comm_size; i++){ mpi_errno = MPIR_Localcopy((char *) tmp_buf + i*recvcount*recvtype_extent, recvcount, recvtype, (char *) recvbuf + (comm_size-i-1)*recvcount*recvtype_extent, recvcount, recvtype); if (mpi_errno) MPIR_ERR_POP(mpi_errno); } }