int MPIR_Ireduce_scatter_block_sched_intra_auto(const void *sendbuf, void *recvbuf, int recvcount,
                                                MPI_Datatype datatype, MPI_Op op,
                                                MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int is_commutative;
    int total_count, type_size, nbytes;
    int comm_size;

    is_commutative = MPIR_Op_is_commutative(op);

    comm_size = comm_ptr->local_size;
    total_count = recvcount * comm_size;
    if (total_count == 0) {
        goto fn_exit;
    }
    MPIR_Datatype_get_size_macro(datatype, type_size);
    nbytes = total_count * type_size;

    /* select an appropriate algorithm based on commutativity and message size */
    if (is_commutative && (nbytes < MPIR_CVAR_REDUCE_SCATTER_COMMUTATIVE_LONG_MSG_SIZE)) {
        mpi_errno = MPIR_Ireduce_scatter_block_sched_intra_recursive_halving(sendbuf, recvbuf,
                                                                             recvcount, datatype,
                                                                             op, comm_ptr, s);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else if (is_commutative && (nbytes >= MPIR_CVAR_REDUCE_SCATTER_COMMUTATIVE_LONG_MSG_SIZE)) {
        mpi_errno = MPIR_Ireduce_scatter_block_sched_intra_pairwise(sendbuf, recvbuf, recvcount,
                                                                    datatype, op, comm_ptr, s);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else {    /* (!is_commutative) */
        if (MPL_is_pof2(comm_size, NULL)) {
            /* noncommutative, pof2 size */
            mpi_errno = MPIR_Ireduce_scatter_block_sched_intra_noncommutative(sendbuf, recvbuf,
                                                                              recvcount, datatype,
                                                                              op, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
        } else {
            /* noncommutative and non-pof2, use recursive doubling. */
            mpi_errno = MPIR_Ireduce_scatter_block_sched_intra_recursive_doubling(sendbuf, recvbuf,
                                                                                  recvcount,
                                                                                  datatype, op,
                                                                                  comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
        }
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPIR_Ireduce_scatter_block_impl(const void *sendbuf, void *recvbuf, int recvcount,
                                    MPI_Datatype datatype, MPI_Op op, MPIR_Comm * comm_ptr,
                                    MPIR_Request ** request)
{
    int mpi_errno = MPI_SUCCESS;
    int tag = -1;
    int is_commutative = MPIR_Op_is_commutative(op);
    MPIR_Sched_t s = MPIR_SCHED_NULL;

    *request = NULL;

    /* If the user picks one of the transport-enabled algorithms, branch there
     * before going down to the MPIR_Sched-based algorithms. */
    /* TODO - Eventually the intention is to replace all of the
     * MPIR_Sched-based algorithms with transport-enabled algorithms, but that
     * will require sufficient performance testing and replacement algorithms. */
    if (comm_ptr->comm_kind == MPIR_COMM_KIND__INTRACOMM) {
        /* intracommunicator */
        switch (MPIR_CVAR_IREDUCE_SCATTER_BLOCK_INTRA_ALGORITHM) {
            case MPIR_CVAR_IREDUCE_SCATTER_BLOCK_INTRA_ALGORITHM_gentran_recexch:
                if (is_commutative) {
                    mpi_errno =
                        MPIR_Ireduce_scatter_block_intra_gentran_recexch(sendbuf, recvbuf,
                                                                         recvcount, datatype, op,
                                                                         comm_ptr, request);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    goto fn_exit;
                }
                break;
            default:
                /* go down to the MPIR_Sched-based algorithms */
                break;
        }
    }

    mpi_errno = MPIR_Sched_next_tag(comm_ptr, &tag);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    mpi_errno = MPIR_Sched_create(&s);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Ireduce_scatter_block_sched(sendbuf, recvbuf, recvcount, datatype, op,
                                                 comm_ptr, s);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Sched_start(&s, comm_ptr, tag, request);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
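/* Usage sketch (illustrative only, not MPICH source): how the nonblocking
 * reduce-scatter-block that MPIR_Ireduce_scatter_block_impl backs is driven
 * from application code.  Every process contributes comm_size*RECVCOUNT
 * elements and receives the reduction of its own block; the MPIR_Sched
 * machinery above progresses the operation until MPI_Wait completes it. */
#include <mpi.h>
#include <stdlib.h>

#define RECVCOUNT 4

static void example_ireduce_scatter_block(MPI_Comm comm)
{
    int size;
    MPI_Comm_size(comm, &size);

    int *sendbuf = malloc(size * RECVCOUNT * sizeof(int));
    int recvbuf[RECVCOUNT];
    for (int i = 0; i < size * RECVCOUNT; i++)
        sendbuf[i] = i;

    MPI_Request req;
    MPI_Ireduce_scatter_block(sendbuf, recvbuf, RECVCOUNT, MPI_INT, MPI_SUM, comm, &req);
    /* ... overlap independent computation here ... */
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    free(sendbuf);
}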
int MPIR_Ireduce_sched_intra_binomial(const void *sendbuf, void *recvbuf, int count,
                                      MPI_Datatype datatype, MPI_Op op, int root,
                                      MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int comm_size, rank, is_commutative;
    int mask, relrank, source, lroot;
    MPI_Aint true_lb, true_extent, extent;
    void *tmp_buf;
    MPIR_SCHED_CHKPMEM_DECL(2);

    MPIR_Assert(comm_ptr->comm_kind == MPIR_COMM_KIND__INTRACOMM);

    if (count == 0)
        return MPI_SUCCESS;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    /* set op_errno to 0. stored in perthread structure */
    {
        MPIR_Per_thread_t *per_thread = NULL;
        int err = 0;

        MPID_THREADPRIV_KEY_GET_ADDR(MPIR_ThreadInfo.isThreaded, MPIR_Per_thread_key,
                                     MPIR_Per_thread, per_thread, &err);
        MPIR_Assert(err == 0);
        per_thread->op_errno = 0;
    }

    /* Create a temporary buffer */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPIR_Datatype_get_extent_macro(datatype, extent);

    is_commutative = MPIR_Op_is_commutative(op);

    /* I think this is the worst case, so we can avoid an assert()
     * inside the for loop */
    /* should be buf+{this}? */
    MPIR_Ensure_Aint_fits_in_pointer(count * MPL_MAX(extent, true_extent));

    MPIR_SCHED_CHKPMEM_MALLOC(tmp_buf, void *, count * (MPL_MAX(extent, true_extent)),
                              mpi_errno, "temporary buffer", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    /* If I'm not the root, then my recvbuf may not be valid, therefore
     * I have to allocate a temporary one */
    if (rank != root) {
        MPIR_SCHED_CHKPMEM_MALLOC(recvbuf, void *, count * (MPL_MAX(extent, true_extent)),
                                  mpi_errno, "receive buffer", MPL_MEM_BUFFER);
        recvbuf = (void *) ((char *) recvbuf - true_lb);
    }
static int MPIR_Reduce_binomial(const void *sendbuf, void *recvbuf, int count,
                                MPI_Datatype datatype, MPI_Op op, int root,
                                MPID_Comm * comm_ptr, int *errflag)
{
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    MPI_Status status;
    int comm_size, rank, is_commutative, type_size ATTRIBUTE((unused));
    int mask, relrank, source, lroot;
    MPI_Aint true_lb, true_extent, extent;
    void *tmp_buf;
    MPI_Comm comm;
    MPIU_CHKLMEM_DECL(2);

    if (count == 0)
        return MPI_SUCCESS;

    comm = comm_ptr->handle;
    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    /* Create a temporary buffer */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPID_Datatype_get_extent_macro(datatype, extent);

    is_commutative = MPIR_Op_is_commutative(op);

    /* I think this is the worst case, so we can avoid an assert()
     * inside the for loop */
    /* should be buf+{this}? */
    MPID_Ensure_Aint_fits_in_pointer(count * MPIR_MAX(extent, true_extent));

    MPIU_CHKLMEM_MALLOC(tmp_buf, void *, count * (MPIR_MAX(extent, true_extent)),
                        mpi_errno, "temporary buffer");
    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    /* If I'm not the root, then my recvbuf may not be valid, therefore
     * I have to allocate a temporary one */
    if (rank != root) {
        MPIU_CHKLMEM_MALLOC(recvbuf, void *, count * (MPIR_MAX(extent, true_extent)),
                            mpi_errno, "receive buffer");
        recvbuf = (void *) ((char *) recvbuf - true_lb);
    }
/* This function implements a binomial tree reduce.
 *
 * Cost = lgp.alpha + n.lgp.beta + n.lgp.gamma
 */
int MPIR_Reduce_intra_binomial(const void *sendbuf, void *recvbuf, int count,
                               MPI_Datatype datatype, MPI_Op op, int root,
                               MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    MPI_Status status;
    int comm_size, rank, is_commutative, type_size ATTRIBUTE((unused));
    int mask, relrank, source, lroot;
    MPI_Aint true_lb, true_extent, extent;
    void *tmp_buf;
    MPIR_CHKLMEM_DECL(2);

    if (count == 0)
        return MPI_SUCCESS;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    /* Create a temporary buffer */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPIR_Datatype_get_extent_macro(datatype, extent);

    is_commutative = MPIR_Op_is_commutative(op);

    MPIR_CHKLMEM_MALLOC(tmp_buf, void *, count * (MPL_MAX(extent, true_extent)),
                        mpi_errno, "temporary buffer", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    /* If I'm not the root, then my recvbuf may not be valid, therefore
     * I have to allocate a temporary one */
    if (rank != root) {
        MPIR_CHKLMEM_MALLOC(recvbuf, void *, count * (MPL_MAX(extent, true_extent)),
                            mpi_errno, "receive buffer", MPL_MEM_BUFFER);
        recvbuf = (void *) ((char *) recvbuf - true_lb);
    }
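/* A minimal sketch (assumptions: commutative op, no error handling, public
 * MPI point-to-point calls and MPI_Reduce_local in place of MPICH's internal
 * MPIC_*/MPIR_* helpers) of the binomial-tree reduction loop that follows
 * the buffer setup in the three excerpts above.  recvbuf is assumed to
 * already hold this process's contribution. */
#include <mpi.h>

static void binomial_reduce_sketch(void *recvbuf, void *tmp_buf, int count,
                                   MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
    int rank, comm_size, mask, relrank, source;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &comm_size);

    relrank = (rank - root + comm_size) % comm_size;    /* shift so root becomes rank 0 */
    mask = 0x1;
    while (mask < comm_size) {
        if ((relrank & mask) == 0) {
            /* receiver at this level: fold the child's partial result in */
            source = relrank | mask;
            if (source < comm_size) {
                source = (source + root) % comm_size;
                MPI_Recv(tmp_buf, count, datatype, source, 0, comm, MPI_STATUS_IGNORE);
                MPI_Reduce_local(tmp_buf, recvbuf, count, datatype, op);
            }
        } else {
            /* sender: pass the accumulated value to the parent and drop out */
            int dst = ((relrank & (~mask)) + root) % comm_size;
            MPI_Send(recvbuf, count, datatype, dst, 0, comm);
            break;
        }
        mask <<= 1;
    }
}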
int MPIR_Iscan_rec_dbl(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Aint true_extent, true_lb, extent;
    int is_commutative;
    int mask, dst, rank, comm_size;
    void *partial_scan = NULL;
    void *tmp_buf = NULL;
    MPIR_SCHED_CHKPMEM_DECL(2);

    if (count == 0)
        goto fn_exit;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    is_commutative = MPIR_Op_is_commutative(op);

    /* need to allocate temporary buffer to store partial scan */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPID_Datatype_get_extent_macro(datatype, extent);
    MPIR_SCHED_CHKPMEM_MALLOC(partial_scan, void *, count * (MPL_MAX(extent, true_extent)),
                              mpi_errno, "partial_scan");

    /* This eventually gets malloc()ed as a temp buffer, not added to
     * any user buffers */
    MPIR_Ensure_Aint_fits_in_pointer(count * MPL_MAX(extent, true_extent));

    /* adjust for potential negative lower bound in datatype */
    partial_scan = (void *) ((char *) partial_scan - true_lb);

    /* need to allocate temporary buffer to store incoming data */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_buf, void *, count * (MPL_MAX(extent, true_extent)),
                              mpi_errno, "tmp_buf");

    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    /* Since this is an inclusive scan, copy local contribution into recvbuf. */
    if (sendbuf != MPI_IN_PLACE) {
        mpi_errno = MPIR_Sched_copy(sendbuf, count, datatype, recvbuf, count, datatype, s);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    if (sendbuf != MPI_IN_PLACE)
        mpi_errno = MPIR_Sched_copy(sendbuf, count, datatype, partial_scan, count, datatype, s);
    else
        mpi_errno = MPIR_Sched_copy(recvbuf, count, datatype, partial_scan, count, datatype, s);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mask = 0x1;
    while (mask < comm_size) {
        dst = rank ^ mask;
        if (dst < comm_size) {
            /* Send partial_scan to dst. Recv into tmp_buf */
            mpi_errno = MPIR_Sched_send(partial_scan, count, datatype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            /* sendrecv, no barrier here */
            mpi_errno = MPIR_Sched_recv(tmp_buf, count, datatype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);

            if (rank > dst) {
                mpi_errno = MPIR_Sched_reduce(tmp_buf, partial_scan, count, datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                mpi_errno = MPIR_Sched_reduce(tmp_buf, recvbuf, count, datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);
            } else {
                if (is_commutative) {
                    mpi_errno = MPIR_Sched_reduce(tmp_buf, partial_scan, count, datatype, op, s);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    MPIR_SCHED_BARRIER(s);
                } else {
                    mpi_errno = MPIR_Sched_reduce(partial_scan, tmp_buf, count, datatype, op, s);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    MPIR_SCHED_BARRIER(s);

                    mpi_errno = MPIR_Sched_copy(tmp_buf, count, datatype,
                                                partial_scan, count, datatype, s);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    MPIR_SCHED_BARRIER(s);
                }
            }
        }
        mask <<= 1;
    }

    MPIR_SCHED_CHKPMEM_COMMIT(s);
  fn_exit:
    return mpi_errno;
  fn_fail:
    MPIR_SCHED_CHKPMEM_REAP(s);
    goto fn_exit;
}
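/* Usage sketch (illustrative only, not MPICH source): the inclusive-scan
 * semantics that MPIR_Iscan_rec_dbl schedules.  With MPI_SUM, rank i ends up
 * with the sum of the contributions from ranks 0..i. */
#include <mpi.h>

static int example_scan(MPI_Comm comm)
{
    int rank, partial;
    MPI_Comm_rank(comm, &rank);
    /* each rank contributes its own rank number */
    MPI_Scan(&rank, &partial, 1, MPI_INT, MPI_SUM, comm);
    /* partial == 0 + 1 + ... + rank */
    return partial;
}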
int MPIR_Allreduce_intra(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                         MPI_Op op, MPID_Comm * comm_ptr, int *errflag)
{
    int is_homogeneous;
#ifdef MPID_HAS_HETERO
    int rc;
#endif
    int comm_size, rank, type_size;
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int mask, dst, is_commutative, pof2, newrank, rem, newdst, i,
        send_idx, recv_idx, last_idx, send_cnt, recv_cnt, *cnts, *disps;
    MPI_Aint true_extent, true_lb, extent;
    void *tmp_buf;
    MPI_Comm comm;
    MPIU_CHKLMEM_DECL(3);

    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_ENTER(comm_ptr);

    if (count == 0)
        goto fn_exit;
    comm = comm_ptr->handle;

    is_commutative = MPIR_Op_is_commutative(op);

#if defined(USE_SMP_COLLECTIVES)
    /* is the op commutative? We do SMP optimizations only if it is. */
    if (MPIR_Comm_is_node_aware(comm_ptr) && is_commutative) {
        /* on each node, do a reduce to the local root */
        if (comm_ptr->node_comm != NULL) {
            /* take care of the MPI_IN_PLACE case. For reduce, MPI_IN_PLACE
             * is specified only on the root; for allreduce it is specified
             * on all processes. */
            if ((sendbuf == MPI_IN_PLACE) && (comm_ptr->node_comm->rank != 0)) {
                /* IN_PLACE and not root of reduce. Data supplied to this
                 * allreduce is in recvbuf. Pass that as the sendbuf to reduce. */
                mpi_errno = MPIR_Reduce_impl(recvbuf, NULL, count, datatype, op, 0,
                                             comm_ptr->node_comm, errflag);
                if (mpi_errno) {
                    /* for communication errors, just record the error but continue */
                    *errflag = TRUE;
                    MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                    MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                }
            } else {
                mpi_errno = MPIR_Reduce_impl(sendbuf, recvbuf, count, datatype, op, 0,
                                             comm_ptr->node_comm, errflag);
                if (mpi_errno) {
                    /* for communication errors, just record the error but continue */
                    *errflag = TRUE;
                    MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                    MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                }
            }
        } else {
            /* only one process on the node. copy sendbuf to recvbuf */
            if (sendbuf != MPI_IN_PLACE) {
                mpi_errno = MPIR_Localcopy(sendbuf, count, datatype, recvbuf, count, datatype);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);
            }
        }

        /* now do an IN_PLACE allreduce among the local roots of all nodes */
        if (comm_ptr->node_roots_comm != NULL) {
            mpi_errno = allreduce_intra_or_coll_fn(MPI_IN_PLACE, recvbuf, count, datatype, op,
                                                   comm_ptr->node_roots_comm, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = TRUE;
                MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
            }
        }

        /* now broadcast the result among local processes */
        if (comm_ptr->node_comm != NULL) {
            mpi_errno = MPIR_Bcast_impl(recvbuf, count, datatype, 0,
                                        comm_ptr->node_comm, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = TRUE;
                MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
            }
        }
        goto fn_exit;
    }
#endif

    is_homogeneous = 1;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = 0;
#endif

#ifdef MPID_HAS_HETERO
    if (!is_homogeneous) {
        /* heterogeneous. To get the same result on all processes, we
         * do a reduce to 0 and then broadcast. */
        mpi_errno = MPIR_Reduce_impl(sendbuf, recvbuf, count, datatype, op, 0,
                                     comm_ptr, errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag = TRUE;
            MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
            MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
        }

        mpi_errno = MPIR_Bcast_impl(recvbuf, count, datatype, 0, comm_ptr, errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag = TRUE;
            MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
            MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
        }
    } else
#endif /* MPID_HAS_HETERO */
    {
        /* homogeneous */
        comm_size = comm_ptr->local_size;
        rank = comm_ptr->rank;

        is_commutative = MPIR_Op_is_commutative(op);

        /* need to allocate temporary buffer to store incoming data */
        MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
        MPID_Datatype_get_extent_macro(datatype, extent);

        MPID_Ensure_Aint_fits_in_pointer(count * MPIR_MAX(extent, true_extent));
        MPIU_CHKLMEM_MALLOC(tmp_buf, void *, count * (MPIR_MAX(extent, true_extent)),
                            mpi_errno, "temporary buffer");

        /* adjust for potential negative lower bound in datatype */
        tmp_buf = (void *) ((char *) tmp_buf - true_lb);

        /* copy local data into recvbuf */
        if (sendbuf != MPI_IN_PLACE) {
            mpi_errno = MPIR_Localcopy(sendbuf, count, datatype, recvbuf, count, datatype);
            if (mpi_errno)
                MPIU_ERR_POP(mpi_errno);
        }

        MPID_Datatype_get_size_macro(datatype, type_size);

        /* find nearest power-of-two less than or equal to comm_size */
        pof2 = 1;
        while (pof2 <= comm_size)
            pof2 <<= 1;
        pof2 >>= 1;

        rem = comm_size - pof2;

        /* In the non-power-of-two case, all even-numbered
         * processes of rank < 2*rem send their data to
         * (rank+1). These even-numbered processes no longer
         * participate in the algorithm until the very end. The
         * remaining processes form a nice power-of-two. */

        if (rank < 2 * rem) {
            if (rank % 2 == 0) {
                /* even */
                mpi_errno = MPIC_Send_ft(recvbuf, count, datatype, rank + 1,
                                         MPIR_ALLREDUCE_TAG, comm, errflag);
                if (mpi_errno) {
                    /* for communication errors, just record the error but continue */
                    *errflag = TRUE;
                    MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                    MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                }

                /* temporarily set the rank to -1 so that this process
                 * does not participate in recursive doubling */
                newrank = -1;
            } else {
                /* odd */
                mpi_errno = MPIC_Recv_ft(tmp_buf, count, datatype, rank - 1,
                                         MPIR_ALLREDUCE_TAG, comm, MPI_STATUS_IGNORE, errflag);
                if (mpi_errno) {
                    /* for communication errors, just record the error but continue */
                    *errflag = TRUE;
                    MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                    MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                }

                /* do the reduction on received data. since the
                 * ordering is right, it doesn't matter whether
                 * the operation is commutative or not. */
                mpi_errno = MPIR_Reduce_local_impl(tmp_buf, recvbuf, count, datatype, op);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);

                /* change the rank */
                newrank = rank / 2;
            }
        } else {    /* rank >= 2*rem */
            newrank = rank - rem;
        }

        /* If op is user-defined or count is less than pof2, use
         * recursive doubling algorithm. Otherwise do a reduce-scatter
         * followed by allgather. (If op is user-defined,
         * derived datatypes are allowed and the user could pass basic
         * datatypes on one process and derived on another as long as
         * the type maps are the same. Breaking up derived
         * datatypes to do the reduce-scatter is tricky, therefore
         * using recursive doubling in that case.) */

        if (newrank != -1) {
            if ((count * type_size <= MPIR_PARAM_ALLREDUCE_SHORT_MSG_SIZE) ||
                (HANDLE_GET_KIND(op) != HANDLE_KIND_BUILTIN) || (count < pof2)) {
                /* use recursive doubling */
                mask = 0x1;
                while (mask < pof2) {
                    newdst = newrank ^ mask;
                    /* find real rank of dest */
                    dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

                    /* Send the most current data, which is in recvbuf.
                     * Recv into tmp_buf */
                    mpi_errno = MPIC_Sendrecv_ft(recvbuf, count, datatype, dst,
                                                 MPIR_ALLREDUCE_TAG, tmp_buf, count, datatype,
                                                 dst, MPIR_ALLREDUCE_TAG, comm,
                                                 MPI_STATUS_IGNORE, errflag);
                    if (mpi_errno) {
                        /* for communication errors, just record the error but continue */
                        *errflag = TRUE;
                        MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                        MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    }

                    /* tmp_buf contains data received in this step.
                     * recvbuf contains data accumulated so far */
                    if (is_commutative || (dst < rank)) {
                        /* op is commutative OR the order is already right */
                        mpi_errno = MPIR_Reduce_local_impl(tmp_buf, recvbuf, count,
                                                           datatype, op);
                        if (mpi_errno)
                            MPIU_ERR_POP(mpi_errno);
                    } else {
                        /* op is noncommutative and the order is not right */
                        mpi_errno = MPIR_Reduce_local_impl(recvbuf, tmp_buf, count,
                                                           datatype, op);
                        if (mpi_errno)
                            MPIU_ERR_POP(mpi_errno);

                        /* copy result back into recvbuf */
                        mpi_errno = MPIR_Localcopy(tmp_buf, count, datatype,
                                                   recvbuf, count, datatype);
                        if (mpi_errno)
                            MPIU_ERR_POP(mpi_errno);
                    }
                    mask <<= 1;
                }
            } else {
                /* do a reduce-scatter followed by allgather */

                /* for the reduce-scatter, calculate the count that
                 * each process receives and the displacement within
                 * the buffer */
                MPIU_CHKLMEM_MALLOC(cnts, int *, pof2 * sizeof(int), mpi_errno, "counts");
                MPIU_CHKLMEM_MALLOC(disps, int *, pof2 * sizeof(int), mpi_errno,
                                    "displacements");

                for (i = 0; i < (pof2 - 1); i++)
                    cnts[i] = count / pof2;
                cnts[pof2 - 1] = count - (count / pof2) * (pof2 - 1);

                disps[0] = 0;
                for (i = 1; i < pof2; i++)
                    disps[i] = disps[i - 1] + cnts[i - 1];

                mask = 0x1;
                send_idx = recv_idx = 0;
                last_idx = pof2;
                while (mask < pof2) {
                    newdst = newrank ^ mask;
                    /* find real rank of dest */
                    dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

                    send_cnt = recv_cnt = 0;
                    if (newrank < newdst) {
                        send_idx = recv_idx + pof2 / (mask * 2);
                        for (i = send_idx; i < last_idx; i++)
                            send_cnt += cnts[i];
                        for (i = recv_idx; i < send_idx; i++)
                            recv_cnt += cnts[i];
                    } else {
                        recv_idx = send_idx + pof2 / (mask * 2);
                        for (i = send_idx; i < recv_idx; i++)
                            send_cnt += cnts[i];
                        for (i = recv_idx; i < last_idx; i++)
                            recv_cnt += cnts[i];
                    }

                    /* Send data from recvbuf. Recv into tmp_buf */
                    mpi_errno = MPIC_Sendrecv_ft((char *) recvbuf + disps[send_idx] * extent,
                                                 send_cnt, datatype, dst, MPIR_ALLREDUCE_TAG,
                                                 (char *) tmp_buf + disps[recv_idx] * extent,
                                                 recv_cnt, datatype, dst, MPIR_ALLREDUCE_TAG,
                                                 comm, MPI_STATUS_IGNORE, errflag);
                    if (mpi_errno) {
                        /* for communication errors, just record the error but continue */
                        *errflag = TRUE;
                        MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                        MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    }

                    /* tmp_buf contains data received in this step.
                     * recvbuf contains data accumulated so far */

                    /* This algorithm is used only for predefined ops
                     * and predefined ops are always commutative. */
                    mpi_errno = MPIR_Reduce_local_impl((char *) tmp_buf +
                                                       disps[recv_idx] * extent,
                                                       (char *) recvbuf +
                                                       disps[recv_idx] * extent,
                                                       recv_cnt, datatype, op);
                    if (mpi_errno)
                        MPIU_ERR_POP(mpi_errno);

                    /* update send_idx for next iteration */
                    send_idx = recv_idx;
                    mask <<= 1;

                    /* update last_idx, but not in last iteration because
                     * the value is needed in the allgather step below. */
                    if (mask < pof2)
                        last_idx = recv_idx + pof2 / mask;
                }

                /* now do the allgather */

                mask >>= 1;
                while (mask > 0) {
                    newdst = newrank ^ mask;
                    /* find real rank of dest */
                    dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

                    send_cnt = recv_cnt = 0;
                    if (newrank < newdst) {
                        /* update last_idx except on first iteration */
                        if (mask != pof2 / 2)
                            last_idx = last_idx + pof2 / (mask * 2);

                        recv_idx = send_idx + pof2 / (mask * 2);
                        for (i = send_idx; i < recv_idx; i++)
                            send_cnt += cnts[i];
                        for (i = recv_idx; i < last_idx; i++)
                            recv_cnt += cnts[i];
                    } else {
                        recv_idx = send_idx - pof2 / (mask * 2);
                        for (i = send_idx; i < last_idx; i++)
                            send_cnt += cnts[i];
                        for (i = recv_idx; i < send_idx; i++)
                            recv_cnt += cnts[i];
                    }

                    mpi_errno = MPIC_Sendrecv_ft((char *) recvbuf + disps[send_idx] * extent,
                                                 send_cnt, datatype, dst, MPIR_ALLREDUCE_TAG,
                                                 (char *) recvbuf + disps[recv_idx] * extent,
                                                 recv_cnt, datatype, dst, MPIR_ALLREDUCE_TAG,
                                                 comm, MPI_STATUS_IGNORE, errflag);
                    if (mpi_errno) {
                        /* for communication errors, just record the error but continue */
                        *errflag = TRUE;
                        MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                        MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
                    }

                    if (newrank > newdst)
                        send_idx = recv_idx;

                    mask >>= 1;
                }
            }
        }

        /* In the non-power-of-two case, all odd-numbered
         * processes of rank < 2*rem send the result to
         * (rank-1), the ranks who didn't participate above. */
        if (rank < 2 * rem) {
            if (rank % 2)       /* odd */
                mpi_errno = MPIC_Send_ft(recvbuf, count, datatype, rank - 1,
                                         MPIR_ALLREDUCE_TAG, comm, errflag);
            else        /* even */
                mpi_errno = MPIC_Recv_ft(recvbuf, count, datatype, rank + 1,
                                         MPIR_ALLREDUCE_TAG, comm, MPI_STATUS_IGNORE, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = TRUE;
                MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
                MPIU_ERR_ADD(mpi_errno_ret, mpi_errno);
            }
        }
    }

  fn_exit:
    /* check if multiple threads are calling this collective function */
    MPIDU_ERR_CHECK_MULTIPLE_THREADS_EXIT(comm_ptr);
    MPIU_CHKLMEM_FREEALL();
    if (mpi_errno_ret)
        mpi_errno = mpi_errno_ret;
    return (mpi_errno);
  fn_fail:
    goto fn_exit;
}
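/* A minimal sketch (hypothetical helper, not MPICH source) of the
 * reduce-scatter count/displacement setup used in the allreduce above.  For
 * count = 10 and pof2 = 4 it yields cnts = {2, 2, 2, 4} and
 * disps = {0, 2, 4, 6}: the last block absorbs the remainder, so the pof2
 * blocks exactly cover all count elements. */
static void fill_cnts_disps(int count, int pof2, int *cnts, int *disps)
{
    int i;
    for (i = 0; i < pof2 - 1; i++)
        cnts[i] = count / pof2;
    cnts[pof2 - 1] = count - (count / pof2) * (pof2 - 1);   /* remainder block */
    disps[0] = 0;
    for (i = 1; i < pof2; i++)
        disps[i] = disps[i - 1] + cnts[i - 1];
}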
int MPIR_Iexscan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                 MPI_Op op, MPID_Comm * comm_ptr, MPID_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int rank, comm_size;
    int mask, dst, is_commutative, flag;
    MPI_Aint true_extent, true_lb, extent;
    void *partial_scan, *tmp_buf;
    MPIR_SCHED_CHKPMEM_DECL(2);

    if (count == 0)
        goto fn_exit;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    is_commutative = MPIR_Op_is_commutative(op);

    /* need to allocate temporary buffer to store partial scan */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPID_Datatype_get_extent_macro(datatype, extent);
    MPIR_SCHED_CHKPMEM_MALLOC(partial_scan, void *, (count * (MPIR_MAX(true_extent, extent))),
                              mpi_errno, "partial_scan");
    /* adjust for potential negative lower bound in datatype */
    partial_scan = (void *) ((char *) partial_scan - true_lb);

    /* need to allocate temporary buffer to store incoming data */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_buf, void *, (count * (MPIR_MAX(true_extent, extent))),
                              mpi_errno, "tmp_buf");
    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    mpi_errno = MPID_Sched_copy((sendbuf == MPI_IN_PLACE ? recvbuf : sendbuf),
                                count, datatype, partial_scan, count, datatype, s);
    if (mpi_errno)
        MPIU_ERR_POP(mpi_errno);

    flag = 0;
    mask = 0x1;
    while (mask < comm_size) {
        dst = rank ^ mask;
        if (dst < comm_size) {
            /* Send partial_scan to dst. Recv into tmp_buf */
            mpi_errno = MPID_Sched_send(partial_scan, count, datatype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIU_ERR_POP(mpi_errno);
            /* sendrecv, no barrier here */
            mpi_errno = MPID_Sched_recv(tmp_buf, count, datatype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIU_ERR_POP(mpi_errno);
            MPID_SCHED_BARRIER(s);

            if (rank > dst) {
                mpi_errno = MPID_Sched_reduce(tmp_buf, partial_scan, count, datatype, op, s);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);
                MPID_SCHED_BARRIER(s);

                /* On rank 0, recvbuf is not defined. For sendbuf==MPI_IN_PLACE
                 * recvbuf must not change (per MPI-2.2). On rank 1, recvbuf is
                 * to be set equal to the value in sendbuf on rank 0. On others,
                 * recvbuf is the scan of values in the sendbufs on lower ranks. */
                if (rank != 0) {
                    if (flag == 0) {
                        /* simply copy data received from rank 0 into recvbuf */
                        mpi_errno = MPID_Sched_copy(tmp_buf, count, datatype,
                                                    recvbuf, count, datatype, s);
                        if (mpi_errno)
                            MPIU_ERR_POP(mpi_errno);
                        MPID_SCHED_BARRIER(s);
                        flag = 1;
                    } else {
                        mpi_errno = MPID_Sched_reduce(tmp_buf, recvbuf, count, datatype, op, s);
                        if (mpi_errno)
                            MPIU_ERR_POP(mpi_errno);
                        MPID_SCHED_BARRIER(s);
                    }
                }
            } else {
                if (is_commutative) {
                    mpi_errno = MPID_Sched_reduce(tmp_buf, partial_scan, count, datatype, op, s);
                    if (mpi_errno)
                        MPIU_ERR_POP(mpi_errno);
                    MPID_SCHED_BARRIER(s);
                } else {
                    mpi_errno = MPID_Sched_reduce(partial_scan, tmp_buf, count, datatype, op, s);
                    if (mpi_errno)
                        MPIU_ERR_POP(mpi_errno);
                    MPID_SCHED_BARRIER(s);

                    mpi_errno = MPID_Sched_copy(tmp_buf, count, datatype,
                                                partial_scan, count, datatype, s);
                    if (mpi_errno)
                        MPIU_ERR_POP(mpi_errno);
                    MPID_SCHED_BARRIER(s);
                }
            }
        }
        mask <<= 1;
    }

    MPIR_SCHED_CHKPMEM_COMMIT(s);
  fn_exit:
    return mpi_errno;
  fn_fail:
    MPIR_SCHED_CHKPMEM_REAP(s);
    goto fn_exit;
}
int MPIR_Reduce_intra_auto(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                           MPI_Op op, int root, MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int is_commutative, type_size, pof2;
    int nbytes = 0;

    if (count == 0)
        return MPI_SUCCESS;

    /* is the op commutative? We do SMP optimizations only if it is. */
    is_commutative = MPIR_Op_is_commutative(op);

    MPIR_Datatype_get_size_macro(datatype, type_size);
    nbytes = MPIR_CVAR_MAX_SMP_REDUCE_MSG_SIZE ? type_size * count : 0;

    if (MPIR_CVAR_ENABLE_SMP_COLLECTIVES && MPIR_CVAR_ENABLE_SMP_REDUCE &&
        MPIR_Comm_is_node_aware(comm_ptr) && is_commutative &&
        nbytes <= MPIR_CVAR_MAX_SMP_REDUCE_MSG_SIZE) {
        mpi_errno = MPIR_Reduce_intra_smp(sendbuf, recvbuf, count, datatype, op, root,
                                          comm_ptr, errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
            MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
            MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
        }
        goto fn_exit;
    }

    MPIR_Datatype_get_size_macro(datatype, type_size);

    /* get nearest power-of-two less than or equal to comm_size */
    pof2 = comm_ptr->pof2;

    if ((count * type_size > MPIR_CVAR_REDUCE_SHORT_MSG_SIZE) &&
        (HANDLE_GET_KIND(op) == HANDLE_KIND_BUILTIN) && (count >= pof2)) {
        /* do a reduce-scatter followed by gather to root. */
        mpi_errno = MPIR_Reduce_intra_reduce_scatter_gather(sendbuf, recvbuf, count, datatype,
                                                            op, root, comm_ptr, errflag);
    } else {
        /* use a binomial tree algorithm */
        mpi_errno = MPIR_Reduce_intra_binomial(sendbuf, recvbuf, count, datatype,
                                               op, root, comm_ptr, errflag);
    }
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }

  fn_exit:
    if (mpi_errno_ret)
        mpi_errno = mpi_errno_ret;
    else if (*errflag != MPIR_ERR_NONE)
        MPIR_ERR_SET(mpi_errno, *errflag, "**coll_fail");
    return mpi_errno;
}
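/* Usage sketch (illustrative only, not MPICH source): the reduce that
 * MPIR_Reduce_intra_auto dispatches.  Per the MPI standard, recvbuf is only
 * significant at the root, and the root may pass MPI_IN_PLACE as sendbuf to
 * reduce into its own buffer; this is exactly why the binomial
 * implementations above allocate a temporary recvbuf on non-root ranks. */
#include <mpi.h>

static void example_reduce(double *buf, int count, int root, MPI_Comm comm)
{
    int rank;
    MPI_Comm_rank(comm, &rank);
    if (rank == root)
        MPI_Reduce(MPI_IN_PLACE, buf, count, MPI_DOUBLE, MPI_SUM, root, comm);
    else
        MPI_Reduce(buf, NULL, count, MPI_DOUBLE, MPI_SUM, root, comm);
}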
int MPIR_Allreduce_intra_recursive_doubling(const void *sendbuf, void *recvbuf, int count,
                                            MPI_Datatype datatype, MPI_Op op,
                                            MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
    MPIR_CHKLMEM_DECL(1);
#ifdef MPID_HAS_HETERO
    int is_homogeneous;
    int rc;
#endif
    int comm_size, rank;
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int mask, dst, is_commutative, pof2, newrank, rem, newdst;
    MPI_Aint true_extent, true_lb, extent;
    void *tmp_buf;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    is_commutative = MPIR_Op_is_commutative(op);

    /* need to allocate temporary buffer to store incoming data */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPIR_Datatype_get_extent_macro(datatype, extent);

    MPIR_Ensure_Aint_fits_in_pointer(count * MPL_MAX(extent, true_extent));
    MPIR_CHKLMEM_MALLOC(tmp_buf, void *, count * (MPL_MAX(extent, true_extent)),
                        mpi_errno, "temporary buffer", MPL_MEM_BUFFER);

    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    /* copy local data into recvbuf */
    if (sendbuf != MPI_IN_PLACE) {
        mpi_errno = MPIR_Localcopy(sendbuf, count, datatype, recvbuf, count, datatype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    /* get nearest power-of-two less than or equal to comm_size */
    pof2 = comm_ptr->pof2;

    rem = comm_size - pof2;

    /* In the non-power-of-two case, all even-numbered
     * processes of rank < 2*rem send their data to
     * (rank+1). These even-numbered processes no longer
     * participate in the algorithm until the very end. The
     * remaining processes form a nice power-of-two. */

    if (rank < 2 * rem) {
        if (rank % 2 == 0) {
            /* even */
            mpi_errno = MPIC_Send(recvbuf, count, datatype, rank + 1,
                                  MPIR_ALLREDUCE_TAG, comm_ptr, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* temporarily set the rank to -1 so that this process
             * does not participate in recursive doubling */
            newrank = -1;
        } else {
            /* odd */
            mpi_errno = MPIC_Recv(tmp_buf, count, datatype, rank - 1,
                                  MPIR_ALLREDUCE_TAG, comm_ptr, MPI_STATUS_IGNORE, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* do the reduction on received data. since the
             * ordering is right, it doesn't matter whether
             * the operation is commutative or not. */
            mpi_errno = MPIR_Reduce_local(tmp_buf, recvbuf, count, datatype, op);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);

            /* change the rank */
            newrank = rank / 2;
        }
    } else {    /* rank >= 2*rem */
        newrank = rank - rem;
    }

    /* If op is user-defined or count is less than pof2, use
     * recursive doubling algorithm. Otherwise do a reduce-scatter
     * followed by allgather. (If op is user-defined,
     * derived datatypes are allowed and the user could pass basic
     * datatypes on one process and derived on another as long as
     * the type maps are the same. Breaking up derived
     * datatypes to do the reduce-scatter is tricky, therefore
     * using recursive doubling in that case.) */

    if (newrank != -1) {
        mask = 0x1;
        while (mask < pof2) {
            newdst = newrank ^ mask;
            /* find real rank of dest */
            dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

            /* Send the most current data, which is in recvbuf.
             * Recv into tmp_buf */
            mpi_errno = MPIC_Sendrecv(recvbuf, count, datatype, dst, MPIR_ALLREDUCE_TAG,
                                      tmp_buf, count, datatype, dst, MPIR_ALLREDUCE_TAG,
                                      comm_ptr, MPI_STATUS_IGNORE, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* tmp_buf contains data received in this step.
             * recvbuf contains data accumulated so far */
            if (is_commutative || (dst < rank)) {
                /* op is commutative OR the order is already right */
                mpi_errno = MPIR_Reduce_local(tmp_buf, recvbuf, count, datatype, op);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
            } else {
                /* op is noncommutative and the order is not right */
                mpi_errno = MPIR_Reduce_local(recvbuf, tmp_buf, count, datatype, op);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);

                /* copy result back into recvbuf */
                mpi_errno = MPIR_Localcopy(tmp_buf, count, datatype, recvbuf, count, datatype);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
            }
            mask <<= 1;
        }
    }

    /* In the non-power-of-two case, all odd-numbered
     * processes of rank < 2*rem send the result to
     * (rank-1), the ranks who didn't participate above. */
    if (rank < 2 * rem) {
        if (rank % 2)   /* odd */
            mpi_errno = MPIC_Send(recvbuf, count, datatype, rank - 1,
                                  MPIR_ALLREDUCE_TAG, comm_ptr, errflag);
        else    /* even */
            mpi_errno = MPIC_Recv(recvbuf, count, datatype, rank + 1,
                                  MPIR_ALLREDUCE_TAG, comm_ptr, MPI_STATUS_IGNORE, errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
            MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
            MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
        }
    }

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
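/* Usage sketch (illustrative only, not MPICH source): a noncommutative
 * user-defined op.  The (is_commutative || dst < rank) branch above exists
 * for exactly this case: operands must be combined in ascending rank order,
 * so when the partner's rank is higher its data goes on the right. */
#include <mpi.h>

/* noncommutative "take the left operand" combiner: a op b == a */
static void take_left(void *in, void *inout, int *len, MPI_Datatype * dt)
{
    int i;
    (void) dt;
    for (i = 0; i < *len; i++)
        ((int *) inout)[i] = ((int *) in)[i];
}

static void example_noncommutative_allreduce(int *val, int *res, MPI_Comm comm)
{
    MPI_Op op;
    MPI_Op_create(take_left, 0 /* commute = false */ , &op);
    MPI_Allreduce(val, res, 1, MPI_INT, op, comm);      /* res == rank 0's value */
    MPI_Op_free(&op);
}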
int MPIR_Ireduce_scatter_sched_intra_recursive_doubling(const void *sendbuf, void *recvbuf,
                                                        const int recvcounts[],
                                                        MPI_Datatype datatype, MPI_Op op,
                                                        MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int rank, comm_size, i;
    MPI_Aint extent, true_extent, true_lb;
    int *disps;
    void *tmp_recvbuf, *tmp_results;
    int type_size ATTRIBUTE((unused)), dis[2], blklens[2], total_count, dst;
    int mask, dst_tree_root, my_tree_root, j, k;
    int received;
    MPI_Datatype sendtype, recvtype;
    int nprocs_completed, tmp_mask, tree_root, is_commutative;
    MPIR_SCHED_CHKPMEM_DECL(5);

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    MPIR_Datatype_get_extent_macro(datatype, extent);
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);

    is_commutative = MPIR_Op_is_commutative(op);

    MPIR_SCHED_CHKPMEM_MALLOC(disps, int *, comm_size * sizeof(int), mpi_errno, "disps",
                              MPL_MEM_BUFFER);

    total_count = 0;
    for (i = 0; i < comm_size; i++) {
        disps[i] = total_count;
        total_count += recvcounts[i];
    }

    if (total_count == 0) {
        goto fn_exit;
    }

    MPIR_Datatype_get_size_macro(datatype, type_size);

    /* total_count*extent eventually gets malloced. it isn't added to
     * a user-passed in buffer */
    MPIR_Ensure_Aint_fits_in_pointer(total_count * MPL_MAX(true_extent, extent));

    /* need to allocate temporary buffer to receive incoming data */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_recvbuf, void *, total_count * (MPL_MAX(true_extent, extent)),
                              mpi_errno, "tmp_recvbuf", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_recvbuf = (void *) ((char *) tmp_recvbuf - true_lb);

    /* need to allocate another temporary buffer to accumulate results */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_results, void *, total_count * (MPL_MAX(true_extent, extent)),
                              mpi_errno, "tmp_results", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_results = (void *) ((char *) tmp_results - true_lb);

    /* copy sendbuf into tmp_results */
    if (sendbuf != MPI_IN_PLACE)
        mpi_errno = MPIR_Sched_copy(sendbuf, total_count, datatype,
                                    tmp_results, total_count, datatype, s);
    else
        mpi_errno = MPIR_Sched_copy(recvbuf, total_count, datatype,
                                    tmp_results, total_count, datatype, s);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_SCHED_BARRIER(s);

    mask = 0x1;
    i = 0;
    while (mask < comm_size) {
        dst = rank ^ mask;

        dst_tree_root = dst >> i;
        dst_tree_root <<= i;

        my_tree_root = rank >> i;
        my_tree_root <<= i;

        /* At step 1, processes exchange (n-n/p) amount of data; at step 2,
         * (n-2n/p) amount of data; at step 3, (n-4n/p) amount of data, and
         * so forth. We use derived datatypes for this.
         *
         * At each step, a process does not need to send data indexed from
         * my_tree_root to my_tree_root+mask-1. Similarly, a process won't
         * receive data indexed from dst_tree_root to dst_tree_root+mask-1. */

        /* calculate sendtype */
        blklens[0] = blklens[1] = 0;
        for (j = 0; j < my_tree_root; j++)
            blklens[0] += recvcounts[j];
        for (j = my_tree_root + mask; j < comm_size; j++)
            blklens[1] += recvcounts[j];

        dis[0] = 0;
        dis[1] = blklens[0];
        for (j = my_tree_root; (j < my_tree_root + mask) && (j < comm_size); j++)
            dis[1] += recvcounts[j];

        mpi_errno = MPIR_Type_indexed_impl(2, blklens, dis, datatype, &sendtype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIR_Type_commit_impl(&sendtype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* calculate recvtype */
        blklens[0] = blklens[1] = 0;
        for (j = 0; j < dst_tree_root && j < comm_size; j++)
            blklens[0] += recvcounts[j];
        for (j = dst_tree_root + mask; j < comm_size; j++)
            blklens[1] += recvcounts[j];

        dis[0] = 0;
        dis[1] = blklens[0];
        for (j = dst_tree_root; (j < dst_tree_root + mask) && (j < comm_size); j++)
            dis[1] += recvcounts[j];

        mpi_errno = MPIR_Type_indexed_impl(2, blklens, dis, datatype, &recvtype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIR_Type_commit_impl(&recvtype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        received = 0;
        if (dst < comm_size) {
            /* tmp_results contains data to be sent in each step. Data is
             * received in tmp_recvbuf and then accumulated into
             * tmp_results. accumulation is done later below. */
            mpi_errno = MPIR_Sched_send(tmp_results, 1, sendtype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            mpi_errno = MPIR_Sched_recv(tmp_recvbuf, 1, recvtype, dst, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);
            received = 1;
        }

        /* if some processes in this process's subtree in this step did not
         * have any destination process to communicate with because of
         * non-power-of-two, we need to send them the result. We use a
         * logarithmic recursive-halving algorithm for this. */
        if (dst_tree_root + mask > comm_size) {
            nprocs_completed = comm_size - my_tree_root - mask;
            /* nprocs_completed is the number of processes in this subtree
             * that have all the data. Send data to others in a tree
             * fashion. First find root of current tree that is being
             * divided into two. k is the number of least-significant bits
             * in this process's rank that must be zeroed out to find the
             * rank of the root */
            j = mask;
            k = 0;
            while (j) {
                j >>= 1;
                k++;
            }
            k--;

            tmp_mask = mask >> 1;
            while (tmp_mask) {
                dst = rank ^ tmp_mask;

                tree_root = rank >> k;
                tree_root <<= k;

                /* send only if this proc has data and destination doesn't
                 * have data. at any step, multiple processes can send if
                 * they have the data */
                if ((dst > rank) && (rank < tree_root + nprocs_completed) &&
                    (dst >= tree_root + nprocs_completed)) {
                    /* send the current result */
                    mpi_errno = MPIR_Sched_send(tmp_recvbuf, 1, recvtype, dst, comm_ptr, s);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    MPIR_SCHED_BARRIER(s);
                }
                /* recv only if this proc. doesn't have data and sender has data */
                else if ((dst < rank) && (dst < tree_root + nprocs_completed) &&
                         (rank >= tree_root + nprocs_completed)) {
                    mpi_errno = MPIR_Sched_recv(tmp_recvbuf, 1, recvtype, dst, comm_ptr, s);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                    MPIR_SCHED_BARRIER(s);
                    received = 1;
                }
                tmp_mask >>= 1;
                k--;
            }
        }

        /* N.B. The following comment comes from the FT version of
         * MPI_Reduce_scatter. It does not currently apply to this code, but
         * will in the future when we update the NBC code to be fault-tolerant
         * in roughly the same fashion. [goodell@ 2011-03-03] */
        /* The following reduction is done here instead of after the
         * MPIC_Sendrecv or MPIC_Recv above. This is because to do it above,
         * in the noncommutative case, we would need an extra temp buffer so
         * as not to overwrite temp_recvbuf, because temp_recvbuf may have to
         * be communicated to other processes in the non-power-of-two case.
         * To avoid that extra allocation, we do the reduce here. */
        if (received) {
            if (is_commutative || (dst_tree_root < my_tree_root)) {
                mpi_errno = MPIR_Sched_reduce(tmp_recvbuf, tmp_results,
                                              blklens[0], datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                mpi_errno = MPIR_Sched_reduce(((char *) tmp_recvbuf + dis[1] * extent),
                                              ((char *) tmp_results + dis[1] * extent),
                                              blklens[1], datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);
            } else {
                mpi_errno = MPIR_Sched_reduce(tmp_results, tmp_recvbuf,
                                              blklens[0], datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                mpi_errno = MPIR_Sched_reduce(((char *) tmp_results + dis[1] * extent),
                                              ((char *) tmp_recvbuf + dis[1] * extent),
                                              blklens[1], datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);

                /* copy result back into tmp_results */
                mpi_errno = MPIR_Sched_copy(tmp_recvbuf, 1, recvtype,
                                            tmp_results, 1, recvtype, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);
            }
        }

        MPIR_Type_free_impl(&sendtype);
        MPIR_Type_free_impl(&recvtype);

        mask <<= 1;
        i++;
    }
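/* A minimal sketch (hypothetical helper, not MPICH source) of the tree-root
 * arithmetic used in the recursive-doubling loop above: clearing the low i
 * bits of a rank yields the root of its size-2^i subtree.  For example,
 * tree_root(5, 2) == 4, since rank 5 is binary 101. */
static int tree_root(int rank, int i)
{
    return (rank >> i) << i;    /* zero out the i least-significant bits */
}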
int MPIR_Ireduce_scatter_sched_intra_recursive_halving(const void *sendbuf, void *recvbuf,
                                                       const int recvcounts[],
                                                       MPI_Datatype datatype, MPI_Op op,
                                                       MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int rank, comm_size, i;
    MPI_Aint extent, true_extent, true_lb;
    int *disps;
    void *tmp_recvbuf, *tmp_results;
    int type_size ATTRIBUTE((unused)), total_count, dst;
    int mask;
    int *newcnts, *newdisps, rem, newdst, send_idx, recv_idx, last_idx, send_cnt, recv_cnt;
    int pof2, old_i, newrank;
    MPIR_SCHED_CHKPMEM_DECL(5);

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    MPIR_Datatype_get_extent_macro(datatype, extent);
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);

#ifdef HAVE_ERROR_CHECKING
    MPIR_Assert(MPIR_Op_is_commutative(op));
#endif

    MPIR_SCHED_CHKPMEM_MALLOC(disps, int *, comm_size * sizeof(int), mpi_errno, "disps",
                              MPL_MEM_BUFFER);

    total_count = 0;
    for (i = 0; i < comm_size; i++) {
        disps[i] = total_count;
        total_count += recvcounts[i];
    }

    if (total_count == 0) {
        goto fn_exit;
    }

    MPIR_Datatype_get_size_macro(datatype, type_size);

    /* allocate temp. buffer to receive incoming data */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_recvbuf, void *, total_count * (MPL_MAX(true_extent, extent)),
                              mpi_errno, "tmp_recvbuf", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_recvbuf = (void *) ((char *) tmp_recvbuf - true_lb);

    /* need to allocate another temporary buffer to accumulate
     * results because recvbuf may not be big enough */
    MPIR_SCHED_CHKPMEM_MALLOC(tmp_results, void *, total_count * (MPL_MAX(true_extent, extent)),
                              mpi_errno, "tmp_results", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_results = (void *) ((char *) tmp_results - true_lb);

    /* copy sendbuf into tmp_results */
    if (sendbuf != MPI_IN_PLACE)
        mpi_errno = MPIR_Sched_copy(sendbuf, total_count, datatype,
                                    tmp_results, total_count, datatype, s);
    else
        mpi_errno = MPIR_Sched_copy(recvbuf, total_count, datatype,
                                    tmp_results, total_count, datatype, s);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    MPIR_SCHED_BARRIER(s);

    pof2 = comm_ptr->pof2;

    rem = comm_size - pof2;

    /* In the non-power-of-two case, all even-numbered
     * processes of rank < 2*rem send their data to
     * (rank+1). These even-numbered processes no longer
     * participate in the algorithm until the very end. The
     * remaining processes form a nice power-of-two. */

    if (rank < 2 * rem) {
        if (rank % 2 == 0) {
            /* even */
            mpi_errno = MPIR_Sched_send(tmp_results, total_count, datatype,
                                        rank + 1, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);

            /* temporarily set the rank to -1 so that this process
             * does not participate in recursive doubling */
            newrank = -1;
        } else {
            /* odd */
            mpi_errno = MPIR_Sched_recv(tmp_recvbuf, total_count, datatype,
                                        rank - 1, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);

            /* do the reduction on received data. since the
             * ordering is right, it doesn't matter whether
             * the operation is commutative or not. */
            mpi_errno = MPIR_Sched_reduce(tmp_recvbuf, tmp_results, total_count,
                                          datatype, op, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);

            /* change the rank */
            newrank = rank / 2;
        }
    } else {    /* rank >= 2*rem */
        newrank = rank - rem;
    }

    if (newrank != -1) {
        /* recalculate the recvcounts and disps arrays because the
         * even-numbered processes who no longer participate will
         * have their result calculated by the process to their
         * right (rank+1). */
        MPIR_SCHED_CHKPMEM_MALLOC(newcnts, int *, pof2 * sizeof(int), mpi_errno, "newcnts",
                                  MPL_MEM_BUFFER);
        MPIR_SCHED_CHKPMEM_MALLOC(newdisps, int *, pof2 * sizeof(int), mpi_errno, "newdisps",
                                  MPL_MEM_BUFFER);

        for (i = 0; i < pof2; i++) {
            /* what does i map to in the old ranking? */
            old_i = (i < rem) ? i * 2 + 1 : i + rem;
            if (old_i < 2 * rem) {
                /* This process has to also do its left neighbor's work */
                newcnts[i] = recvcounts[old_i] + recvcounts[old_i - 1];
            } else
                newcnts[i] = recvcounts[old_i];
        }

        newdisps[0] = 0;
        for (i = 1; i < pof2; i++)
            newdisps[i] = newdisps[i - 1] + newcnts[i - 1];

        mask = pof2 >> 1;
        send_idx = recv_idx = 0;
        last_idx = pof2;
        while (mask > 0) {
            newdst = newrank ^ mask;
            /* find real rank of dest */
            dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

            send_cnt = recv_cnt = 0;
            if (newrank < newdst) {
                send_idx = recv_idx + mask;
                for (i = send_idx; i < last_idx; i++)
                    send_cnt += newcnts[i];
                for (i = recv_idx; i < send_idx; i++)
                    recv_cnt += newcnts[i];
            } else {
                recv_idx = send_idx + mask;
                for (i = send_idx; i < recv_idx; i++)
                    send_cnt += newcnts[i];
                for (i = recv_idx; i < last_idx; i++)
                    recv_cnt += newcnts[i];
            }

            /* Send data from tmp_results. Recv into tmp_recvbuf */
            {
                /* avoid sending and receiving pointless 0-byte messages */
                int send_dst = (send_cnt ? dst : MPI_PROC_NULL);
                int recv_dst = (recv_cnt ? dst : MPI_PROC_NULL);

                mpi_errno = MPIR_Sched_send(((char *) tmp_results +
                                             newdisps[send_idx] * extent),
                                            send_cnt, datatype, send_dst, comm_ptr, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                mpi_errno = MPIR_Sched_recv(((char *) tmp_recvbuf +
                                             newdisps[recv_idx] * extent),
                                            recv_cnt, datatype, recv_dst, comm_ptr, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);
            }

            /* tmp_recvbuf contains data received in this step.
             * tmp_results contains data accumulated so far */
            if (recv_cnt) {
                mpi_errno = MPIR_Sched_reduce(((char *) tmp_recvbuf +
                                               newdisps[recv_idx] * extent),
                                              ((char *) tmp_results +
                                               newdisps[recv_idx] * extent),
                                              recv_cnt, datatype, op, s);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_SCHED_BARRIER(s);
            }

            /* update send_idx for next iteration */
            send_idx = recv_idx;
            last_idx = recv_idx + mask;
            mask >>= 1;
        }

        /* copy this process's result from tmp_results to recvbuf */
        if (recvcounts[rank]) {
            mpi_errno = MPIR_Sched_copy(((char *) tmp_results + disps[rank] * extent),
                                        recvcounts[rank], datatype,
                                        recvbuf, recvcounts[rank], datatype, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_SCHED_BARRIER(s);
        }
    }
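/* A minimal sketch (hypothetical helper, not MPICH source) of the count
 * remapping above.  With comm_size = 6, pof2 = 4 and rem = 2, surviving new
 * rank i corresponds to old rank old_i in {1, 3, 4, 5}; new ranks 0 and 1
 * each also carry the block of the even left neighbor they absorbed, so
 * newcnts = {r[0]+r[1], r[2]+r[3], r[4], r[5]}. */
static void remap_counts(const int recvcounts[], int pof2, int rem, int newcnts[])
{
    int i, old_i;
    for (i = 0; i < pof2; i++) {
        old_i = (i < rem) ? i * 2 + 1 : i + rem;
        if (old_i < 2 * rem)
            newcnts[i] = recvcounts[old_i] + recvcounts[old_i - 1];     /* own + left neighbor */
        else
            newcnts[i] = recvcounts[old_i];
    }
}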
int MPIR_Exscan_intra_recursive_doubling(const void *sendbuf, void *recvbuf, int count,
                                         MPI_Datatype datatype, MPI_Op op,
                                         MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
    MPI_Status status;
    int rank, comm_size;
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int mask, dst, is_commutative, flag;
    MPI_Aint true_extent, true_lb, extent;
    void *partial_scan, *tmp_buf;
    MPIR_CHKLMEM_DECL(2);

    if (count == 0)
        return MPI_SUCCESS;

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

    /* set op_errno to 0. stored in perthread structure */
    {
        MPIR_Per_thread_t *per_thread = NULL;
        int err = 0;

        MPID_THREADPRIV_KEY_GET_ADDR(MPIR_ThreadInfo.isThreaded, MPIR_Per_thread_key,
                                     MPIR_Per_thread, per_thread, &err);
        MPIR_Assert(err == 0);
        per_thread->op_errno = 0;
    }

    is_commutative = MPIR_Op_is_commutative(op);

    /* need to allocate temporary buffer to store partial scan */
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);
    MPIR_Datatype_get_extent_macro(datatype, extent);
    MPIR_CHKLMEM_MALLOC(partial_scan, void *, (count * (MPL_MAX(true_extent, extent))),
                        mpi_errno, "partial_scan", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    partial_scan = (void *) ((char *) partial_scan - true_lb);

    /* need to allocate temporary buffer to store incoming data */
    MPIR_CHKLMEM_MALLOC(tmp_buf, void *, (count * (MPL_MAX(true_extent, extent))),
                        mpi_errno, "tmp_buf", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *) ((char *) tmp_buf - true_lb);

    mpi_errno = MPIR_Localcopy((sendbuf == MPI_IN_PLACE ? (const void *) recvbuf : sendbuf),
                               count, datatype, partial_scan, count, datatype);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    flag = 0;
    mask = 0x1;
    while (mask < comm_size) {
        dst = rank ^ mask;
        if (dst < comm_size) {
            /* Send partial_scan to dst. Recv into tmp_buf */
            mpi_errno = MPIC_Sendrecv(partial_scan, count, datatype, dst, MPIR_EXSCAN_TAG,
                                      tmp_buf, count, datatype, dst, MPIR_EXSCAN_TAG,
                                      comm_ptr, &status, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            if (rank > dst) {
                mpi_errno = MPIR_Reduce_local(tmp_buf, partial_scan, count, datatype, op);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);

                /* On rank 0, recvbuf is not defined. For sendbuf==MPI_IN_PLACE
                 * recvbuf must not change (per MPI-2.2). On rank 1, recvbuf is
                 * to be set equal to the value in sendbuf on rank 0. On others,
                 * recvbuf is the scan of values in the sendbufs on lower ranks. */
                if (rank != 0) {
                    if (flag == 0) {
                        /* simply copy data received from rank 0 into recvbuf */
                        mpi_errno = MPIR_Localcopy(tmp_buf, count, datatype,
                                                   recvbuf, count, datatype);
                        if (mpi_errno)
                            MPIR_ERR_POP(mpi_errno);
                        flag = 1;
                    } else {
                        mpi_errno = MPIR_Reduce_local(tmp_buf, recvbuf, count, datatype, op);
                        if (mpi_errno)
                            MPIR_ERR_POP(mpi_errno);
                    }
                }
            } else {
                if (is_commutative) {
                    mpi_errno = MPIR_Reduce_local(tmp_buf, partial_scan, count, datatype, op);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                } else {
                    mpi_errno = MPIR_Reduce_local(partial_scan, tmp_buf, count, datatype, op);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);

                    mpi_errno = MPIR_Localcopy(tmp_buf, count, datatype,
                                               partial_scan, count, datatype);
                    if (mpi_errno)
                        MPIR_ERR_POP(mpi_errno);
                }
            }
        }
        mask <<= 1;
    }

    {
        MPIR_Per_thread_t *per_thread = NULL;
        int err = 0;

        MPID_THREADPRIV_KEY_GET_ADDR(MPIR_ThreadInfo.isThreaded, MPIR_Per_thread_key,
                                     MPIR_Per_thread, per_thread, &err);
        MPIR_Assert(err == 0);
        if (per_thread->op_errno)
            mpi_errno = per_thread->op_errno;
    }

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    if (mpi_errno_ret)
        mpi_errno = mpi_errno_ret;
    else if (*errflag != MPIR_ERR_NONE)
        MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**coll_fail");
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
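/* Usage sketch (illustrative only, not MPICH source): exclusive-scan
 * semantics.  Rank i receives the reduction of ranks 0..i-1; recvbuf on
 * rank 0 is left undefined, which is why the code above only touches
 * recvbuf when rank != 0. */
#include <mpi.h>

static void example_exscan(MPI_Comm comm)
{
    int rank, prefix = 0;
    MPI_Comm_rank(comm, &rank);
    MPI_Exscan(&rank, &prefix, 1, MPI_INT, MPI_SUM, comm);
    /* on rank i > 0: prefix == 0 + 1 + ... + (i-1); on rank 0: undefined */
}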
/* Algorithm: Recursive halving
 *
 * This is a recursive-halving algorithm in which the first p/2 processes send
 * the second n/2 data to their counterparts in the other half and receive the
 * first n/2 data from them. This procedure continues recursively, halving the
 * data communicated at each step, for a total of lgp steps. If the number of
 * processes is not a power-of-two, we convert it to the nearest lower
 * power-of-two by having the first few even-numbered processes send their data
 * to the neighboring odd-numbered process at (rank+1). Those odd-numbered
 * processes compute the result for their left neighbor as well in the
 * recursive halving algorithm, and then at the end send the result back to
 * the processes that didn't participate. Therefore, if p is a power-of-two:
 *
 * Cost = lgp.alpha + n.((p-1)/p).beta + n.((p-1)/p).gamma
 *
 * If p is not a power-of-two:
 *
 * Cost = (floor(lgp)+2).alpha + n.(1+(p-1+n)/p).beta + n.(1+(p-1)/p).gamma
 *
 * The above cost in the non power-of-two case is approximate because there is
 * some imbalance in the amount of work each process does because some
 * processes do the work of their neighbors as well. */
int MPIR_Reduce_scatter_block_intra_recursive_halving(const void *sendbuf, void *recvbuf,
                                                      int recvcount, MPI_Datatype datatype,
                                                      MPI_Op op, MPIR_Comm * comm_ptr,
                                                      MPIR_Errflag_t * errflag)
{
    int rank, comm_size, i;
    MPI_Aint extent, true_extent, true_lb;
    int *disps;
    void *tmp_recvbuf, *tmp_results;
    int mpi_errno = MPI_SUCCESS;
    int mpi_errno_ret = MPI_SUCCESS;
    int total_count, dst;
    int mask;
    int *newcnts, *newdisps, rem, newdst, send_idx, recv_idx, last_idx, send_cnt, recv_cnt;
    int pof2, old_i, newrank;
    MPIR_CHKLMEM_DECL(5);

    comm_size = comm_ptr->local_size;
    rank = comm_ptr->rank;

#ifdef HAVE_ERROR_CHECKING
    {
        int is_commutative;
        is_commutative = MPIR_Op_is_commutative(op);
        MPIR_Assert(is_commutative);
    }
#endif /* HAVE_ERROR_CHECKING */

    /* set op_errno to 0. stored in perthread structure */
    {
        MPIR_Per_thread_t *per_thread = NULL;
        int err = 0;

        MPID_THREADPRIV_KEY_GET_ADDR(MPIR_ThreadInfo.isThreaded, MPIR_Per_thread_key,
                                     MPIR_Per_thread, per_thread, &err);
        MPIR_Assert(err == 0);
        per_thread->op_errno = 0;
    }

    if (recvcount == 0) {
        goto fn_exit;
    }

    MPIR_Datatype_get_extent_macro(datatype, extent);
    MPIR_Type_get_true_extent_impl(datatype, &true_lb, &true_extent);

    MPIR_CHKLMEM_MALLOC(disps, int *, comm_size * sizeof(int), mpi_errno, "disps",
                        MPL_MEM_BUFFER);

    total_count = comm_size * recvcount;
    for (i = 0; i < comm_size; i++) {
        disps[i] = i * recvcount;
    }

    /* total_count*extent eventually gets malloced. it isn't added to
     * a user-passed in buffer */
    MPIR_Ensure_Aint_fits_in_pointer(total_count * MPL_MAX(true_extent, extent));

    /* commutative and short. use recursive halving algorithm */

    /* allocate temp. buffer to receive incoming data */
    MPIR_CHKLMEM_MALLOC(tmp_recvbuf, void *, total_count * (MPL_MAX(true_extent, extent)),
                        mpi_errno, "tmp_recvbuf", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_recvbuf = (void *) ((char *) tmp_recvbuf - true_lb);

    /* need to allocate another temporary buffer to accumulate
     * results because recvbuf may not be big enough */
    MPIR_CHKLMEM_MALLOC(tmp_results, void *, total_count * (MPL_MAX(true_extent, extent)),
                        mpi_errno, "tmp_results", MPL_MEM_BUFFER);
    /* adjust for potential negative lower bound in datatype */
    tmp_results = (void *) ((char *) tmp_results - true_lb);

    /* copy sendbuf into tmp_results */
    if (sendbuf != MPI_IN_PLACE)
        mpi_errno = MPIR_Localcopy(sendbuf, total_count, datatype,
                                   tmp_results, total_count, datatype);
    else
        mpi_errno = MPIR_Localcopy(recvbuf, total_count, datatype,
                                   tmp_results, total_count, datatype);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    pof2 = comm_ptr->pof2;

    rem = comm_size - pof2;

    /* In the non-power-of-two case, all even-numbered
     * processes of rank < 2*rem send their data to
     * (rank+1). These even-numbered processes no longer
     * participate in the algorithm until the very end. The
     * remaining processes form a nice power-of-two. */

    if (rank < 2 * rem) {
        if (rank % 2 == 0) {
            /* even */
            mpi_errno = MPIC_Send(tmp_results, total_count, datatype, rank + 1,
                                  MPIR_REDUCE_SCATTER_BLOCK_TAG, comm_ptr, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* temporarily set the rank to -1 so that this process
             * does not participate in recursive doubling */
            newrank = -1;
        } else {
            /* odd */
            mpi_errno = MPIC_Recv(tmp_recvbuf, total_count, datatype, rank - 1,
                                  MPIR_REDUCE_SCATTER_BLOCK_TAG, comm_ptr,
                                  MPI_STATUS_IGNORE, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* do the reduction on received data. since the
             * ordering is right, it doesn't matter whether
             * the operation is commutative or not. */
            mpi_errno = MPIR_Reduce_local(tmp_recvbuf, tmp_results, total_count, datatype, op);

            /* change the rank */
            newrank = rank / 2;
        }
    } else {    /* rank >= 2*rem */
        newrank = rank - rem;
    }

    if (newrank != -1) {
        /* recalculate the recvcnts and disps arrays because the
         * even-numbered processes who no longer participate will
         * have their result calculated by the process to their
         * right (rank+1). */
        MPIR_CHKLMEM_MALLOC(newcnts, int *, pof2 * sizeof(int), mpi_errno, "newcnts",
                            MPL_MEM_BUFFER);
        MPIR_CHKLMEM_MALLOC(newdisps, int *, pof2 * sizeof(int), mpi_errno, "newdisps",
                            MPL_MEM_BUFFER);

        for (i = 0; i < pof2; i++) {
            /* what does i map to in the old ranking? */
            old_i = (i < rem) ? i * 2 + 1 : i + rem;
            if (old_i < 2 * rem) {
                /* This process has to also do its left neighbor's work */
                newcnts[i] = 2 * recvcount;
            } else
                newcnts[i] = recvcount;
        }

        newdisps[0] = 0;
        for (i = 1; i < pof2; i++)
            newdisps[i] = newdisps[i - 1] + newcnts[i - 1];

        mask = pof2 >> 1;
        send_idx = recv_idx = 0;
        last_idx = pof2;
        while (mask > 0) {
            newdst = newrank ^ mask;
            /* find real rank of dest */
            dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

            send_cnt = recv_cnt = 0;
            if (newrank < newdst) {
                send_idx = recv_idx + mask;
                for (i = send_idx; i < last_idx; i++)
                    send_cnt += newcnts[i];
                for (i = recv_idx; i < send_idx; i++)
                    recv_cnt += newcnts[i];
            } else {
                recv_idx = send_idx + mask;
                for (i = send_idx; i < recv_idx; i++)
                    send_cnt += newcnts[i];
                for (i = recv_idx; i < last_idx; i++)
                    recv_cnt += newcnts[i];
            }

            /* Send data from tmp_results. Recv into tmp_recvbuf */
            if ((send_cnt != 0) && (recv_cnt != 0))
                mpi_errno = MPIC_Sendrecv((char *) tmp_results + newdisps[send_idx] * extent,
                                          send_cnt, datatype, dst,
                                          MPIR_REDUCE_SCATTER_BLOCK_TAG,
                                          (char *) tmp_recvbuf + newdisps[recv_idx] * extent,
                                          recv_cnt, datatype, dst,
                                          MPIR_REDUCE_SCATTER_BLOCK_TAG,
                                          comm_ptr, MPI_STATUS_IGNORE, errflag);
            else if ((send_cnt == 0) && (recv_cnt != 0))
                mpi_errno = MPIC_Recv((char *) tmp_recvbuf + newdisps[recv_idx] * extent,
                                      recv_cnt, datatype, dst,
                                      MPIR_REDUCE_SCATTER_BLOCK_TAG,
                                      comm_ptr, MPI_STATUS_IGNORE, errflag);
            else if ((recv_cnt == 0) && (send_cnt != 0))
                mpi_errno = MPIC_Send((char *) tmp_results + newdisps[send_idx] * extent,
                                      send_cnt, datatype, dst,
                                      MPIR_REDUCE_SCATTER_BLOCK_TAG, comm_ptr, errflag);
            if (mpi_errno) {
                /* for communication errors, just record the error but continue */
                *errflag = MPIR_ERR_GET_CLASS(mpi_errno);
                MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
                MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
            }

            /* tmp_recvbuf contains data received in this step.
             * tmp_results contains data accumulated so far */
            if (recv_cnt) {
                mpi_errno = MPIR_Reduce_local((char *) tmp_recvbuf +
                                              newdisps[recv_idx] * extent,
                                              (char *) tmp_results +
                                              newdisps[recv_idx] * extent,
                                              recv_cnt, datatype, op);
            }

            /* update send_idx for next iteration */
            send_idx = recv_idx;
            last_idx = recv_idx + mask;
            mask >>= 1;
        }

        /* copy this process's result from tmp_results to recvbuf */
        mpi_errno = MPIR_Localcopy((char *) tmp_results + disps[rank] * extent,
                                   recvcount, datatype, recvbuf, recvcount, datatype);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }
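/* A minimal sketch (hypothetical helper, not MPICH source) of the rank
 * remapping used by the non-power-of-two fold-in above: even ranks below
 * 2*rem drop out (newrank -1), odd ranks below 2*rem become rank/2, and the
 * rest shift down by rem, leaving a contiguous power-of-two group. */
static int fold_in_newrank(int rank, int comm_size, int pof2)
{
    int rem = comm_size - pof2;
    if (rank < 2 * rem)
        return (rank % 2 == 0) ? -1 : rank / 2; /* even ranks sit out */
    return rank - rem;
}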
int MPIR_Ireduce_scatter_sched_intra_auto(const void *sendbuf, void *recvbuf,
                                          const int recvcounts[], MPI_Datatype datatype,
                                          MPI_Op op, MPIR_Comm * comm_ptr, MPIR_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    int is_commutative;
    int total_count, type_size, nbytes;
    int comm_size;

    is_commutative = MPIR_Op_is_commutative(op);

    comm_size = comm_ptr->local_size;
    total_count = 0;
    for (i = 0; i < comm_size; i++) {
        total_count += recvcounts[i];
    }

    if (total_count == 0) {
        goto fn_exit;
    }

    MPIR_Datatype_get_size_macro(datatype, type_size);
    nbytes = total_count * type_size;

    /* select an appropriate algorithm based on commutativity and message size */
    if (is_commutative && (nbytes < MPIR_CVAR_REDUCE_SCATTER_COMMUTATIVE_LONG_MSG_SIZE)) {
        mpi_errno = MPIR_Ireduce_scatter_sched_intra_recursive_halving(sendbuf, recvbuf,
                                                                       recvcounts, datatype, op,
                                                                       comm_ptr, s);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else if (is_commutative && (nbytes >= MPIR_CVAR_REDUCE_SCATTER_COMMUTATIVE_LONG_MSG_SIZE)) {
        mpi_errno = MPIR_Ireduce_scatter_sched_intra_pairwise(sendbuf, recvbuf, recvcounts,
                                                              datatype, op, comm_ptr, s);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    } else {    /* (!is_commutative) */
        int is_block_regular = TRUE;
        for (i = 0; i < (comm_size - 1); ++i) {
            if (recvcounts[i] != recvcounts[i + 1]) {
                is_block_regular = FALSE;
                break;
            }
        }

        if (MPL_is_pof2(comm_size, NULL) && is_block_regular) {
            /* noncommutative, pof2 size, and block regular */
            mpi_errno = MPIR_Ireduce_scatter_sched_intra_noncommutative(sendbuf, recvbuf,
                                                                        recvcounts, datatype, op,
                                                                        comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
        } else {
            /* noncommutative and (non-pof2 or block irregular), use recursive doubling. */
            mpi_errno = MPIR_Ireduce_scatter_sched_intra_recursive_doubling(sendbuf, recvbuf,
                                                                            recvcounts, datatype,
                                                                            op, comm_ptr, s);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
        }
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
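/* Usage sketch (illustrative only, not MPICH source): the nonblocking
 * reduce-scatter whose schedule the selection logic above builds.
 * recvcounts may vary per rank; the noncommutative paths additionally care
 * whether all blocks are equal ("block regular"). */
#include <mpi.h>

static void example_ireduce_scatter(const int *sendbuf, int *recvbuf,
                                    const int *recvcounts, MPI_Comm comm)
{
    MPI_Request req;
    MPI_Ireduce_scatter(sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}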