/**
 * Non-blocking synchronous tagged send.
 *
 * Same contract as ucp_tag_send_nb(), except that completion additionally
 * requires the remote side to have matched the message (hence the dedicated
 * eager-sync protocol and sync_* thresholds). Returns a request pointer, or
 * an error wrapped in UCS_STATUS_PTR() if a request could not be allocated.
 */
ucs_status_ptr_t ucp_tag_send_sync_nb(ucp_ep_h ep, const void *buffer,
                                      size_t count, ucp_datatype_t datatype,
                                      ucp_tag_t tag, ucp_send_callback_t cb)
{
    ucs_trace_req("send_sync_nb buffer %p count %zu tag %"PRIx64" to %s cb %p",
                  buffer, count, tag, ucp_ep_peer_name(ep), cb);

    ucp_request_t *sreq = ucp_request_get(ep->worker);
    if (sreq == NULL) {
        /* Out of request descriptors - report failure via the status pointer */
        return UCS_STATUS_PTR(UCS_ERR_NO_MEMORY);
    }

    UCS_INSTRUMENT_RECORD(UCS_INSTRUMENT_TYPE_UCP_TX, "ucp_tag_send_sync_nb",
                          sreq, ucp_dt_length(datatype, count, buffer,
                                              &sreq->send.state));

    /* Remote side needs to send reply, so have it connect to us */
    ucp_ep_connect_remote(ep);

    /* NOTE(review): call arity here matches an older ucp_tag_send_req_init()
     * signature (no count/flags) - confirm against the helper's definition */
    ucp_tag_send_req_init(sreq, ep, buffer, datatype, tag);

    /* Short protocol is disabled (-1): a sync send always needs an ACK,
     * which the short path cannot provide */
    return ucp_tag_send_req(sreq, count,
                            -1, /* disable short method */
                            ucp_ep_config(ep)->sync_zcopy_thresh,
                            ucp_ep_config(ep)->sync_rndv_thresh,
                            cb, &ucp_tag_eager_sync_proto);
}
/**
 * Common receive path shared by the tag-recv entry points.
 *
 * Matching order: first scan the unexpected-message list; only if the message
 * has not arrived yet (UCS_INPROGRESS) is the request posted to the expected
 * queue. This ordering must not change - reversing it could match one message
 * twice or miss it entirely.
 *
 * @param worker      Worker on which to receive.
 * @param buffer      User receive buffer.
 * @param count       Number of datatype elements the buffer holds.
 * @param datatype    Packed UCP datatype descriptor.
 * @param tag         Tag to match against incoming messages.
 * @param tag_mask    Bitmask selecting which tag bits must match.
 * @param req         Pre-allocated request to track this receive.
 * @param req_flags   Extra request flags (e.g. UCP_REQUEST_FLAG_CALLBACK).
 * @param cb          User completion callback (invoked with req + 1, the
 *                    user-visible part of the request).
 * @param rdesc       Optional receive descriptor hint passed through to the
 *                    unexpected-list search.
 * @param debug_name  Caller name used only for tracing.
 *
 * @return Completion status of the search, or UCS_INPROGRESS if the request
 *         was queued (or is being kept alive for later completion).
 */
static UCS_F_ALWAYS_INLINE ucs_status_t
ucp_tag_recv_common(ucp_worker_h worker, void *buffer, size_t count,
                    uintptr_t datatype, ucp_tag_t tag, ucp_tag_t tag_mask,
                    ucp_request_t *req, uint16_t req_flags,
                    ucp_tag_recv_callback_t cb, ucp_recv_desc_t *rdesc,
                    const char *debug_name)
{
    /* save_rreq is an in/out flag: the unexpected search clears it when the
     * request must NOT be saved on the expected queue (e.g. fully consumed) */
    unsigned save_rreq = 1;
    ucs_queue_head_t *queue;
    ucp_context_h context;
    ucs_status_t status;
    size_t buffer_size;

    ucp_tag_recv_request_init(req, worker, buffer, count, datatype, req_flags);
    buffer_size = ucp_dt_length(datatype, count, buffer, &req->recv.state);

    ucs_trace_req("%s buffer %p buffer_size %zu tag %"PRIx64"/%"PRIx64,
                  debug_name, buffer, buffer_size, tag, tag_mask);

    /* First, search in unexpected list */
    status = ucp_tag_search_unexp(worker, buffer, buffer_size, datatype, tag,
                                  tag_mask, req, &req->recv.info, cb, rdesc,
                                  &save_rreq);
    if (status != UCS_INPROGRESS) {
        /* Message was already here and fully handled - complete immediately.
         * cb receives req + 1: the user-visible portion of the request. */
        if (req_flags & UCP_REQUEST_FLAG_CALLBACK) {
            cb(req + 1, status, &req->recv.info);
        }
        ucp_tag_recv_request_completed(req, status, &req->recv.info,
                                       debug_name);
    } else if (save_rreq) {
        /* If not found on unexpected, wait until it arrives.
         * If was found but need this receive request for later completion, save it */
        context            = worker->context;
        queue              = ucp_tag_exp_get_queue(&context->tm, tag, tag_mask);
        req->recv.buffer   = buffer;
        req->recv.length   = buffer_size;
        req->recv.datatype = datatype;
        req->recv.tag      = tag;
        req->recv.tag_mask = tag_mask;
        req->recv.cb       = cb;
        ucp_tag_exp_push(&context->tm, queue, req);

        /* If offload supported, post this tag to transport as well.
         * TODO: need to distinguish the cases when posting is not needed. */
        ucp_tag_offload_try_post(worker->context, req);

        ucs_trace_req("%s returning expected request %p (%p)", debug_name, req,
                      req + 1);
    }
    return status;
}
/**
 * Populate a freshly allocated send request before protocol dispatch.
 *
 * Fills the request flags, endpoint, user buffer descriptor, tag and lane,
 * initializes the datatype send state, and computes the total payload length.
 *
 * @param req       Request to initialize (obtained from the request pool).
 * @param ep        Destination endpoint.
 * @param buffer    User send buffer.
 * @param datatype  Packed UCP datatype descriptor.
 * @param count     Number of datatype elements in the buffer.
 * @param tag       Tag to attach to the outgoing message.
 * @param flags     Initial request flags.
 */
static UCS_F_ALWAYS_INLINE void
ucp_tag_send_req_init(ucp_request_t* req, ucp_ep_h ep, const void* buffer,
                      uintptr_t datatype, size_t count, ucp_tag_t tag,
                      uint16_t flags)
{
    req->flags         = flags;
    req->send.ep       = ep;
    req->send.tag      = tag;
    req->send.buffer   = buffer;
    req->send.datatype = datatype;
    req->send.lane     = ucp_ep_config(ep)->tag.lane;

    /* The datatype send state must be set up before the length can be
     * computed - ucp_dt_length() reads req->send.state.dt */
    ucp_request_send_state_init(req, datatype, count);
    req->send.length = ucp_dt_length(req->send.datatype, count,
                                     req->send.buffer, &req->send.state.dt);
}
ucs_status_ptr_t ucp_tag_send_nb(ucp_ep_h ep, const void *buffer, size_t count, uintptr_t datatype, ucp_tag_t tag, ucp_send_callback_t cb) { ucs_status_t status; ucp_request_t *req; size_t length; ucs_trace_req("send_nb buffer %p count %zu tag %"PRIx64" to %s cb %p", buffer, count, tag, ucp_ep_peer_name(ep), cb); if (ucs_likely((datatype & UCP_DATATYPE_CLASS_MASK) == UCP_DATATYPE_CONTIG)) { length = ucp_contig_dt_length(datatype, count); UCS_INSTRUMENT_RECORD(UCS_INSTRUMENT_TYPE_UCP_TX, "ucp_tag_send_nb (eager - start)", buffer, length); if (ucs_likely(length <= ucp_ep_config(ep)->max_eager_short)) { status = ucp_tag_send_eager_short(ep, tag, buffer, length); if (ucs_likely(status != UCS_ERR_NO_RESOURCE)) { UCS_INSTRUMENT_RECORD(UCS_INSTRUMENT_TYPE_UCP_TX, "ucp_tag_send_nb (eager - finish)", buffer, length); return UCS_STATUS_PTR(status); /* UCS_OK also goes here */ } } } req = ucp_request_get(ep->worker); if (req == NULL) { return UCS_STATUS_PTR(UCS_ERR_NO_MEMORY); } UCS_INSTRUMENT_RECORD(UCS_INSTRUMENT_TYPE_UCP_TX, "ucp_tag_send_nb", req, ucp_dt_length(datatype, count, buffer, &req->send.state)); ucp_tag_send_req_init(req, ep, buffer, datatype, tag); return ucp_tag_send_req(req, count, ucp_ep_config(ep)->max_eager_short, ucp_ep_config(ep)->zcopy_thresh, ucp_ep_config(ep)->rndv_thresh, cb, &ucp_tag_eager_proto); }
/**
 * Common receive path shared by the tag-recv entry points.
 *
 * The caller has already performed the unexpected-queue lookup and passes the
 * result as @a rdesc: NULL means no match (post to the expected queue), a
 * non-NULL descriptor is dispatched by protocol flag - eager-only (single
 * fragment, fast path), rendezvous, or multi-fragment eager.
 *
 * @param worker      Worker on which to receive.
 * @param buffer      User receive buffer.
 * @param count       Number of datatype elements the buffer holds.
 * @param datatype    Packed UCP datatype descriptor.
 * @param tag         Tag to match against incoming messages.
 * @param tag_mask    Bitmask selecting which tag bits must match.
 * @param req         Pre-allocated request to track this receive.
 * @param req_flags   Extra request flags (e.g. UCP_REQUEST_FLAG_CALLBACK).
 * @param cb          User completion callback (invoked with req + 1, the
 *                    user-visible part of the request).
 * @param rdesc       Matched unexpected descriptor, or NULL if none.
 * @param debug_name  Caller name used only for tracing.
 */
static UCS_F_ALWAYS_INLINE void
ucp_tag_recv_common(ucp_worker_h worker, void *buffer, size_t count,
                    uintptr_t datatype, ucp_tag_t tag, ucp_tag_t tag_mask,
                    ucp_request_t *req, uint32_t req_flags,
                    ucp_tag_recv_callback_t cb, ucp_recv_desc_t *rdesc,
                    const char *debug_name)
{
    unsigned common_flags = UCP_REQUEST_FLAG_RECV | UCP_REQUEST_FLAG_EXPECTED;
    ucp_eager_first_hdr_t *eagerf_hdr;
    ucp_request_queue_t *req_queue;
    uct_memory_type_t mem_type;
    size_t hdr_len, recv_len;
    ucs_status_t status;
    uint64_t msg_id;

    ucp_trace_req(req, "%s buffer %p dt 0x%lx count %zu tag %"PRIx64"/%"PRIx64,
                  debug_name, buffer, datatype, count, tag, tag_mask);

    /* First, check the fast path case - single fragment
     * in this case avoid initializing most of request fields
     * */
    if (ucs_likely((rdesc != NULL) &&
                   (rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_ONLY))) {
        UCS_PROFILE_REQUEST_EVENT(req, "eager_only_match", 0);
        UCP_WORKER_STAT_EAGER_MSG(worker, rdesc->flags);
        UCP_WORKER_STAT_EAGER_CHUNK(worker, UNEXP);

        /* A sync eager message needs an ACK back to the sender even on the
         * fast path */
        if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_SYNC)) {
            ucp_tag_eager_sync_send_ack(worker, rdesc + 1, rdesc->flags);
        }

        req->flags                    = UCP_REQUEST_FLAG_RECV | req_flags;
        /* Payload starts after the protocol header inside the descriptor */
        hdr_len                       = rdesc->payload_offset;
        recv_len                      = rdesc->length - hdr_len;
        req->recv.tag.info.sender_tag = ucp_rdesc_get_tag(rdesc);
        req->recv.tag.info.length     = recv_len;
        ucp_memory_type_detect_mds(worker->context, buffer, recv_len,
                                   &mem_type);

        status = ucp_dt_unpack_only(worker, buffer, count, datatype, mem_type,
                                    (void*)(rdesc + 1) + hdr_len, recv_len, 1);
        ucp_recv_desc_release(rdesc);

        /* cb receives req + 1: the user-visible portion of the request */
        if (req_flags & UCP_REQUEST_FLAG_CALLBACK) {
            cb(req + 1, status, &req->recv.tag.info);
        }
        ucp_tag_recv_request_completed(req, status, &req->recv.tag.info,
                                       debug_name);
        return;
    }

    /* Initialize receive request */
    req->status        = UCS_OK;
    req->recv.worker   = worker;
    req->recv.buffer   = buffer;
    req->recv.datatype = datatype;
    ucp_dt_recv_state_init(&req->recv.state, buffer, datatype, count);
    if (!UCP_DT_IS_CONTIG(datatype)) {
        /* Non-contiguous datatypes are not posted to HW tag offload */
        common_flags |= UCP_REQUEST_FLAG_BLOCK_OFFLOAD;
    }
    req->flags            = common_flags | req_flags;
    req->recv.length      = ucp_dt_length(datatype, count, buffer,
                                          &req->recv.state);
    ucp_memory_type_detect_mds(worker->context, buffer, req->recv.length,
                               &mem_type);
    req->recv.mem_type    = mem_type;
    req->recv.tag.tag      = tag;
    req->recv.tag.tag_mask = tag_mask;
    req->recv.tag.cb       = cb;

    /* Zero sender_tag only when tracing, so trace output of an incomplete
     * request is deterministic */
    if (ucs_log_is_enabled(UCS_LOG_LEVEL_TRACE_REQ)) {
        req->recv.tag.info.sender_tag = 0;
    }

    if (ucs_unlikely(rdesc == NULL)) {
        /* If not found on unexpected, wait until it arrives.
         * If was found but need this receive request for later completion, save it */
        req_queue = ucp_tag_exp_get_queue(&worker->tm, tag, tag_mask);

        /* If offload supported, post this tag to transport as well.
         * TODO: need to distinguish the cases when posting is not needed. */
        ucp_tag_offload_try_post(worker, req, req_queue);

        ucp_tag_exp_push(&worker->tm, req_queue, req);

        ucs_trace_req("%s returning expected request %p (%p)", debug_name, req,
                      req + 1);
        return;
    }

    /* Check rendezvous case */
    if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_RNDV)) {
        ucp_rndv_matched(worker, req, (void*)(rdesc + 1));
        UCP_WORKER_STAT_RNDV(worker, UNEXP);
        ucp_recv_desc_release(rdesc);
        return;
    }

    /* Multi-fragment eager: ACK sync senders, then unpack the first fragment
     * and drain any fragments that already arrived */
    if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_SYNC)) {
        ucp_tag_eager_sync_send_ack(worker, rdesc + 1, rdesc->flags);
    }

    UCP_WORKER_STAT_EAGER_MSG(worker, rdesc->flags);
    ucs_assert(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER);
    eagerf_hdr                    = (void*)(rdesc + 1);
    req->recv.tag.info.sender_tag = ucp_rdesc_get_tag(rdesc);
    req->recv.tag.info.length     =
    req->recv.tag.remaining       = eagerf_hdr->total_len;

    /* process first fragment */
    UCP_WORKER_STAT_EAGER_CHUNK(worker, UNEXP);
    /* save msg_id before the rdesc may be consumed below */
    msg_id = eagerf_hdr->msg_id;
    status = ucp_tag_recv_request_process_rdesc(req, rdesc, 0);
    ucs_assert(status == UCS_INPROGRESS);

    /* process additional fragments */
    ucp_tag_frag_list_process_queue(&worker->tm, req, msg_id
                                    UCS_STATS_ARG(UCP_WORKER_STAT_TAG_RX_EAGER_CHUNK_UNEXP));
}