Example #1
static UCS_F_ALWAYS_INLINE void
ucp_tag_recv_request_completed(ucp_request_t *req, ucs_status_t status,
                               ucp_tag_recv_info_t *info, const char *function)
{
    ucs_trace_req("%s returning completed request %p (%p) stag 0x%"PRIx64" len %zu, %s",
                  function, req, req + 1, info->sender_tag, info->length,
                  ucs_status_string(status));

    req->status = status;

    /* Record the profile event before the request may be returned to the pool */
    UCS_PROFILE_REQUEST_EVENT(req, "complete_recv", 0);

    /* Mark the request completed; free it only if the user already released it */
    if ((req->flags |= UCP_REQUEST_FLAG_COMPLETED) & UCP_REQUEST_FLAG_RELEASED) {
        ucp_request_put(req);
    }
}
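
The if condition above folds two steps into one expression: it sets the COMPLETED bit and then frees the request only if the user has already released it, so whichever side (completion or release) acts second is the one that returns the request to the pool. Below is a minimal, self-contained sketch of that idiom; the demo_* names are hypothetical stand-ins for the UCP request machinery, not UCX API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical flag bits mirroring the COMPLETED/RELEASED idea */
#define DEMO_FLAG_COMPLETED 0x1u
#define DEMO_FLAG_RELEASED  0x2u

typedef struct {
    uint32_t flags;
    int      status;
} demo_request_t;

/* Completion side: mark COMPLETED, free only if already RELEASED */
static void demo_request_completed(demo_request_t *req, int status)
{
    req->status = status;
    if ((req->flags |= DEMO_FLAG_COMPLETED) & DEMO_FLAG_RELEASED) {
        free(req);
    }
}

/* User side: mark RELEASED, free only if already COMPLETED */
static void demo_request_release(demo_request_t *req)
{
    if ((req->flags |= DEMO_FLAG_RELEASED) & DEMO_FLAG_COMPLETED) {
        free(req);
    }
}

int main(void)
{
    demo_request_t *req = calloc(1, sizeof(*req));

    demo_request_completed(req, 0); /* not released yet - request stays alive */
    demo_request_release(req);      /* second bit set - freed exactly once here */

    printf("done\n");
    return 0;
}
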
Example #2
static UCS_F_ALWAYS_INLINE void
ucp_tag_recv_request_completed(ucp_request_t *req, ucs_status_t status,
                               ucp_tag_recv_info_t *info, const char *function)
{
    ucs_trace_req("%s returning completed request %p (%p) stag 0x%"PRIx64" len %zu, %s",
                  function, req, req + 1, info->sender_tag, info->length,
                  ucs_status_string(status));

    req->status = status;
    req->flags |= UCP_REQUEST_FLAG_COMPLETED;
    if (req->flags & UCP_REQUEST_FLAG_BLOCK_OFFLOAD) {
        --req->recv.worker->context->tm.sw_req_count;
    }
    UCS_PROFILE_REQUEST_EVENT(req, "complete_recv", 0);
}
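
Compared with Example #1, this revision of ucp_tag_recv_request_completed always leaves the COMPLETED flag set without releasing the request itself; instead, when the request had been forced onto the software tag-matching path (UCP_REQUEST_FLAG_BLOCK_OFFLOAD), it decrements the worker context's tm.sw_req_count on completion. Judging by the name, that counter tracks how many receives are pinned to the software path, bookkeeping the tag-offload logic can consult.
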
Example #3
static UCS_F_ALWAYS_INLINE void
ucp_eager_expected_handler(ucp_worker_t *worker, ucp_request_t *req,
                           void *data, size_t recv_len, ucp_tag_t recv_tag,
                           uint16_t flags)
{
    ucs_trace_req("found req %p", req);
    UCS_PROFILE_REQUEST_EVENT(req, "eager_recv", recv_len);

    /* First fragment fills the receive information */
    UCP_WORKER_STAT_EAGER_MSG(worker, flags);
    UCP_WORKER_STAT_EAGER_CHUNK(worker, EXP);

    req->recv.tag.info.sender_tag = recv_tag;

    /* Cancel the request in the transport if it was offloaded, because the
     * data arrived either:
     * 1) via SW TM (e.g. the peer doesn't support offload), or
     * 2) as unexpected via HW TM */
    ucp_tag_offload_try_cancel(worker, req,
                               UCP_TAG_OFFLOAD_CANCEL_FORCE |
                               UCP_TAG_OFFLOAD_CANCEL_DEREG);
}
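
ucp_eager_expected_handler covers the case where eager data arrives for a request that was already matched as expected: it records the sender tag, updates the worker's eager message/chunk statistics, and, as the comment explains, force-cancels (and deregisters) any transport-level posting of the same request, since the data was delivered through the software path or as an unexpected hardware-TM arrival rather than through the offloaded entry.
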
Example #4
static UCS_F_ALWAYS_INLINE void
ucp_tag_recv_common(ucp_worker_h worker, void *buffer, size_t count,
                    uintptr_t datatype, ucp_tag_t tag, ucp_tag_t tag_mask,
                    ucp_request_t *req, uint32_t req_flags, ucp_tag_recv_callback_t cb,
                    ucp_recv_desc_t *rdesc, const char *debug_name)
{
    unsigned common_flags = UCP_REQUEST_FLAG_RECV | UCP_REQUEST_FLAG_EXPECTED;
    ucp_eager_first_hdr_t *eagerf_hdr;
    ucp_request_queue_t *req_queue;
    uct_memory_type_t mem_type;
    size_t hdr_len, recv_len;
    ucs_status_t status;
    uint64_t msg_id;

    ucp_trace_req(req, "%s buffer %p dt 0x%lx count %zu tag %"PRIx64"/%"PRIx64,
                  debug_name, buffer, datatype, count, tag, tag_mask);

    /* First, check the fast-path case - a single-fragment eager message.
     * In this case we avoid initializing most of the request fields. */
    if (ucs_likely((rdesc != NULL) && (rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_ONLY))) {
        UCS_PROFILE_REQUEST_EVENT(req, "eager_only_match", 0);
        UCP_WORKER_STAT_EAGER_MSG(worker, rdesc->flags);
        UCP_WORKER_STAT_EAGER_CHUNK(worker, UNEXP);

        if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_SYNC)) {
            ucp_tag_eager_sync_send_ack(worker, rdesc + 1, rdesc->flags);
        }

        req->flags                    = UCP_REQUEST_FLAG_RECV | req_flags;
        hdr_len                       = rdesc->payload_offset;
        recv_len                      = rdesc->length - hdr_len;
        req->recv.tag.info.sender_tag = ucp_rdesc_get_tag(rdesc);
        req->recv.tag.info.length     = recv_len;

        ucp_memory_type_detect_mds(worker->context, buffer, recv_len, &mem_type);

        status = ucp_dt_unpack_only(worker, buffer, count, datatype, mem_type,
                                    (void*)(rdesc + 1) + hdr_len, recv_len, 1);
        ucp_recv_desc_release(rdesc);

        if (req_flags & UCP_REQUEST_FLAG_CALLBACK) {
            cb(req + 1, status, &req->recv.tag.info);
        }
        ucp_tag_recv_request_completed(req, status, &req->recv.tag.info,
                                       debug_name);
        return;
    }

    /* Initialize receive request */
    req->status             = UCS_OK;
    req->recv.worker        = worker;
    req->recv.buffer        = buffer;
    req->recv.datatype      = datatype;

    ucp_dt_recv_state_init(&req->recv.state, buffer, datatype, count);

    if (!UCP_DT_IS_CONTIG(datatype)) {
        common_flags       |= UCP_REQUEST_FLAG_BLOCK_OFFLOAD;
    }

    req->flags              = common_flags | req_flags;
    req->recv.length        = ucp_dt_length(datatype, count, buffer,
                                            &req->recv.state);

    ucp_memory_type_detect_mds(worker->context, buffer, req->recv.length, &mem_type);

    req->recv.mem_type      = mem_type;
    req->recv.tag.tag       = tag;
    req->recv.tag.tag_mask  = tag_mask;
    req->recv.tag.cb        = cb;
    if (ucs_log_is_enabled(UCS_LOG_LEVEL_TRACE_REQ)) {
        req->recv.tag.info.sender_tag = 0;
    }

    if (ucs_unlikely(rdesc == NULL)) {
        /* If the message was not found on the unexpected queue, wait until it
         * arrives. If it was found but this receive request is needed for
         * later completion, save it. */
        req_queue = ucp_tag_exp_get_queue(&worker->tm, tag, tag_mask);

        /* If offload supported, post this tag to transport as well.
         * TODO: need to distinguish the cases when posting is not needed. */
        ucp_tag_offload_try_post(worker, req, req_queue);

        ucp_tag_exp_push(&worker->tm, req_queue, req);

        ucs_trace_req("%s returning expected request %p (%p)", debug_name, req,
                      req + 1);
        return;
    }

    /* Check rendezvous case */
    if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_RNDV)) {
        ucp_rndv_matched(worker, req, (void*)(rdesc + 1));
        UCP_WORKER_STAT_RNDV(worker, UNEXP);
        ucp_recv_desc_release(rdesc);
        return;
    }

    if (ucs_unlikely(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER_SYNC)) {
        ucp_tag_eager_sync_send_ack(worker, rdesc + 1, rdesc->flags);
    }

    UCP_WORKER_STAT_EAGER_MSG(worker, rdesc->flags);
    ucs_assert(rdesc->flags & UCP_RECV_DESC_FLAG_EAGER);
    eagerf_hdr                    = (void*)(rdesc + 1);
    req->recv.tag.info.sender_tag = ucp_rdesc_get_tag(rdesc);
    req->recv.tag.info.length     =
    req->recv.tag.remaining       = eagerf_hdr->total_len;

    /* process first fragment */
    UCP_WORKER_STAT_EAGER_CHUNK(worker, UNEXP);
    msg_id = eagerf_hdr->msg_id;
    status = ucp_tag_recv_request_process_rdesc(req, rdesc, 0);
    ucs_assert(status == UCS_INPROGRESS);

    /* process additional fragments */
    ucp_tag_frag_list_process_queue(&worker->tm, req, msg_id
                                    UCS_STATS_ARG(UCP_WORKER_STAT_TAG_RX_EAGER_CHUNK_UNEXP));
}
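
Throughout ucp_tag_recv_common the packet payload is reached with expressions like (void*)(rdesc + 1) + hdr_len, i.e. the received bytes are stored directly after the receive descriptor in the same allocation, with a protocol header in front of the user data. The stand-alone sketch below illustrates that layout with a hypothetical demo_desc_t type (not part of UCP) and shows how recv_len = length - payload_offset recovers the user data.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical descriptor: metadata up front, header + user data stored
 * immediately after the struct in the same allocation ("desc + 1" idiom) */
typedef struct {
    size_t payload_offset; /* bytes of protocol header before the user data */
    size_t length;         /* header + user data */
} demo_desc_t;

static demo_desc_t *demo_desc_create(const void *hdr, size_t hdr_len,
                                     const void *data, size_t data_len)
{
    demo_desc_t *desc = malloc(sizeof(*desc) + hdr_len + data_len);

    desc->payload_offset = hdr_len;
    desc->length         = hdr_len + data_len;
    memcpy(desc + 1, hdr, hdr_len);                      /* header follows the struct */
    memcpy((char*)(desc + 1) + hdr_len, data, data_len); /* user data follows the header */
    return desc;
}

int main(void)
{
    const char hdr[]  = {0x1, 0x2};
    const char data[] = "payload";
    demo_desc_t *desc = demo_desc_create(hdr, sizeof(hdr), data, sizeof(data));

    /* Recover the user data the same way the receive fast path does:
     * skip the descriptor struct, then skip the protocol header */
    const char *payload = (const char*)(desc + 1) + desc->payload_offset;
    size_t recv_len     = desc->length - desc->payload_offset;

    printf("recv_len=%zu payload=%s\n", recv_len, payload);
    free(desc);
    return 0;
}
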
Example #5
static UCS_F_ALWAYS_INLINE ucs_status_t
ucp_tag_search_unexp(ucp_worker_h worker, void *buffer, size_t buffer_size,
                     ucp_datatype_t datatype, ucp_tag_t tag, uint64_t tag_mask,
                     ucp_request_t *req, ucp_tag_recv_info_t *info,
                     ucp_tag_recv_callback_t cb, ucp_recv_desc_t *first_rdesc,
                     unsigned *save_rreq)
{
    ucp_context_h context = worker->context;
    ucp_recv_desc_t *rdesc, *next;
    ucs_list_link_t *list;
    ucs_status_t status;
    ucp_tag_t recv_tag;
    unsigned flags;
    int i_list;

    /* fast check of global unexpected queue */
    if (ucs_list_is_empty(&context->tm.unexpected.all)) {
        return UCS_INPROGRESS;
    }

    if (first_rdesc == NULL) {
        if (tag_mask == UCP_TAG_MASK_FULL) {
            list   = ucp_tag_unexp_get_list_for_tag(&context->tm, tag);
            if (ucs_list_is_empty(list)) {
                return UCS_INPROGRESS;
            }

            i_list = UCP_RDESC_HASH_LIST;
        } else {
            list   = &context->tm.unexpected.all;
            i_list = UCP_RDESC_ALL_LIST;
        }
        rdesc = ucs_list_head(list, ucp_recv_desc_t, list[i_list]);
    } else {
        ucs_assert(tag_mask == UCP_TAG_MASK_FULL);
        list   = ucp_tag_unexp_get_list_for_tag(&context->tm, tag);
        i_list = UCP_RDESC_HASH_LIST;
        rdesc  = first_rdesc;
    }

    do {
        recv_tag = ucp_rdesc_get_tag(rdesc);
        flags    = rdesc->flags;
        ucs_trace_req("searching for %"PRIx64"/%"PRIx64"/%"PRIx64" offset %zu, "
                      "checking desc %p %"PRIx64" %c%c%c%c%c",
                      tag, tag_mask, info->sender_tag, req->recv.state.offset,
                      rdesc, recv_tag,
                      (flags & UCP_RECV_DESC_FLAG_FIRST) ? 'f' : '-',
                      (flags & UCP_RECV_DESC_FLAG_LAST)  ? 'l' : '-',
                      (flags & UCP_RECV_DESC_FLAG_EAGER) ? 'e' : '-',
                      (flags & UCP_RECV_DESC_FLAG_SYNC)  ? 's' : '-',
                      (flags & UCP_RECV_DESC_FLAG_RNDV)  ? 'r' : '-');
        if (ucp_tag_recv_is_match(recv_tag, flags, tag, tag_mask,
                                  req->recv.state.offset, info->sender_tag))
        {
            ucp_tag_log_match(recv_tag, rdesc->length - rdesc->hdr_len, req, tag,
                              tag_mask, req->recv.state.offset, "unexpected");
            ucp_tag_unexp_remove(rdesc);

            if (rdesc->flags & UCP_RECV_DESC_FLAG_EAGER) {
                UCS_PROFILE_REQUEST_EVENT(req, "eager_match", 0);
                status = ucp_eager_unexp_match(worker, rdesc, recv_tag, flags,
                                               buffer, buffer_size, datatype,
                                               &req->recv.state, info);
                ucs_trace_req("release receive descriptor %p", rdesc);
                if (status != UCS_INPROGRESS) {
                    goto out_release_desc;
                }

                next = ucp_tag_unexp_list_next(rdesc, i_list);
                ucp_tag_unexp_desc_release(rdesc);
                rdesc = next;
            } else {
                ucs_assert_always(rdesc->flags & UCP_RECV_DESC_FLAG_RNDV);
                *save_rreq         = 0;
                req->recv.buffer   = buffer;
                req->recv.length   = buffer_size;
                req->recv.datatype = datatype;
                req->recv.cb       = cb;
                ucp_rndv_matched(worker, req, (void*)(rdesc + 1));
                UCP_WORKER_STAT_RNDV(worker, UNEXP);
                status = UCS_INPROGRESS;
                goto out_release_desc;
            }
        } else {
            rdesc = ucp_tag_unexp_list_next(rdesc, i_list);
        }
    } while (&rdesc->list[i_list] != list);
    return UCS_INPROGRESS;

out_release_desc:
    ucp_tag_unexp_desc_release(rdesc);
    return status;
}
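
The loop above leaves the actual comparison to ucp_tag_recv_is_match, which also accounts for fragment ordering via req->recv.state.offset and info->sender_tag. The core tag test, however, is the usual masked comparison: a stored tag matches when it agrees with the requested tag on every bit selected by tag_mask (UCP_TAG_MASK_FULL meaning an exact match). A minimal illustration of that convention, using hypothetical demo_* names rather than the UCP helper itself:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_tag_t;

/* A stored tag matches a posted (tag, mask) pair when the bits selected by
 * the mask are equal; an all-ones mask means exact match, zero means wildcard */
static int demo_tag_is_match(demo_tag_t recv_tag, demo_tag_t tag, demo_tag_t tag_mask)
{
    return (recv_tag & tag_mask) == (tag & tag_mask);
}

int main(void)
{
    printf("%d\n", demo_tag_is_match(0x1234, 0x1234, UINT64_MAX)); /* 1: exact match    */
    printf("%d\n", demo_tag_is_match(0x1234, 0x1200, 0xff00));     /* 1: high byte only */
    printf("%d\n", demo_tag_is_match(0x1234, 0x5678, 0));          /* 1: full wildcard  */
    printf("%d\n", demo_tag_is_match(0x1234, 0x5678, UINT64_MAX)); /* 0: mismatch       */
    return 0;
}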