static int process_rx_read_entry(struct tcpx_xfer_entry *rx_entry) { struct tcpx_cq *tcpx_cq; int ret; ret = tcpx_recv_msg_data(rx_entry); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) return ret; if (!ret) goto done; FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg recv Failed ret = %d\n", ret); if (ret == -FI_ENOTCONN) tcpx_ep_shutdown_report(rx_entry->ep, &rx_entry->ep->util_ep.ep_fid.fid); done: tcpx_cq_report_completion(rx_entry->ep->util_ep.tx_cq, rx_entry, -ret); slist_remove_head(&rx_entry->ep->rma_read_queue); tcpx_cq = container_of(rx_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, rx_entry); return FI_SUCCESS; }
void process_tx_entry(struct tcpx_xfer_entry *tx_entry) { struct tcpx_cq *tcpx_cq; int ret; ret = tcpx_send_msg(tx_entry); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) return; if (!ret) goto done; FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n"); if (ret == -FI_ENOTCONN) tcpx_ep_shutdown_report(tx_entry->ep, &tx_entry->ep->util_ep.ep_fid.fid); done: tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq, tx_entry, -ret); slist_remove_head(&tx_entry->ep->tx_queue); if (ntohl(tx_entry->msg_hdr.hdr.flags) & (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) { tx_entry->flags |= FI_COMPLETION; slist_insert_tail(&tx_entry->entry, &tx_entry->ep->tx_rsp_pend_queue); return; } tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, tx_entry); }
int tcpx_get_rx_entry_op_msg(struct tcpx_ep *tcpx_ep) { struct tcpx_xfer_entry *rx_entry; struct tcpx_xfer_entry *tx_entry; struct slist_entry *entry; struct tcpx_cq *tcpx_cq; struct tcpx_rx_detect *rx_detect = &tcpx_ep->rx_detect; int ret; tcpx_cq = container_of(tcpx_ep->util_ep.rx_cq, struct tcpx_cq, util_cq); if (rx_detect->hdr.hdr.op_data == TCPX_OP_MSG_RESP) { assert(!slist_empty(&tcpx_ep->tx_rsp_pend_queue)); entry = tcpx_ep->tx_rsp_pend_queue.head; tx_entry = container_of(entry, struct tcpx_xfer_entry, entry); tcpx_cq = container_of(tcpx_ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq, tx_entry, 0); slist_remove_head(&tx_entry->ep->tx_rsp_pend_queue); tcpx_xfer_entry_release(tcpx_cq, tx_entry); rx_detect->done_len = 0; return -FI_EAGAIN; }
static int process_rx_entry(struct tcpx_xfer_entry *rx_entry) { struct tcpx_cq *tcpx_cq; int ret; ret = tcpx_recv_msg_data(rx_entry); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) return ret; if (!ret) goto done; FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg recv Failed ret = %d\n", ret); if (ret == -FI_ENOTCONN) tcpx_ep_shutdown_report(rx_entry->ep, &rx_entry->ep->util_ep.ep_fid.fid); done: if (ntohl(rx_entry->msg_hdr.hdr.flags) & OFI_DELIVERY_COMPLETE) { if (tcpx_prepare_rx_entry_resp(rx_entry)) rx_entry->ep->cur_rx_proc_fn = tcpx_prepare_rx_entry_resp; return FI_SUCCESS; } tcpx_cq_report_completion(rx_entry->ep->util_ep.rx_cq, rx_entry, -ret); tcpx_cq = container_of(rx_entry->ep->util_ep.rx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, rx_entry); return FI_SUCCESS; }
static int tcpx_prepare_rx_write_resp(struct tcpx_xfer_entry *rx_entry) { struct tcpx_cq *tcpx_rx_cq, *tcpx_tx_cq; struct tcpx_xfer_entry *resp_entry; tcpx_tx_cq = container_of(rx_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); resp_entry = tcpx_xfer_entry_alloc(tcpx_tx_cq, TCPX_OP_MSG_RESP); if (!resp_entry) return -FI_EAGAIN; resp_entry->msg_data.iov[0].iov_base = (void *) &resp_entry->msg_hdr; resp_entry->msg_data.iov[0].iov_len = sizeof(resp_entry->msg_hdr); resp_entry->msg_data.iov_cnt = 1; resp_entry->msg_hdr.hdr.op = ofi_op_msg; resp_entry->msg_hdr.hdr.size = htonll(sizeof(resp_entry->msg_hdr)); resp_entry->flags &= ~FI_COMPLETION; resp_entry->context = NULL; resp_entry->done_len = 0; resp_entry->ep = rx_entry->ep; tcpx_tx_queue_insert(resp_entry->ep, resp_entry); tcpx_cq_report_completion(rx_entry->ep->util_ep.rx_cq, rx_entry, 0); tcpx_rx_cq = container_of(rx_entry->ep->util_ep.rx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_rx_cq, rx_entry); return FI_SUCCESS; }
static void process_tx_entry(struct tcpx_xfer_entry *tx_entry) { struct tcpx_cq *tcpx_cq; int ret; ret = tcpx_send_msg(tx_entry); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) return; /* Keep this path below as a single pass path.*/ tx_entry->ep->hdr_bswap(&tx_entry->hdr.base_hdr); slist_remove_head(&tx_entry->ep->tx_queue); if (ret) { FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n"); tcpx_ep_shutdown_report(tx_entry->ep, &tx_entry->ep->util_ep.ep_fid.fid); tcpx_cq_report_error(tx_entry->ep->util_ep.tx_cq, tx_entry, ret); } else { tcpx_cq_report_success(tx_entry->ep->util_ep.tx_cq, tx_entry); if (tx_entry->hdr.base_hdr.flags & (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) { tx_entry->flags |= FI_COMPLETION; slist_insert_tail(&tx_entry->entry, &tx_entry->ep->tx_rsp_pend_queue); return; } } tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, tx_entry); }
static int tcpx_prepare_rx_write_resp(struct tcpx_xfer_entry *rx_entry) { struct tcpx_cq *tcpx_rx_cq, *tcpx_tx_cq; struct tcpx_xfer_entry *resp_entry; tcpx_tx_cq = container_of(rx_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); resp_entry = tcpx_xfer_entry_alloc(tcpx_tx_cq, TCPX_OP_MSG_RESP); if (!resp_entry) return -FI_EAGAIN; resp_entry->iov[0].iov_base = (void *) &resp_entry->hdr; resp_entry->iov[0].iov_len = sizeof(resp_entry->hdr.base_hdr); resp_entry->iov_cnt = 1; resp_entry->hdr.base_hdr.op = ofi_op_msg; resp_entry->hdr.base_hdr.size = sizeof(resp_entry->hdr.base_hdr); resp_entry->hdr.base_hdr.payload_off = (uint8_t)sizeof(resp_entry->hdr.base_hdr); resp_entry->flags &= ~FI_COMPLETION; resp_entry->context = NULL; resp_entry->rem_len = resp_entry->hdr.base_hdr.size; resp_entry->ep = rx_entry->ep; resp_entry->ep->hdr_bswap(&resp_entry->hdr.base_hdr); tcpx_tx_queue_insert(resp_entry->ep, resp_entry); tcpx_cq_report_success(rx_entry->ep->util_ep.rx_cq, rx_entry); tcpx_rx_cq = container_of(rx_entry->ep->util_ep.rx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_rx_cq, rx_entry); return FI_SUCCESS; }
static void tcpx_ep_tx_rx_queues_release(struct tcpx_ep *ep) { struct slist_entry *entry; struct tcpx_xfer_entry *xfer_entry; struct tcpx_cq *tcpx_cq; fastlock_acquire(&ep->lock); while (!slist_empty(&ep->tx_queue)) { entry = ep->tx_queue.head; xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry); slist_remove_head(&ep->tx_queue); tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, xfer_entry); } while (!slist_empty(&ep->rx_queue)) { entry = ep->rx_queue.head; xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry); slist_remove_head(&ep->rx_queue); tcpx_cq = container_of(xfer_entry->ep->util_ep.rx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, xfer_entry); } while (!slist_empty(&ep->rma_read_queue)) { entry = ep->rma_read_queue.head; xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry); slist_remove_head(&ep->rma_read_queue); tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, xfer_entry); } while (!slist_empty(&ep->tx_rsp_pend_queue)) { entry = ep->tx_rsp_pend_queue.head; xfer_entry = container_of(entry, struct tcpx_xfer_entry, entry); slist_remove_head(&ep->tx_rsp_pend_queue); tcpx_cq = container_of(xfer_entry->ep->util_ep.tx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, xfer_entry); } fastlock_release(&ep->lock); }
/* Return a completed message-receive entry to its owner: the shared rx
 * context when the endpoint uses one, otherwise the endpoint's rx CQ
 * buffer pool.
 */
void tcpx_rx_msg_release(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *cq;

	assert(rx_entry->hdr.base_hdr.op_data == TCPX_OP_MSG_RECV);

	if (rx_entry->ep->srx_ctx) {
		/* Entry was posted through the shared rx context. */
		tcpx_srx_xfer_release(rx_entry->ep->srx_ctx, rx_entry);
		return;
	}

	cq = container_of(rx_entry->ep->util_ep.rx_cq,
			  struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(cq, rx_entry);
}
/* Fail every transfer still waiting on the endpoint's response-pending
 * queue: report each with error @err on the tx CQ, then release it back
 * to the tx CQ pool.
 */
static void tcpx_cq_report_xfer_fail(struct tcpx_ep *tcpx_ep, int err)
{
	struct tcpx_xfer_entry *pending;
	struct slist_entry *entry;
	struct tcpx_cq *cq;

	while (!slist_empty(&tcpx_ep->tx_rsp_pend_queue)) {
		entry = slist_remove_head(&tcpx_ep->tx_rsp_pend_queue);
		pending = container_of(entry, struct tcpx_xfer_entry, entry);
		cq = container_of(pending->ep->util_ep.tx_cq,
				  struct tcpx_cq, util_cq);
		/* report_completion takes the positive error code */
		tcpx_cq_report_completion(pending->ep->util_ep.tx_cq,
					  pending, -err);
		tcpx_xfer_entry_release(cq, pending);
	}
}
static int process_rx_remote_write_entry(struct tcpx_xfer_entry *rx_entry) { struct tcpx_cq *tcpx_cq; int ret = FI_SUCCESS; ret = tcpx_recv_msg_data(rx_entry); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) return ret; if (ret) { FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "remote write Failed ret = %d\n", ret); tcpx_ep_shutdown_report(rx_entry->ep, &rx_entry->ep->util_ep.ep_fid.fid); tcpx_cq_report_error(rx_entry->ep->util_ep.rx_cq, rx_entry, ret); tcpx_cq = container_of(rx_entry->ep->util_ep.rx_cq, struct tcpx_cq, util_cq); tcpx_xfer_entry_release(tcpx_cq, rx_entry); } else if (rx_entry->hdr.base_hdr.flags &