static int usdf_msg_ep_enable(struct fid_ep *fep) { struct usdf_ep *ep; struct usd_filter filt; struct usd_qp_impl *uqp; int ret; ep = ep_ftou(fep); filt.uf_type = USD_FTY_UDP_SOCK; filt.uf_filter.uf_udp_sock.u_sock = ep->ep_sock; ret = usd_create_qp(ep->ep_domain->dom_dev, USD_QTR_UDP, USD_QTY_NORMAL, ep->ep_wcq->cq_cq, ep->ep_rcq->cq_cq, ep->ep_wqe, ep->ep_rqe, &filt, &ep->ep_qp); if (ret != 0) { goto fail; } ep->ep_qp->uq_context = ep; /* * Allocate a memory region big enough to hold a header for each * RQ entry */ uqp = to_qpi(ep->ep_qp); ep->ep_hdr_ptr = calloc(uqp->uq_rq.urq_num_entries, sizeof(ep->ep_hdr_ptr[0])); if (ep->ep_hdr_ptr == NULL) { ret = -FI_ENOMEM; goto fail; } ret = usd_alloc_mr(ep->ep_domain->dom_dev, usd_get_recv_credits(ep->ep_qp) * USDF_HDR_BUF_ENTRY, &ep->ep_hdr_buf); if (ret != 0) { goto fail; } return 0; fail: if (ep->ep_hdr_ptr != NULL) { free(ep->ep_hdr_ptr); } if (ep->ep_qp != NULL) { usd_destroy_qp(ep->ep_qp); } return ret; }
/*
 * Report how many receive operations may still be posted on a DGRAM
 * endpoint.  A single recvv() can consume up to rx_iov_limit + 1
 * descriptors, so the available credits are scaled by that factor.
 *
 * Returns the operation count, -FI_EINVAL for a NULL endpoint, or
 * -FI_EOPBADSTATE if the endpoint has not been enabled yet.
 */
ssize_t usdf_dgram_rx_size_left(struct fid_ep *fep)
{
	struct usdf_ep *ep;

	USDF_DBG_SYS(EP_DATA, "\n");

	if (fep == NULL)
		return -FI_EINVAL;

	ep = ep_ftou(fep);

	/* a NULL QP means the endpoint was never enabled */
	if (ep->e.dg.ep_qp == NULL)
		return -FI_EOPBADSTATE;

	return usd_get_recv_credits(ep->e.dg.ep_qp) /
		(ep->e.dg.rx_iov_limit + 1);
}
/*
 * Report how many prefix-mode receive operations may still be posted on
 * a DGRAM endpoint.  In prefix mode a prefix_recvv() posts at most
 * rx_iov_limit descriptors, so credits are divided by that value.
 *
 * Returns the operation count, -FI_EINVAL for a NULL endpoint, or
 * -FI_EOPBADSTATE if the endpoint has not been enabled yet.
 */
ssize_t usdf_dgram_prefix_rx_size_left(struct fid_ep *fep)
{
	struct usdf_ep *ep;

	USDF_DBG_SYS(EP_DATA, "\n");

	if (fep == NULL)
		return -FI_EINVAL;

	ep = ep_ftou(fep);

	/* a NULL QP means the endpoint was never enabled */
	if (ep->e.dg.ep_qp == NULL)
		return -FI_EOPBADSTATE;

	/* prefix_recvv can post up to iov_limit descriptors */
	return usd_get_recv_credits(ep->e.dg.ep_qp) /
		ep->e.dg.rx_iov_limit;
}
ssize_t usdf_dgram_rx_size_left(struct fid_ep *fep) { struct usdf_ep *ep; USDF_DBG_SYS(EP_DATA, "\n"); if (fep == NULL) return -FI_EINVAL; ep = ep_ftou(fep); if (ep->e.dg.ep_qp == NULL) return -FI_EOPBADSTATE; /* EP not enabled */ /* NOTE-SIZE-LEFT: divide by constant right now, rather than keeping * track of the rx_attr->iov_limit value we gave to the user. This * sometimes under-reports the number of RX ops that could be posted, * but it avoids touching a cache line that we don't otherwise need. * * sendv/recvv could potentially post iov_limit+1 descriptors */ return usd_get_recv_credits(ep->e.dg.ep_qp) / (USDF_DGRAM_DFLT_SGE + 1); }