/*
 * Allocate a receive packet entry from the endpoint's rx packet pool and
 * hand it to rxd_set_rx_pkt() for setup.  When the endpoint requires local
 * memory registration, the pool also returns the registration descriptor,
 * which is stashed in the entry.  Returns NULL when the pool is exhausted.
 */
static struct rxd_pkt_entry *rxd_get_rx_pkt(struct rxd_ep *ep)
{
	struct rxd_pkt_entry *entry;
	void *desc = NULL;

	if (ep->do_local_mr)
		entry = ofi_buf_alloc_ex(ep->rx_pkt_pool, &desc);
	else
		entry = ofi_buf_alloc(ep->rx_pkt_pool);

	if (!entry)
		return NULL;

	/* desc stays NULL on the non-MR path, so entry->mr is cleared then. */
	entry->mr = (struct fid_mr *) desc;
	rxd_set_rx_pkt(ep, entry);
	return entry;
}
struct tcpx_xfer_entry *tcpx_xfer_entry_alloc(struct tcpx_cq *tcpx_cq, enum tcpx_xfer_op_codes type) { struct tcpx_xfer_entry *xfer_entry; tcpx_cq->util_cq.cq_fastlock_acquire(&tcpx_cq->util_cq.cq_lock); /* optimization: don't allocate queue_entry when cq is full */ if (ofi_cirque_isfull(tcpx_cq->util_cq.cirq)) { tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock); return NULL; } xfer_entry = ofi_buf_alloc(tcpx_cq->buf_pools[type].pool); if (!xfer_entry) { tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock); FI_INFO(&tcpx_prov, FI_LOG_DOMAIN,"failed to get buffer\n"); return NULL; } tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock); return xfer_entry; }