/*
 * Translate one ibv work completion into slot @i of the caller's
 * fi_cq_msg_entry array @buf (context, completion flags, byte length).
 */
static void fi_ibv_cq_read_msg_entry(struct ibv_wc *wc, int i, void *buf)
{
	struct fi_cq_msg_entry *dst = &((struct fi_cq_msg_entry *) buf)[i];

	dst->len = (uint64_t) wc->byte_len;
	dst->flags = fi_ibv_comp_flags(wc);
	dst->op_context = (void *) (uintptr_t) wc->wr_id;
}
static void fi_ibv_cq_read_data_entry(struct ibv_wc *wc, int i, void *buf) { struct fi_cq_data_entry *entry = buf; entry[i].op_context = (void *) (uintptr_t) wc->wr_id; entry[i].flags = fi_ibv_comp_flags(wc); entry[i].data = (wc->wc_flags & IBV_WC_WITH_IMM) ? ntohl(wc->imm_data) : 0; entry->len = (wc->opcode & (IBV_WC_RECV | IBV_WC_RECV_RDMA_WITH_IMM)) ? wc->byte_len : 0; }
static ssize_t fi_ibv_cq_readerr(struct fid_cq *cq_fid, struct fi_cq_err_entry *entry, uint64_t flags) { struct fi_ibv_cq *cq; struct fi_ibv_wce *wce; struct slist_entry *slist_entry; cq = container_of(cq_fid, struct fi_ibv_cq, cq_fid); fastlock_acquire(&cq->lock); if (slist_empty(&cq->wcq)) goto err; wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry); if (!wce->wc.status) goto err; slist_entry = slist_remove_head(&cq->wcq); fastlock_release(&cq->lock); wce = container_of(slist_entry, struct fi_ibv_wce, entry); entry->op_context = (void *) (uintptr_t) wce->wc.wr_id; entry->flags = fi_ibv_comp_flags(&wce->wc); entry->err = EIO; entry->prov_errno = wce->wc.status; memcpy(&entry->err_data, &wce->wc.vendor_err, sizeof(wce->wc.vendor_err)); util_buf_release(cq->domain->fab->wce_pool, wce); return sizeof(*entry); err: fastlock_release(&cq->lock); return -FI_EAGAIN; }