/*
 * Blocking read from a completion queue, optionally reporting the source
 * address of each completion.
 *
 * buf/count: destination array and its capacity in CQ entries.
 * src_addr:  optional per-entry source-address output, forwarded to
 *            sock_cq_rbuf_read().
 * cond:      with FI_CQ_COND_THRESHOLD, the minimum entry count encoded as
 *            a pointer-sized integer; ignored for other wait conditions.
 * timeout:   milliseconds to wait; a negative value means wait forever.
 *
 * Returns the number of entries read, -FI_EAVAIL if an error completion is
 * pending (it must be drained via the CQ readerr path first), or -FI_EAGAIN
 * when the wait times out with nothing available.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
				 fi_addr_t *src_addr, const void *cond,
				 int timeout)
{
	/* Fix: ret was declared int, truncating the ssize_t result of
	 * sock_cq_rbuf_read() (and thus the function's return value) on
	 * LP64 targets; keep it ssize_t end to end. */
	ssize_t ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms = 0, end_ms = 0;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);

	/* A queued error completion takes precedence over data entries. */
	if (rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Manual progress: drive the CQ ourselves and poll until
		 * entries arrive or the deadline passes. */
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}

		do {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				/* avail > 0 here, so the quotient is
				 * non-negative and the size_t cast keeps
				 * MIN() from mixing signedness. */
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				if (fi_gettime_ms() >= end_ms)
					return -FI_EAGAIN;
			}
		} while (ret == 0);
	} else {
		/* Automatic progress: block on the ring-buffer fd until it
		 * signals readiness or the timeout elapses. */
		ret = rbfdwait(&sock_cq->cq_rbfd, timeout);
		if (ret > 0) {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
		}
	}

	/* Normalize "nothing read" and fd-wait timeout to -FI_EAGAIN. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
static int sock_poll_poll(struct fid_poll *pollset, void **context, int count) { struct sock_poll *poll; struct sock_cq *cq; struct sock_eq *eq; struct sock_cntr *cntr; struct sock_fid_list *list_item; struct dlist_entry *p, *head; int ret_count = 0; poll = container_of(pollset, struct sock_poll, poll_fid.fid); head = &poll->fid_list; for (p = head->next; p != head && ret_count < count; p = p->next) { list_item = container_of(p, struct sock_fid_list, entry); switch (list_item->fid->fclass) { case FI_CLASS_CQ: cq = container_of(list_item->fid, struct sock_cq, cq_fid); sock_cq_progress(cq); fastlock_acquire(&cq->lock); if (rbfdused(&cq->cq_rbfd)) { *context++ = cq->cq_fid.fid.context; ret_count++; } fastlock_release(&cq->lock); break; case FI_CLASS_CNTR: cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid); sock_cntr_progress(cntr); fastlock_acquire(&cntr->mut); if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold)) { *context++ = cntr->cntr_fid.fid.context; ret_count++; } fastlock_release(&cntr->mut); break; case FI_CLASS_EQ: eq = container_of(list_item->fid, struct sock_eq, eq); fastlock_acquire(&eq->lock); if (!dlistfd_empty(&eq->list)) { *context++ = eq->eq.fid.context; ret_count++; } fastlock_release(&eq->lock); break; default: break; } } return ret_count; }