/*
 * Blocking read of completions from a CQ, optionally returning source
 * addresses.
 *
 * buf receives up to 'count' entries of sock_cq->cq_entry_size bytes each.
 * cond carries the threshold count when the CQ was opened with
 * FI_CQ_COND_THRESHOLD.  timeout is in milliseconds; negative means wait
 * forever.
 *
 * Returns the number of entries read, -FI_EAVAIL if error completions are
 * pending (caller must drain them via the readerr path), or -FI_EAGAIN if
 * nothing arrived before the timeout expired.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
				 fi_addr_t *src_addr, const void *cond,
				 int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms = 0, end_ms = 0, now;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);

	/* Error completions must be reported before normal ones. */
	if (rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Manual progress: poll, driving progress ourselves. */
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}
		do {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				if (fi_gettime_ms() >= end_ms)
					return -FI_EAGAIN;
			}
		} while (ret == 0);
	} else {
		/*
		 * Auto progress: sleep on the ring-buffer fd.  The wait may
		 * wake without data being available for us (e.g. a concurrent
		 * reader drained the buffer first), so re-wait with the
		 * remaining time instead of returning -FI_EAGAIN early.
		 */
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}
		do {
			ret = rbfdwait(&sock_cq->cq_rbfd, timeout);
			if (ret <= 0)
				break;
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				now = fi_gettime_ms();
				if (now >= end_ms)
					return -FI_EAGAIN;
				timeout = (int) (end_ms - now);
			}
		} while (ret == 0);
	}

	/* No data (or wait timed out) maps to -FI_EAGAIN per fi_cq_sread. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
/*
 * Blocking read of completions from a CQ, optionally returning source
 * addresses.
 *
 * buf receives up to 'count' entries of sock_cq->cq_entry_size bytes each.
 * cond carries the threshold count when the CQ was opened with
 * FI_CQ_COND_THRESHOLD.  timeout is in milliseconds; negative means wait
 * forever.
 *
 * Returns the number of entries read, -FI_EAVAIL if error completions are
 * pending, -FI_ECANCELED if the CQ was signaled, or -FI_EAGAIN if nothing
 * arrived before the timeout expired.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
				 fi_addr_t *src_addr, const void *cond,
				 int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms, now;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);

	/* Error completions must be reported before normal ones. */
	if (ofi_rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	start_ms = (timeout >= 0) ? fi_gettime_ms() : 0;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Manual progress: poll, driving progress ourselves. */
		while (1) {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			}
			fastlock_release(&sock_cq->lock);
			if (ret)
				return ret;

			if (timeout >= 0) {
				/*
				 * Subtract only the time elapsed since the
				 * previous check; advancing start_ms keeps
				 * the decrement incremental (otherwise the
				 * total elapsed time would be subtracted on
				 * every pass and the timeout would expire
				 * far too early).
				 */
				now = fi_gettime_ms();
				timeout -= (int) (now - start_ms);
				start_ms = now;
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
		}
	} else {
		/* Auto progress: sleep on the ring-buffer fd between reads. */
		do {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold,
					    (size_t) (avail / cq_entry_len)),
					src_addr, cq_entry_len);
			} else {
				/* Rearm the fd so the wait below blocks. */
				ofi_rbfdreset(&sock_cq->cq_rbfd);
			}
			fastlock_release(&sock_cq->lock);
			if (ret && ret != -FI_EAGAIN)
				return ret;

			if (timeout >= 0) {
				/* Incremental decrement; see comment above. */
				now = fi_gettime_ms();
				timeout -= (int) (now - start_ms);
				start_ms = now;
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
			ret = ofi_rbfdwait(&sock_cq->cq_rbfd, timeout);
		} while (ret > 0);
	}

	/* No data (or wait timed out) maps to -FI_EAGAIN per fi_cq_sread. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}