/*
 * Signal a waiting CQ reader: raise the "signaled" flag, then poke the
 * ring-buffer fd under the CQ lock so any blocked sreadfrom() wakes up.
 * Always succeeds.
 */
static int sock_cq_signal(struct fid_cq *cq)
{
	struct sock_cq *scq = container_of(cq, struct sock_cq, cq_fid);

	/* Flag first so the woken reader sees it and returns -FI_ECANCELED. */
	ofi_atomic_set32(&scq->signaled, 1);

	fastlock_acquire(&scq->lock);
	ofi_rbfdsignal(&scq->cq_rbfd);
	fastlock_release(&scq->lock);
	return 0;
}
/*
 * Post a send work request on the endpoint's QP, maintaining the
 * unsignaled-send bookkeeping used for send-completion suppression.
 *
 * @param ep      verbs MSG endpoint (must have a send CQ attached)
 * @param wr      work request to post; num_sge and wr_id are filled in here
 * @param len     total payload length (unused in this path)
 * @param count   number of SGEs in wr->sg_list
 * @param context user context stored as wr_id for completion matching
 *
 * Returns 0 on success or a negative fabric error code.
 */
ssize_t fi_ibv_send(struct fi_ibv_msg_ep *ep, struct ibv_send_wr *wr,
		    size_t len, int count, void *context)
{
	struct ibv_send_wr *bad_wr;
	int ret;

	assert(ep->scq);
	wr->num_sge = count;
	wr->wr_id = (uintptr_t) context;
	if (wr->send_flags & IBV_SEND_SIGNALED) {
		/* Caller asked for a completion: the wr_id must not collide
		 * with the CQ's reserved internal-signal sentinel, and the
		 * unsignaled-send counter resets since this WR will flush
		 * outstanding sends. */
		assert((wr->wr_id & ep->scq->wr_id_mask) !=
		       ep->scq->send_signal_wr_id);
		ofi_atomic_set32(&ep->unsignaled_send_cnt, 0);
	} else {
		if (VERBS_SIGNAL_SEND(ep)) {
			/* Endpoint policy: force-signal this send. */
			ret = fi_ibv_signal_send(ep, wr);
			if (ret)
				return ret;
		} else {
			/* Track suppressed completions; once the threshold is
			 * reached, reap the CQ to avoid exhausting SQ slots. */
			ofi_atomic_inc32(&ep->unsignaled_send_cnt);
			if (ofi_atomic_get32(&ep->unsignaled_send_cnt) >=
			    VERBS_SEND_COMP_THRESH(ep)) {
				ret = fi_ibv_reap_comp(ep);
				if (ret)
					return ret;
			}
		}
	}

	ret = ibv_post_send(ep->id->qp, wr, &bad_wr);
	switch (ret) {
	case ENOMEM:
		/* SQ full: report as retryable. */
		return -FI_EAGAIN;
	case -1:
		/* Deal with non-compliant libibverbs drivers which set errno
		 * instead of directly returning the error value */
		return (errno == ENOMEM) ? -FI_EAGAIN : -errno;
	default:
		/* 0 stays 0; positive errno values become negative fabric
		 * error codes. */
		return -ret;
	}
}
/** * This function will return a block of id's starting at id through nids * * @param domain gnix domain * @param nids number of id's * @param id if -1 return an id based on the counter and seed */ int _gnix_get_new_cdm_id_set(struct gnix_fid_domain *domain, int nids, uint32_t *id) { uint32_t cdm_id; int v; if (*id == -1) { v = ofi_atomic_add32(&gnix_id_counter, nids); cdm_id = ((domain->cdm_id_seed & 0xFFF) << 12) | v; *id = cdm_id; } else { /* * asking for a block starting at a chosen base * TODO: sanity check that requested base is reasonable */ if (*id <= ofi_atomic_get32(&gnix_id_counter)) return -FI_ENOSPC; ofi_atomic_set32(&gnix_id_counter, (*(int *)id + nids)); } return FI_SUCCESS; }
/*
 * Blocking CQ read: copy up to `count` completion entries into `buf`,
 * optionally reporting source addresses, waiting up to `timeout` ms
 * (negative timeout = wait forever).
 *
 * Returns the number of entries read, -FI_EAVAIL if error entries are
 * pending, -FI_EAGAIN on timeout, or -FI_ECANCELED if the CQ was signaled.
 */
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
				 fi_addr_t *src_addr, const void *cond,
				 int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	/* Error entries take precedence: caller must drain them via readerr. */
	if (ofi_rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	/* Under FI_CQ_COND_THRESHOLD, `cond` carries the threshold count. */
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	start_ms = (timeout >= 0) ? fi_gettime_ms() : 0;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		/* Manual progress: spin, driving progress ourselves. */
		while (1) {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			}
			fastlock_release(&sock_cq->lock);
			if (ret)
				return ret;

			/* NOTE(review): elapsed-since-start is subtracted
			 * from `timeout` every iteration without resetting
			 * start_ms, so the budget drains faster than wall
			 * time after the first pass — confirm intended. */
			if (timeout >= 0) {
				timeout -= (int) (fi_gettime_ms() - start_ms);
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			/* A signal from sock_cq_signal() cancels the wait. */
			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
		};
	} else {
		/* Auto progress: block on the ring-buffer fd between polls. */
		do {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = ofi_rbfdused(&sock_cq->cq_rbfd);
			if (avail) {
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, (size_t)(avail / cq_entry_len)),
					src_addr, cq_entry_len);
			} else {
				/* Nothing buffered: rearm the fd so the
				 * upcoming wait blocks until new entries. */
				ofi_rbfdreset(&sock_cq->cq_rbfd);
			}
			fastlock_release(&sock_cq->lock);
			if (ret && ret != -FI_EAGAIN)
				return ret;

			if (timeout >= 0) {
				timeout -= (int) (fi_gettime_ms() - start_ms);
				if (timeout <= 0)
					return -FI_EAGAIN;
			}

			if (ofi_atomic_get32(&sock_cq->signaled)) {
				ofi_atomic_set32(&sock_cq->signaled, 0);
				return -FI_ECANCELED;
			}
			ret = ofi_rbfdwait(&sock_cq->cq_rbfd, timeout);
		} while (ret > 0);
	}

	/* Timeout from the fd wait maps to -FI_EAGAIN like the manual path. */
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}