static inline void
usdf_progress_hard_cq(struct usdf_cq_hard *hcq, enum fi_cq_format format)
{
        int ret;
        struct usd_completion comp;
        void *entry;
        size_t entry_size;
        struct fi_cq_entry *ctx_entry;
        struct fi_cq_msg_entry *msg_entry;
        struct fi_cq_data_entry *data_entry;
        struct usdf_cq *cq;

        cq = hcq->cqh_cq;

        do {
                ret = usd_poll_cq(hcq->cqh_ucq, &comp);
                if (ret == 0) {
                        entry = cq->c.soft.cq_head;

                        /* format the completion just polled into the
                         * current soft-ring slot */
                        switch (format) {
                        case FI_CQ_FORMAT_CONTEXT:
                                entry_size = sizeof(*ctx_entry);
                                ctx_entry = (struct fi_cq_entry *)entry;
                                ctx_entry->op_context = comp.uc_context;
                                break;
                        case FI_CQ_FORMAT_MSG:
                                entry_size = sizeof(*msg_entry);
                                msg_entry = (struct fi_cq_msg_entry *)entry;
                                msg_entry->op_context = comp.uc_context;
                                msg_entry->flags = 0;
                                msg_entry->len = comp.uc_bytes;
                                break;
                        case FI_CQ_FORMAT_DATA:
                                entry_size = sizeof(*data_entry);
                                data_entry = (struct fi_cq_data_entry *)entry;
                                data_entry->op_context = comp.uc_context;
                                data_entry->flags = 0;
                                data_entry->len = comp.uc_bytes;
                                data_entry->buf = 0;    /* XXX */
                                data_entry->data = 0;
                                break;
                        default:
                                return;
                        }

                        /* update with wrap */
                        entry = (uint8_t *)entry + entry_size;
                        if (entry != cq->c.soft.cq_end) {
                                cq->c.soft.cq_head = entry;
                        } else {
                                cq->c.soft.cq_head = cq->c.soft.cq_comps;
                        }
                }
        } while (ret != -EAGAIN);
}
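/*
 * The soft-CQ bookkeeping fields used above (cq_head, cq_end, cq_comps,
 * plus cq_tail and cq_last_op in the variant further down) are defined
 * elsewhere in the provider.  A minimal sketch of the layout they imply;
 * the struct name and field types here are assumptions, not the source
 * definition (the later variant uses typed usdf_cq_soft_entry pointers
 * rather than raw byte pointers):
 */
struct usdf_cq_soft_ring_sketch {
        void *cq_comps;         /* base of the ring storage */
        void *cq_end;           /* one past the last slot */
        void *cq_head;          /* producer: next slot to write */
        void *cq_tail;          /* consumer: next slot to read */
        uint8_t cq_last_op;     /* USDF_SOFT_CQ_READ or USDF_SOFT_CQ_WRITE;
                                 * disambiguates full vs. empty when
                                 * head == tail */
};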
/*
 * poll a hard CQ
 * Since this routine is an inline and is always called with format as
 * a constant, I am counting on the compiler optimizing away all the switches
 * on format.
 */
static inline ssize_t
usdf_cq_read_common(struct fid_cq *fcq, void *buf, size_t count,
                enum fi_cq_format format)
{
        struct usdf_cq *cq;
        uint8_t *entry;
        uint8_t *last;
        size_t entry_len;
        ssize_t ret;

        cq = cq_ftou(fcq);
        if (cq->cq_comp.uc_status != 0)
                return -FI_EAVAIL;

        switch (format) {
        case FI_CQ_FORMAT_CONTEXT:
                entry_len = sizeof(struct fi_cq_entry);
                break;
        case FI_CQ_FORMAT_MSG:
                entry_len = sizeof(struct fi_cq_msg_entry);
                break;
        case FI_CQ_FORMAT_DATA:
                entry_len = sizeof(struct fi_cq_data_entry);
                break;
        default:
                return 0;
        }

        ret = 0;
        entry = buf;
        last = entry + (entry_len * count);

        while (entry < last) {
                ret = usd_poll_cq(cq->c.hard.cq_cq, &cq->cq_comp);
                if (ret == -EAGAIN)
                        break;
                if (cq->cq_comp.uc_status != 0) {
                        ret = -FI_EAVAIL;
                        break;
                }

                ret = usdf_cq_copy_cq_entry(entry, &cq->cq_comp, format);
                if (ret < 0)
                        return ret;
                entry += entry_len;
        }

        if (entry > (uint8_t *)buf)
                return (entry - (uint8_t *)buf) / entry_len;
        else
                return ret;
}
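/*
 * usdf_cq_copy_cq_entry() is called above and in usdf_cq_sread_common()
 * below but is not part of this excerpt.  A plausible reconstruction from
 * the per-format switch that appears inline in the other version of
 * usdf_cq_read_common() further down; treat it as a sketch of the helper,
 * not the verbatim source:
 */
static inline ssize_t
usdf_cq_copy_cq_entry(void *dst, struct usd_completion *src,
                enum fi_cq_format format)
{
        struct fi_cq_entry *ctx_entry;
        struct fi_cq_msg_entry *msg_entry;
        struct fi_cq_data_entry *data_entry;

        switch (format) {
        case FI_CQ_FORMAT_CONTEXT:
                ctx_entry = (struct fi_cq_entry *)dst;
                ctx_entry->op_context = src->uc_context;
                break;
        case FI_CQ_FORMAT_MSG:
                msg_entry = (struct fi_cq_msg_entry *)dst;
                msg_entry->op_context = src->uc_context;
                msg_entry->flags = 0;
                msg_entry->len = src->uc_bytes;
                break;
        case FI_CQ_FORMAT_DATA:
                data_entry = (struct fi_cq_data_entry *)dst;
                data_entry->op_context = src->uc_context;
                data_entry->flags = 0;
                data_entry->len = src->uc_bytes;
                data_entry->buf = 0;    /* XXX */
                data_entry->data = 0;
                break;
        default:
                return -FI_EOPNOTSUPP;
        }
        return 0;
}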
/*
 * Process message completions
 */
void
usdf_msg_hcq_progress(struct usdf_cq_hard *hcq)
{
        struct usd_completion comp;

        while (usd_poll_cq(hcq->cqh_ucq, &comp) != -EAGAIN) {
                switch (comp.uc_type) {
                case USD_COMPTYPE_SEND:
                        usdf_msg_send_completion(&comp);
                        break;
                case USD_COMPTYPE_RECV:
                        usdf_msg_handle_recv(hcq->cqh_cq->cq_domain, &comp);
                        break;
                }
        }
}
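/*
 * For orientation, these are the struct usd_completion fields the routines
 * in this excerpt touch.  The real definition belongs to the usd layer;
 * the types below are assumptions inferred from usage, and the struct is
 * renamed to make clear it is only a sketch:
 */
struct usd_qp_impl;                     /* opaque here; its uq_context
                                         * points back at the owning ep */
struct usd_completion_sketch {
        int uc_type;                    /* USD_COMPTYPE_SEND or _RECV */
        int uc_status;                  /* 0 on success, else error code */
        size_t uc_bytes;                /* bytes transferred */
        void *uc_context;               /* context from the posted op */
        struct usd_qp_impl *uc_qp;      /* QP the completion belongs to */
};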
void
usdf_progress_hard_cq(struct usdf_cq_hard *hcq)
{
        int ret;
        struct usd_completion comp;
        struct usdf_cq_soft_entry *entry;
        struct usdf_cq *cq;

        cq = hcq->cqh_cq;

        do {
                ret = usd_poll_cq(hcq->cqh_ucq, &comp);
                if (ret == 0) {
                        entry = cq->c.soft.cq_head;

                        /* If the current entry is equal to the tail and the
                         * last operation was a write, then we have filled the
                         * queue and we just drop whatever there isn't space
                         * for.
                         */
                        if ((entry == cq->c.soft.cq_tail) &&
                            (cq->c.soft.cq_last_op == USDF_SOFT_CQ_WRITE))
                                return;

                        entry->cse_context = comp.uc_context;
                        entry->cse_flags = 0;
                        entry->cse_len = comp.uc_bytes;
                        entry->cse_buf = 0;     /* XXX TODO */
                        entry->cse_data = 0;

                        /* update with wrap */
                        entry++;
                        if (entry != cq->c.soft.cq_end) {
                                cq->c.soft.cq_head = entry;
                        } else {
                                cq->c.soft.cq_head = cq->c.soft.cq_comps;
                        }

                        cq->c.soft.cq_last_op = USDF_SOFT_CQ_WRITE;
                }
        } while (ret != -EAGAIN);
}
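/*
 * The consumer side of the soft ring is not part of this excerpt.  A sketch
 * of how a read path could drain what usdf_progress_hard_cq() produced;
 * the function name and USDF_SOFT_CQ_READ are assumptions that mirror the
 * producer logic above:
 */
static inline int
usdf_cq_soft_pop(struct usdf_cq *cq, struct usdf_cq_soft_entry *out)
{
        struct usdf_cq_soft_entry *tail;

        tail = cq->c.soft.cq_tail;

        /* head == tail means empty only if the last op was a read */
        if (tail == cq->c.soft.cq_head &&
            cq->c.soft.cq_last_op == USDF_SOFT_CQ_READ)
                return -FI_EAGAIN;

        *out = *tail;

        /* advance the tail, wrapping exactly as the producer does */
        tail++;
        if (tail != cq->c.soft.cq_end)
                cq->c.soft.cq_tail = tail;
        else
                cq->c.soft.cq_tail = cq->c.soft.cq_comps;

        cq->c.soft.cq_last_op = USDF_SOFT_CQ_READ;
        return 0;
}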
static ssize_t
usdf_cq_read_data(struct fid_cq *fcq, void *buf, size_t count)
{
        struct usdf_cq *cq;
        struct fi_cq_data_entry *entry;
        struct fi_cq_data_entry *last;
        ssize_t ret;

        cq = cq_ftou(fcq);
        if (cq->cq_comp.uc_status != 0) {
                return -FI_EAVAIL;
        }

        ret = 0;
        entry = buf;
        last = entry + count;

        while (entry < last) {
                ret = usd_poll_cq(cq->cq_cq, &cq->cq_comp);
                if (ret == -EAGAIN) {
                        /* leave ret as -EAGAIN so an empty read reports
                         * -FI_EAGAIN, per the fi_cq_read() contract */
                        break;
                }
                if (cq->cq_comp.uc_status != 0) {
                        ret = -FI_EAVAIL;
                        break;
                }

                entry->op_context = cq->cq_comp.uc_context;
                entry->flags = 0;
                entry->len = cq->cq_comp.uc_bytes;
                entry->buf = 0;         /* XXX */
                entry->data = 0;

                entry++;
        }

        if (entry > (struct fi_cq_data_entry *)buf) {
                return entry - (struct fi_cq_data_entry *)buf;
        } else {
                return ret;
        }
}
static ssize_t
usdf_cq_readfrom_context_soft(struct fid_cq *fcq, void *buf, size_t count,
                fi_addr_t *src_addr)
{
        struct usdf_cq *cq;
        struct usd_cq_impl *ucq;
        struct fi_cq_entry *entry;
        struct fi_cq_entry *last;
        ssize_t ret;
        struct cq_desc *cq_desc;
        struct usdf_ep *ep;
        struct sockaddr_in sin;
        struct usd_udp_hdr *hdr;
        uint16_t index;

        cq = cq_ftou(fcq);
        if (cq->cq_comp.uc_status != 0) {
                return -FI_EAVAIL;
        }
        ucq = to_cqi(cq->c.hard.cq_cq);

        ret = 0;
        entry = buf;
        last = entry + count;

        while (entry < last) {
                /* snapshot the raw 16-byte CQ descriptor that the poll below
                 * is about to consume (it advances ucq_next_desc); receives
                 * need the completed index out of it */
                cq_desc = (struct cq_desc *)((uint8_t *)ucq->ucq_desc_ring +
                                (ucq->ucq_next_desc << 4));

                ret = usd_poll_cq(cq->c.hard.cq_cq, &cq->cq_comp);
                if (ret == -EAGAIN) {
                        /* leave ret as -EAGAIN so an empty read reports
                         * -FI_EAGAIN */
                        break;
                }
                if (cq->cq_comp.uc_status != 0) {
                        ret = -FI_EAVAIL;
                        break;
                }

                if (cq->cq_comp.uc_type == USD_COMPTYPE_RECV) {
                        index = le16_to_cpu(cq_desc->completed_index) &
                                CQ_DESC_COMP_NDX_MASK;
                        ep = cq->cq_comp.uc_qp->uq_context;
                        hdr = ep->e.dg.ep_hdr_ptr[index];

                        /* resolve the sender from the packet header */
                        memset(&sin, 0, sizeof(sin));
                        sin.sin_addr.s_addr = hdr->uh_ip.saddr;
                        sin.sin_port = hdr->uh_udp.source;

                        ret = fi_av_insert(av_utof(ep->e.dg.ep_av), &sin, 1,
                                        src_addr, 0, NULL);
                        if (ret != 1) {
                                *src_addr = FI_ADDR_NOTAVAIL;
                        }
                        ++src_addr;
                }

                entry->op_context = cq->cq_comp.uc_context;

                entry++;
        }

        if (entry > (struct fi_cq_entry *)buf) {
                return entry - (struct fi_cq_entry *)buf;
        } else {
                return ret;
        }
}
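/*
 * Caller's view of the routine above, through the standard fi_cq_readfrom()
 * entry point.  Application-side sketch; the function name is made up.
 * Note that the provider advances src_addr only for receive completions
 * while the entry array advances for every completion, so the positional
 * pairing below assumes a receive-only CQ:
 */
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

static void drain_recv_cq(struct fid_cq *cq)
{
        struct fi_cq_entry comps[8];
        fi_addr_t srcs[8];
        ssize_t n, i;

        for (;;) {
                n = fi_cq_readfrom(cq, comps, 8, srcs);
                if (n == -FI_EAGAIN)
                        break;          /* nothing pending right now */
                if (n == -FI_EAVAIL)
                        break;          /* error completion queued; fetch it
                                         * with fi_cq_readerr() */
                for (i = 0; i < n; i++) {
                        if (srcs[i] == FI_ADDR_NOTAVAIL)
                                continue; /* sender could not be resolved */
                        /* ... handle comps[i].op_context from srcs[i] ... */
                }
        }
}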
/*
 * poll a hard CQ
 * Since this routine is an inline and is always called with format as
 * a constant, I am counting on the compiler optimizing away all the switches
 * on format.
 */
static inline ssize_t
usdf_cq_read_common(struct fid_cq *fcq, void *buf, size_t count,
                enum fi_cq_format format)
{
        struct usdf_cq *cq;
        uint8_t *entry;
        uint8_t *last;
        size_t entry_len;
        struct fi_cq_entry *ctx_entry;
        struct fi_cq_msg_entry *msg_entry;
        struct fi_cq_data_entry *data_entry;
        ssize_t ret;

        cq = cq_ftou(fcq);
        if (cq->cq_comp.uc_status != 0) {
                return -FI_EAVAIL;
        }

        switch (format) {
        case FI_CQ_FORMAT_CONTEXT:
                entry_len = sizeof(struct fi_cq_entry);
                break;
        case FI_CQ_FORMAT_MSG:
                entry_len = sizeof(struct fi_cq_msg_entry);
                break;
        case FI_CQ_FORMAT_DATA:
                entry_len = sizeof(struct fi_cq_data_entry);
                break;
        default:
                return 0;
        }

        ret = 0;
        entry = buf;
        last = entry + (entry_len * count);

        while (entry < last) {
                ret = usd_poll_cq(cq->c.hard.cq_cq, &cq->cq_comp);
                if (ret == -EAGAIN) {
                        break;
                }
                if (cq->cq_comp.uc_status != 0) {
                        ret = -FI_EAVAIL;
                        break;
                }

                switch (format) {
                case FI_CQ_FORMAT_CONTEXT:
                        ctx_entry = (struct fi_cq_entry *)entry;
                        ctx_entry->op_context = cq->cq_comp.uc_context;
                        break;
                case FI_CQ_FORMAT_MSG:
                        msg_entry = (struct fi_cq_msg_entry *)entry;
                        msg_entry->op_context = cq->cq_comp.uc_context;
                        msg_entry->flags = 0;
                        msg_entry->len = cq->cq_comp.uc_bytes;
                        break;
                case FI_CQ_FORMAT_DATA:
                        data_entry = (struct fi_cq_data_entry *)entry;
                        data_entry->op_context = cq->cq_comp.uc_context;
                        data_entry->flags = 0;
                        data_entry->len = cq->cq_comp.uc_bytes;
                        data_entry->buf = 0;    /* XXX */
                        data_entry->data = 0;
                        break;
                default:
                        return 0;
                }

                entry += entry_len;
        }

        if (entry > (uint8_t *)buf) {
                return (entry - (uint8_t *)buf) / entry_len;
        } else {
                return ret;
        }
}
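/*
 * The comment above only pays off if usdf_cq_read_common() is reached
 * through per-format wrappers whose format argument is a compile-time
 * constant.  A sketch of what those callers would look like; the names are
 * assumptions patterned on the provider's style (the open-coded
 * usdf_cq_read_data() earlier in this excerpt is the pre-refactor form of
 * the FI_CQ_FORMAT_DATA case):
 */
static ssize_t
usdf_cq_read_context(struct fid_cq *fcq, void *buf, size_t count)
{
        return usdf_cq_read_common(fcq, buf, count, FI_CQ_FORMAT_CONTEXT);
}

static ssize_t
usdf_cq_read_msg(struct fid_cq *fcq, void *buf, size_t count)
{
        return usdf_cq_read_common(fcq, buf, count, FI_CQ_FORMAT_MSG);
}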
static inline ssize_t
usdf_cq_sread_common(struct fid_cq *fcq, void *buf, size_t count,
                const void *cond, int timeout_ms, enum fi_cq_format format)
{
        struct usdf_cq *cq;
        uint8_t *entry;
        uint8_t *last;
        size_t entry_len;
        ssize_t ret;
        size_t sleep_time_us;
        size_t time_spent_us = 0;

        sleep_time_us = SREAD_INIT_SLEEP_TIME_US;

        cq = cq_ftou(fcq);
        if (cq->cq_attr.wait_obj == FI_WAIT_NONE)
                return -FI_EOPNOTSUPP;
        if (cq->cq_comp.uc_status != 0)
                return -FI_EAVAIL;

        switch (format) {
        case FI_CQ_FORMAT_CONTEXT:
                entry_len = sizeof(struct fi_cq_entry);
                break;
        case FI_CQ_FORMAT_MSG:
                entry_len = sizeof(struct fi_cq_msg_entry);
                break;
        case FI_CQ_FORMAT_DATA:
                entry_len = sizeof(struct fi_cq_data_entry);
                break;
        default:
                return 0;
        }

        ret = 0;
        entry = buf;
        last = entry + (entry_len * count);

        while (entry < last) {
                ret = usd_poll_cq(cq->c.hard.cq_cq, &cq->cq_comp);
                if (ret == -EAGAIN) {
                        if (entry > (uint8_t *)buf)
                                break;
                        if (timeout_ms >= 0 &&
                            (time_spent_us >= 1000 * timeout_ms))
                                break;

                        usleep(sleep_time_us);
                        time_spent_us += sleep_time_us;

                        /* exponentially back off up to a limit */
                        if (sleep_time_us < SREAD_MAX_SLEEP_TIME_US)
                                sleep_time_us *= SREAD_EXP_BASE;
                        sleep_time_us = MIN(sleep_time_us,
                                        SREAD_MAX_SLEEP_TIME_US);
                        continue;
                }
                if (cq->cq_comp.uc_status != 0) {
                        if (entry > (uint8_t *)buf)
                                break;
                        else
                                return -FI_EAVAIL;
                }

                ret = usdf_cq_copy_cq_entry(entry, &cq->cq_comp, format);
                if (ret < 0)
                        return ret;
                entry += entry_len;
        }

        if (entry > (uint8_t *)buf)
                return (entry - (uint8_t *)buf) / entry_len;
        return -FI_EAGAIN;
}
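/*
 * Application-side counterpart of the backoff loop above: a blocking read
 * with a millisecond timeout via the standard fi_cq_sread() call (a
 * negative timeout means wait indefinitely).  Sketch only; the function
 * name is made up:
 */
#include <rdma/fabric.h>
#include <rdma/fi_eq.h>

static ssize_t wait_for_completions(struct fid_cq *cq)
{
        struct fi_cq_msg_entry comps[4];
        ssize_t n;

        /* block for up to 500 ms; the provider above rejects CQs opened
         * with FI_WAIT_NONE, so a wait object must have been requested */
        n = fi_cq_sread(cq, comps, 4, NULL, 500);
        if (n == -FI_EAGAIN)
                return 0;       /* timed out, nothing arrived */
        return n;               /* entries read, or -FI_EAVAIL et al. */
}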