/*
 * Progress the RxD completion queue: drain completions from the underlying
 * datagram CQ, then reprocess receive completions that were queued on the
 * unexpected list.
 */
void rxd_cq_progress(struct util_cq *util_cq)
{
	ssize_t ret = 0;
	struct rxd_cq *cq;
	struct fi_cq_msg_entry cq_entry;
	struct dlist_entry *item, *next;
	struct rxd_unexp_cq_entry *unexp;

	cq = container_of(util_cq, struct rxd_cq, util_cq);
	fastlock_acquire(&cq->lock);

	/* Drain the datagram CQ until it reports -FI_EAGAIN. */
	do {
		ret = fi_cq_read(cq->dg_cq, &cq_entry, 1);
		if (ret == -FI_EAGAIN)
			break;

		if (cq_entry.flags & FI_SEND) {
			rxd_handle_send_comp(&cq_entry);
		} else if (cq_entry.flags & FI_RECV) {
			rxd_handle_recv_comp(cq, &cq_entry, 0);
		} else {
			assert(0);
		}
	} while (ret > 0);

	/* Retry completions that could not be matched on a previous pass. */
	for (item = cq->unexp_list.next; item != &cq->unexp_list;) {
		unexp = container_of(item, struct rxd_unexp_cq_entry, entry);
		next = item->next;
		rxd_handle_recv_comp(cq, &unexp->cq_entry, 1);
		item = next;
	}
	fastlock_release(&cq->lock);
}
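/*
 * Endpoint progress handler: drain completions from the datagram CQ
 * (bounded by rxd_env.spin_count when it is non-zero), then, if retries
 * are enabled, retransmit and progress the per-peer packet lists, and
 * finally replenish the posted receive buffers.
 */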
static void rxd_ep_progress(struct util_ep *util_ep)
{
	struct rxd_peer *peer;
	struct fi_cq_msg_entry cq_entry;
	struct dlist_entry *tmp;
	struct rxd_ep *ep;
	ssize_t ret;
	int i;

	ep = container_of(util_ep, struct rxd_ep, util_ep);
	fastlock_acquire(&ep->util_ep.lock);

	/* Read the datagram CQ; a spin_count of 0 places no bound on the loop. */
	for (ret = 1, i = 0;
	     ret > 0 && (!rxd_env.spin_count || i < rxd_env.spin_count); i++) {
		ret = fi_cq_read(ep->dg_cq, &cq_entry, 1);
		if (ret == -FI_EAGAIN)
			break;

		if (ret == -FI_EAVAIL) {
			rxd_handle_error(ep);
			continue;
		}

		if (cq_entry.flags & FI_RECV)
			rxd_handle_recv_comp(ep, &cq_entry);
		else
			rxd_handle_send_comp(ep, &cq_entry);
	}

	if (!rxd_env.retry)
		goto out;

	/* Retransmit outstanding packets and advance per-peer transmit queues. */
	ep->next_retry = -1;
	dlist_foreach_container_safe(&ep->rts_sent_list, struct rxd_peer,
				     peer, entry, tmp)
		rxd_progress_pkt_list(ep, peer);

	dlist_foreach_container_safe(&ep->active_peers, struct rxd_peer,
				     peer, entry, tmp) {
		rxd_progress_pkt_list(ep, peer);
		if (dlist_empty(&peer->unacked))
			rxd_progress_tx_list(ep, peer);
	}
out:
	/* Top the receive buffer pool back up to rx_size. */
	while (ep->posted_bufs < ep->rx_size && !ret)
		ret = rxd_ep_post_buf(ep);
	fastlock_release(&ep->util_ep.lock);
}