void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	uint32_t prod_index;
	uint8_t owner_bit;
	int nfreed = 0;
	int is_xrc_srq = 0;
	/* with a 64-byte CQE stride, the CQE content sits in the second 32 bytes */
	int cqe_inc = cq->cqe_size == 64 ? 1 : 0;

	if (srq && srq->ibv_srq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
		cqe += cqe_inc;
		if (is_xrc_srq &&
		    ((ntohl(cqe->g_mlpath_rqpn) & 0xffffff) == srq->srqn) &&
		    !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) {
			mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			/*
			 * Copy this surviving CQE down over the last freed
			 * slot, preserving the destination's ownership bit
			 * so HW/SW ownership parity stays intact.
			 */
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
			dest += cqe_inc;
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		update_cons_index(cq);
	}
}
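/*
 * __mlx4_cq_clean() expects to run with the CQ lock held; in the libmlx4
 * source the public entry point is a thin locking wrapper. A sketch
 * consistent with that convention (not necessarily verbatim from this
 * tree) looks like:
 */
void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	pthread_spin_lock(&cq->lock);
	__mlx4_cq_clean(cq, qpn, srq);
	pthread_spin_unlock(&cq->lock);
}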
int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled)
		update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}
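/*
 * Hypothetical usage sketch (not part of this file): applications reach
 * mlx4_poll_cq() through the generic verbs call ibv_poll_cq(), which
 * libibverbs dispatches via the provider's context ops. The helper name
 * drain_cq and the batch size of 16 below are illustrative assumptions.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static void drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n, i;

	/* ibv_poll_cq() returns the number of completions polled, or < 0 on error */
	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; ++i) {
			if (wc[i].status != IBV_WC_SUCCESS)
				fprintf(stderr, "wr_id %llu: %s\n",
					(unsigned long long) wc[i].wr_id,
					ibv_wc_status_str(wc[i].status));
		}
	}
}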