/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
						   struct i40iw_puda_completion_info *info)
{
	u64 qword0, qword2, qword3;
	u64 *cqe;
	u64 comp_ctx;
	bool valid_bit;
	u32 major_err, minor_err;
	bool error;

	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
	get_64bit_val(cqe, 24, &qword3);
	valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
	if (valid_bit != cq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	if (error) {
		i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
		major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return I40IW_ERR_CQ_COMPL_ERROR;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);
	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

	if (info->q_type == I40IW_CQE_QTYPE_RQ) {
		info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
		info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
		info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
		info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
	}

	return 0;
}
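/* A note on the RS_64()/LS_64() accessors used throughout this file: they
 * are shift-and-mask helpers over 64-bit descriptor words, where each
 * I40IW_* field name expands to a matching _MASK/_SHIFT pair via token
 * pasting. A minimal stand-alone model of that convention, with a
 * hypothetical DEMO_QPID field, kept out of the build:
 */
#if 0	/* illustrative only, not part of the driver */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits 32..49 of a qword. */
#define DEMO_QPID_SHIFT	32
#define DEMO_QPID_MASK	(0x3ffffULL << DEMO_QPID_SHIFT)

#define DEMO_RS_64(val, field)	(((val) & field ## _MASK) >> field ## _SHIFT)
#define DEMO_LS_64(val, field)	(((uint64_t)(val) << field ## _SHIFT) & field ## _MASK)

int main(void)
{
	uint64_t qword = DEMO_LS_64(42, DEMO_QPID);	/* pack qp_id 42 */

	/* unpack it again; prints "qp_id = 42" */
	printf("qp_id = %llu\n",
	       (unsigned long long)DEMO_RS_64(qword, DEMO_QPID));
	return 0;
}
#endif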
/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
	u64 *wqe;
	u64 offset24;

	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	get_64bit_val(wqe, 24, &offset24);

	/* toggle the valid bit so the WQE is re-armed for the RQ's current phase */
	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);
}
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
				    struct i40iw_puda_buf *buf, bool initial)
{
	u64 *wqe;
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
		    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
		    wqe_idx, buf, wqe);
	if (!initial)
		get_64bit_val(wqe, 24, &offset24);

	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, buf->mem.pa);
	set_64bit_val(wqe, 8,
		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
	i40iw_insert_wqe_hdr(wqe, offset24);
}
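/* Both functions above recycle or post an RQ WQE by flipping its VALID bit
 * rather than by advancing a tail pointer: a slot counts as posted when the
 * bit matches the phase of the current lap around the ring, so its sense
 * inverts on every wrap. This is the same scheme the CQ polarity check in
 * i40iw_puda_poll_info() relies on. A stand-alone sketch of the idea, with
 * hypothetical names, kept out of the build:
 */
#if 0	/* illustrative only, not part of the driver */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

static uint64_t ring[RING_SIZE];	/* bit 0 stands in for the VALID bit */

int main(void)
{
	bool producer_phase = true;	/* value written into VALID this lap */
	bool consumer_phase = true;	/* value the consumer expects to see */
	unsigned int head = 0, tail = 0, i;

	for (i = 0; i < 10; i++) {
		/* produce: write payload, then VALID with the current phase */
		ring[head] = ((uint64_t)i << 1) | producer_phase;
		if (++head == RING_SIZE) {
			head = 0;
			producer_phase = !producer_phase;	/* wrap: flip */
		}

		/* consume: an entry is ours only if VALID matches our phase;
		 * stale entries from the previous lap fail the comparison
		 */
		if ((ring[tail] & 1) == (uint64_t)consumer_phase) {
			printf("consumed %llu\n",
			       (unsigned long long)(ring[tail] >> 1));
			if (++tail == RING_SIZE) {
				tail = 0;
				consumer_phase = !consumer_phase;
			}
		}
	}
	return 0;
}
#endif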
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow
	 *
	 * Whatever is happening here does not match our common macros for
	 * producer/consumer DMA and may not be portable, however on x86-64
	 * the required barrier is MFENCE, get a 'portable' version via C11
	 * atomic.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, I40IW_BYTE_0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				db_wr32(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				db_wr32(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
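/* The conditional block above elides doorbell writes where it can: if the
 * hardware SQ tail read from the shadow area is still behind the head
 * recorded at the previous i40iw_qp_post_wr() call, hardware is presumably
 * still fetching earlier WQEs and will reach the new ones on its own; only
 * when the tail has stopped at or inside the freshly posted span is the
 * MMIO doorbell rung. A hedged stand-alone restatement of that predicate
 * (names hypothetical, not driver API), kept out of the build:
 */
#if 0	/* illustrative only, not part of the driver */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Ring the doorbell only when hw_tail sits inside [old_head, new_head),
 * i.e. inside the span of WQEs posted since the last doorbell, allowing
 * for the span wrapping past the end of the ring.
 */
static bool need_doorbell(uint32_t hw_tail, uint32_t old_head,
			  uint32_t new_head)
{
	if (new_head == hw_tail)
		return false;		/* hardware already caught up */
	if (new_head > old_head)	/* span does not wrap */
		return hw_tail >= old_head && hw_tail < new_head;
	if (new_head != old_head)	/* span wraps around the ring */
		return hw_tail >= old_head || hw_tail < new_head;
	return false;			/* nothing new was posted */
}

int main(void)
{
	printf("%d\n", need_doorbell(5, 5, 8));	/* 1: HW idle at old head */
	printf("%d\n", need_doorbell(3, 5, 8));	/* 0: HW still working    */
	printf("%d\n", need_doorbell(9, 8, 2));	/* 1: wrapped span        */
	return 0;
}
#endif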