/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
	u64 header, *wqe;
	u64 *wqe_0 = NULL;
	u32 wqe_idx, peek_head;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return I40IW_ERR_PARAM;

	wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

	peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	else
		wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	set_64bit_val(wqe, I40IW_BYTE_0, 0);
	set_64bit_val(wqe, I40IW_BYTE_8, 0);
	set_64bit_val(wqe, I40IW_BYTE_16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

	udma_to_device_barrier();	/* Memory barrier to ensure data is written before valid bit is set */

	set_64bit_val(wqe, I40IW_BYTE_24, header);
	return 0;
}
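/*
 * Illustrative sketch (not driver code): the ordering pattern i40iw_nop_1
 * relies on.  The descriptor body is written first, a write barrier is
 * issued, and only then is the quad-word carrying the valid/polarity bit
 * stored, so hardware can never observe a half-written WQE.  The struct
 * layout, the demo_* names and the C11 fence used as a stand-in for
 * udma_to_device_barrier() are assumptions for illustration only.
 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_wqe {		/* hypothetical 32-byte descriptor */
	uint64_t qw[4];
};

static void demo_write_wqe(struct demo_wqe *wqe, uint64_t payload,
			   uint64_t header_with_valid_bit)
{
	wqe->qw[0] = payload;	/* body of the descriptor */
	wqe->qw[1] = 0;
	wqe->qw[2] = 0;

	/* stand-in for udma_to_device_barrier(): order body before header */
	atomic_thread_fence(memory_order_release);

	wqe->qw[3] = header_with_valid_bit;	/* valid bit written last */
}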
/**
 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code = 0;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	if (ret_code)
		return wqe;
	wqe = qp->sq_base[*wqe_idx].elem;

	return wqe;
}
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow
	 *
	 * Whatever is happening here does not match our common macros for
	 * producer/consumer DMA and may not be portable, however on x86-64
	 * the required barrier is MFENCE, get a 'portable' version via C11
	 * atomic.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, I40IW_BYTE_0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				db_wr32(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				db_wr32(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
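/*
 * Illustrative sketch (assumption, not driver code): the nested ifs in
 * i40iw_qp_post_wr amount to a membership test on a circular ring - the
 * doorbell register is only written when the SQ tail reported by hardware
 * in the shadow area lies in the half-open circular interval
 * [initial_ring.head, sw_sq_head), i.e. within the span of WQEs posted
 * since the last doorbell.  The demo_need_doorbell() helper below restates
 * that test in isolation; the name and signature are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_need_doorbell(uint32_t initial_head, uint32_t sw_head,
			       uint32_t hw_tail)
{
	if (sw_head == hw_tail)		/* nothing newly outstanding */
		return false;
	if (sw_head > initial_head)	/* posted span did not wrap */
		return hw_tail >= initial_head && hw_tail < sw_head;
	if (sw_head == initial_head)	/* nothing posted since last ring */
		return false;
	/* posted span wrapped past the end of the ring */
	return hw_tail >= initial_head || hw_tail < sw_head;
}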
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
	qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}

/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: size of wr in bytes
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
				u32 *wqe_idx,
				u8 wqe_size,
				u32 total_size,
				u64 wr_id)
{
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err)
{
	struct i40iw_qp_uk *qp;
	struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
	struct i40iw_puda_completion_info info;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_rsrc *rsrc;
	void *sqwrid;
	u8 cq_type = cq->cq_type;
	unsigned long flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;
	}

	memset(&info, 0, sizeof(info));
	ret = i40iw_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == I40IW_ERR_QUEUE_EMPTY)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (info.q_type == I40IW_CQE_QTYPE_RQ) {
		buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
		/* Get all the tcpip information in the buf header */
		ret = i40iw_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == I40IW_CQ_TYPE_ILQ) {
				i40iw_ilq_putback_rcvbuf(&rsrc->qp,
							 info.wqe_idx);
			} else {
				i40iw_puda_ret_bufpool(rsrc, buf);
				i40iw_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else
			i40iw_puda_replenish_rq(rsrc, false);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->vsi, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&rsrc->vsi->ilq->txpend))
			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
	}

done:
	I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
	if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
	return 0;
}
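/*
 * Illustrative sketch (assumption, not driver code): the polarity flip at
 * the end of i40iw_puda_poll_completion implements the usual valid-bit
 * scheme for circular queues.  Instead of zeroing consumed CQEs, the
 * consumer flips the expected valid bit each time its head index wraps to
 * 0; an entry is only treated as new when its valid bit matches the
 * current expected polarity.  The demo_ring type and helpers below are
 * hypothetical names used only for this illustration.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_ring {
	uint32_t head;
	uint32_t size;
	bool polarity;		/* valid-bit value expected for new entries */
};

static void demo_ring_advance(struct demo_ring *ring)
{
	ring->head = (ring->head + 1) % ring->size;
	if (ring->head == 0)	/* wrapped: new entries carry the other bit */
		ring->polarity = !ring->polarity;
}

static bool demo_cqe_is_new(const struct demo_ring *ring, bool cqe_valid_bit)
{
	return cqe_valid_bit == ring->polarity;
}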