/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, I40IW_BYTE_0, 0);
        set_64bit_val(wqe, I40IW_BYTE_8, 0);
        set_64bit_val(wqe, I40IW_BYTE_16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        udma_to_device_barrier(); /* ensure data is written before the valid bit is set */

        set_64bit_val(wqe, I40IW_BYTE_24, header);
        return 0;
}
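/*
 * Note on the wqe_0 writes above: the header qword (index 3, byte offset 24)
 * of the WQE *after* this nop is pre-written with the value the hardware
 * treats as invalid, so it stops fetching there. While the ring has not
 * wrapped (peek_head != 0) that is the inverse of the current polarity; once
 * the next slot is index 0 the polarity flips for the new pass, so the
 * current polarity value is the "invalid" one and is written instead.
 */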
/**
 * i40iw_puda_qp_wqe - setup wqe for qp create
 * @dev: iwarp device
 * @qp: resource qp ptr
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev,
                                                struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;
        struct i40iw_ccq_cqe_info compl_info;
        enum i40iw_status_code status = 0;

        cqp = dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);

        header = qp->qp_uk.qp_id |
                 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
                 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
                 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
                 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        /* write the header last, behind a barrier, so the valid bit is not seen early */
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA QP CREATE WQE", wqe, 32);
        i40iw_sc_cqp_post_sq(cqp);
        status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                                    I40IW_CQP_OP_CREATE_QP,
                                                    &compl_info);
        return status;
}
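/*
 * QP create runs synchronously at setup time: the CQP WQE is built and
 * posted to the control queue pair's send queue, then the caller spins in
 * poll_for_cqp_op_done() until the CREATE_QP completion is reported back
 * through compl_info. The WQEVALID bit follows the same polarity convention
 * as the data-path queues, taken here from cqp->polarity.
 */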
/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_sc_qp *qp = &rsrc->qp;
        u64 *qp_ctx = qp->hw_host_ctx;

        set_64bit_val(qp_ctx, 8, qp->sq_pa);
        set_64bit_val(qp_ctx, 16, qp->rq_pa);

        set_64bit_val(qp_ctx, 24,
                      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
                      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

        set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
        set_64bit_val(qp_ctx, 56, 0);
        set_64bit_val(qp_ctx, 64, 1);

        set_64bit_val(qp_ctx, 136,
                      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
                      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

        set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

        set_64bit_val(qp_ctx, 168,
                      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

        set_64bit_val(qp_ctx, 176,
                      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
                      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
                      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

        i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
                        qp_ctx, I40IW_QP_CTX_SIZE);
}
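/*
 * The second argument to set_64bit_val() is a byte offset into the host QP
 * context buffer that i40iw_puda_qp_wqe() later hands to the CQP. TX and RX
 * completions are both steered to the single PUDA CQ (rsrc->cq_id), and the
 * send MSS of 1514 matches a full Ethernet frame (1500-byte MTU plus a
 * 14-byte L2 header), which is presumably the intent for this privileged QP.
 */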
/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
                                       struct i40iw_puda_send_info *info)
{
        u64 *wqe;
        u32 iplen, l4len;
        u64 header[2];
        u32 wqe_idx;
        u8 iipt;

        /* header lengths are in 32-bit DWORDs */
        l4len = info->tcplen >> 2;
        if (info->ipv4) {
                iipt = 3;
                iplen = 5;
        } else {
                iipt = 1;
                iplen = 10;
        }

        wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;

        /* Third line of WQE descriptor */
        /* maclen is in 16-bit words */
        header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
                    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) |
                    LS_64(1, I40IW_UDA_QPSQ_L4T) |
                    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
                    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);

        /* Fourth line of WQE descriptor */
        header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
                    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
                    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
                    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

        set_64bit_val(wqe, 0, info->paddr);
        set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
        set_64bit_val(wqe, 16, header[0]);

        i40iw_insert_wqe_hdr(wqe, header[1]);

        i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
        i40iw_qp_post_wr(&qp->qp_uk);
        return 0;
}
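/*
 * Worked example of the length fields, assuming an untagged IPv4/TCP frame
 * with no IP or TCP options: maclen = 14 bytes, so maclen >> 1 = 7 16-bit
 * words; iplen = 5 DWORDs (the 20-byte IPv4 header; IPv6 uses 10 for its
 * fixed 40-byte header); l4len = tcplen >> 2 = 5 DWORDs for a 20-byte TCP
 * header. The iipt encoding (3 for IPv4, 1 for IPv6) is taken as-is from
 * the code above.
 */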
/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
        u64 *wqe;
        u64 offset24;

        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        get_64bit_val(wqe, 24, &offset24);
        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
        set_64bit_val(wqe, 24, offset24);
}
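/*
 * The receive buffer itself stays in place; re-arming the RQ WQE only
 * requires flipping the valid bit in the header qword at offset 24 to the
 * opposite of its current value, which matches the polarity the hardware
 * expects on its next pass over the RQ ring.
 */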
/**
 * i40iw_puda_cq_wqe - setup wqe for cq create
 * @dev: iwarp device
 * @cq: resource cq ptr
 */
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev,
                                                struct i40iw_sc_cq *cq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;
        struct i40iw_ccq_cqe_info compl_info;
        enum i40iw_status_code status = 0;

        cqp = dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
        set_64bit_val(wqe, 16,
                      LS_64(cq->shadow_read_threshold,
                            I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
        set_64bit_val(wqe, 32, cq->cq_pa);
        set_64bit_val(wqe, 40, cq->shadow_area_pa);

        header = cq->cq_uk.cq_id |
                 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
                 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
                 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQ CREATE WQE", wqe,
                        I40IW_CQP_WQE_SIZE * 8);
        i40iw_sc_cqp_post_sq(dev->cqp);
        status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                                    I40IW_CQP_OP_CREATE_CQ,
                                                    &compl_info);
        return status;
}
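/*
 * Note: RS_64_1(cq, 1) stores the cq pointer right-shifted by one bit as an
 * opaque completion context at offset 8; the completion handler presumably
 * shifts it back to recover the pointer. Like QP create above, CQ create is
 * posted to the CQP and then polled synchronously for its completion.
 */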
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
                                    struct i40iw_puda_buf *buf, bool initial)
{
        u64 *wqe;
        struct i40iw_sc_qp *qp = &rsrc->qp;
        u64 offset24 = 0;

        qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
                    "%s: wqe_idx= %d buf = %p wqe = %p\n",
                    __func__, wqe_idx, buf, wqe);
        if (!initial)
                get_64bit_val(wqe, 24, &offset24);

        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, buf->mem.pa);
        set_64bit_val(wqe, 8,
                      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
        i40iw_insert_wqe_hdr(wqe, offset24);
}
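/*
 * On the initial pass the WQE header is known to be zeroed, so offset24
 * stays 0 and the valid bit is set to 1; on later reposts the current
 * header is read back and the valid bit inverted, the same toggling scheme
 * used by i40iw_ilq_putback_rcvbuf(). i40iw_insert_wqe_hdr() writes the
 * header last so the buffer address and length are visible first.
 */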
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
                                                  struct i40iw_sc_cq *cq,
                                                  u32 *compl_err)
{
        struct i40iw_qp_uk *qp;
        struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
        struct i40iw_puda_completion_info info;
        enum i40iw_status_code ret = 0;
        struct i40iw_puda_buf *buf;
        struct i40iw_puda_rsrc *rsrc;
        void *sqwrid;
        u8 cq_type = cq->cq_type;
        unsigned long flags;

        if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
                rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
        } else {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
                return I40IW_ERR_BAD_PTR;
        }
        memset(&info, 0, sizeof(info));
        ret = i40iw_puda_poll_info(cq, &info);
        *compl_err = info.compl_error;
        if (ret == I40IW_ERR_QUEUE_EMPTY)
                return ret;
        if (ret)
                goto done;

        qp = info.qp;
        if (!qp || !rsrc) {
                ret = I40IW_ERR_BAD_PTR;
                goto done;
        }

        if (qp->qp_id != rsrc->qp_id) {
                ret = I40IW_ERR_BAD_PTR;
                goto done;
        }

        if (info.q_type == I40IW_CQE_QTYPE_RQ) {
                buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
                /* Get all the tcpip information in the buf header */
                ret = i40iw_puda_get_tcpip_info(&info, buf);
                if (ret) {
                        rsrc->stats_rcvd_pkt_err++;
                        if (cq_type == I40IW_CQ_TYPE_ILQ) {
                                i40iw_ilq_putback_rcvbuf(&rsrc->qp,
                                                         info.wqe_idx);
                        } else {
                                i40iw_puda_ret_bufpool(rsrc, buf);
                                i40iw_puda_replenish_rq(rsrc, false);
                        }
                        goto done;
                }

                rsrc->stats_pkt_rcvd++;
                rsrc->compl_rxwqe_idx = info.wqe_idx;
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
                rsrc->receive(rsrc->vsi, buf);
                if (cq_type == I40IW_CQ_TYPE_ILQ)
                        i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
                else
                        i40iw_puda_replenish_rq(rsrc, false);
        } else {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
                sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
                I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
                rsrc->xmit_complete(rsrc->vsi, sqwrid);
                spin_lock_irqsave(&rsrc->bufpool_lock, flags);
                rsrc->tx_wqe_avail_cnt++;
                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
                if (!list_empty(&rsrc->vsi->ilq->txpend))
                        i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
        }

done:
        I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
        if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
                cq_uk->polarity = !cq_uk->polarity;
        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
        set_64bit_val(cq_uk->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
        return 0;
}
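/*
 * The done: path advances the CQ consumer position regardless of how the
 * CQE was handled: the head moves past the polled CQE, the CQ polarity
 * flips when the ring wraps to index 0, and the new position is published
 * to the CQ shadow area so the hardware can track how far software has
 * consumed. RQ completions recycle or replenish receive buffers; SQ
 * completions retire the tracked wrid, free a TX WQE slot under
 * bufpool_lock, and kick any transmits pending on the ILQ.
 */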