/**
 * i40iw_puda_qp_wqe - post a CQP Create-QP WQE for a PUDA QP
 * @dev: iwarp device (supplies the CQP used to post the WQE)
 * @qp: sc qp to be created (host context / shadow area already set up)
 *
 * Builds a Create-QP control WQE on the CQP send queue, rings the
 * doorbell and busy-polls the CCQ until the CQP operation completes.
 * Returns 0 on success, I40IW_ERR_RING_FULL if no CQP WQE is free,
 * or the status reported by poll_for_cqp_op_done().
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* physical addresses of the QP host context and shadow area */
	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	/*
	 * Header carries the opcode/type and the CQP polarity (valid) bit;
	 * it must be the last qword written so HW does not see a
	 * half-built WQE. NEXTIWSTATE=2 presumably selects the target QP
	 * state for create — TODO confirm against the CQP spec.
	 */
	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
	i40iw_sc_cqp_post_sq(cqp);
	/* synchronous: wait for the CQP completion of this create */
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_QP,
						    &compl_info);
	return status;
}
/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource (PUDA rsrc owning the QP and its completion CQ)
 *
 * Fills the QP host-context buffer (qp->hw_host_ctx) that HW reads when
 * the QP is created. Offsets below are byte offsets into the context.
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 *qp_ctx = qp->hw_host_ctx;

	/* SQ/RQ ring physical addresses */
	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	/* encoded ring sizes */
	set_64bit_val(qp_ctx, 24,
		      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
		      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

	/* 1514 presumably max Ethernet frame size used as send MSS — confirm */
	set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
	set_64bit_val(qp_ctx, 56, 0);
	set_64bit_val(qp_ctx, 64, 1);

	/* PUDA uses the same CQ for both transmit and receive completions */
	set_64bit_val(qp_ctx, 136,
		      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
		      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

	/* completion context: sc qp pointer, returned verbatim in CQEs */
	set_64bit_val(qp_ctx, 168,
		      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

	/* TPH hints and QoS handle */
	set_64bit_val(qp_ctx, 176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

	i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
			qp_ctx, I40IW_QP_CTX_SIZE);
}
/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 *
 * Gathers the buffers that make up one complete FPDU (starting with
 * @buf plus followers pulled from the QP's rxlist), copies them into a
 * single tx buffer, optionally verifies the trailing MPA CRC, and
 * transmits the reassembled FPDU. On any failure, the gathered buffers
 * are returned to the rxlist and the tx buffer (if taken) is returned
 * to the pool.
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
						       struct i40iw_pfpdu *pfpdu,
						       struct i40iw_puda_buf *buf,
						       u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct i40iw_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	/* pull the rest of this FPDU's buffers from rxlist onto pbufl */
	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = i40iw_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = I40IW_ERR_NO_TXBUFS;
		goto error;
	}

	/* coalesce pbufl into txbuf and stamp TCP/IP header for seqnum */
	i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);

	/* MPA CRC is the last 4 bytes of the FPDU */
	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad crc\n", __func__);
			goto error;
		}
	}

	i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	i40iw_puda_send_buf(ieq, txbuf);
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;

error:
	/*
	 * Put gathered buffers back on rxlist, preserving order by taking
	 * from the tail of pbufl. NOTE(review): the cast of pbufl.prev to
	 * i40iw_puda_buf relies on 'list' being the first member of
	 * struct i40iw_puda_buf — confirm against the struct definition.
	 */
	while (!list_empty(&pbufl)) {
		buf = (struct i40iw_puda_buf *)(pbufl.prev);
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		i40iw_puda_ret_bufpool(ieq, txbuf);
	return status;
}
/** * i40iw_puda_poll_info - poll cq for completion * @cq: cq for poll * @info: info return for successful completion */ static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq, struct i40iw_puda_completion_info *info) { u64 qword0, qword2, qword3; u64 *cqe; u64 comp_ctx; bool valid_bit; u32 major_err, minor_err; bool error; cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk); get_64bit_val(cqe, 24, &qword3); valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID); if (valid_bit != cq->cq_uk.polarity) return I40IW_ERR_QUEUE_EMPTY; i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32); error = (bool)RS_64(qword3, I40IW_CQ_ERROR); if (error) { i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__); major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR)); minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR)); info->compl_error = major_err << 16 | minor_err; return I40IW_ERR_CQ_COMPL_ERROR; } get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); get_64bit_val(cqe, 8, &comp_ctx); info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX); if (info->q_type == I40IW_CQE_QTYPE_RQ) { info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID); info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO); info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO); info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN); } return 0; }
/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 *
 * Builds a single-fragment UDA send WQE from @info and posts it on the
 * QP's send queue. Returns I40IW_ERR_QP_TOOMANY_WRS_POSTED if no SQ
 * WQE is available, else 0.
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, struct i40iw_puda_send_info *info)
{
	u64 *wqe;
	u32 iplen, l4len;
	u64 header[2];
	u32 wqe_idx;
	u8 iipt;

	/* number of 32 bits DWORDS in header */
	l4len = info->tcplen >> 2;
	if (info->ipv4) {
		iipt = 3;
		iplen = 5;	/* IPv4 header without options: 5 dwords */
	} else {
		iipt = 1;
		iplen = 10;	/* IPv6 fixed header: 10 dwords */
	}

	wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	/* remember caller's cookie so the completion can be matched */
	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;

	/* Third line of WQE descriptor */
	/* maclen is in words */
	header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
		    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) |
		    LS_64(1, I40IW_UDA_QPSQ_L4T) |
		    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
		    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
	/* Fourth line of WQE descriptor (carries the valid bit) */
	header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
		    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
		    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
		    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

	set_64bit_val(wqe, 0, info->paddr);
	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 16, header[0]);

	/*
	 * Header with the valid bit is written last via the insert helper
	 * so HW never observes a partially written WQE.
	 */
	i40iw_insert_wqe_hdr(wqe, header[1]);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
	i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}
/**
 * i40iw_puda_cq_wqe - post a CQP Create-CQ WQE for a PUDA CQ
 * @dev: iwarp device (supplies the CQP used to post the WQE)
 * @cq: sc cq to be created (cq memory / shadow area already set up)
 *
 * Builds a Create-CQ control WQE on the CQP send queue, rings the
 * doorbell and busy-polls until the CQP operation completes. Returns 0
 * on success, I40IW_ERR_RING_FULL if no CQP WQE is free, or the status
 * reported by poll_for_cqp_op_done().
 */
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	/* NOTE(review): RS_64_1(cq, 1) stores a shifted cq pointer here —
	 * presumably the completion context; confirm against CQP spec.
	 */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, cq->cq_pa);
	set_64bit_val(wqe, 40, cq->shadow_area_pa);

	/* header with the CQP valid bit must be the last qword written */
	header = cq->cq_uk.cq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(dev->cqp);
	/* synchronous: wait for the CQP completion of this create */
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_CQ,
						    &compl_info);
	return status;
}
/**
 * i40iw_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 *
 * Handles an out-of-order / partial-FPDU buffer forwarded to the IEQ:
 * starts (or restarts) partial-FPDU mode when a new first-partial
 * sequence number appears, validates the buffer's sequence number
 * against the receive window, inserts the buffer into the QP's rxlist
 * in sequence order, then attempts FPDU reassembly. On error the
 * buffer is returned to the pool.
 */
static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
				       struct i40iw_sc_qp *qp,
				       struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_buf *tmpbuf = NULL;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
	/* receive window pulled from the QP host context — index 23 is a
	 * raw 32-bit word offset into the context; confirm against the
	 * QPC layout.
	 */
	u32 rcv_wnd = hw_host_ctx[23];
	/* first partial seq # in q2 */
	u32 fps = qp->q2_buf[16];
	struct list_head *rxlist = &pfpdu->rxlist;
	struct list_head *plist;

	pfpdu->total_ieq_bufs++;

	if (pfpdu->mpa_crc_err) {
		pfpdu->crc_err++;
		goto error;
	}

	/* a different FPS means HW started a new partial sequence */
	if (pfpdu->mode && (fps != pfpdu->fps)) {
		/* clean up qp as it is new partial sequence */
		i40iw_ieq_cleanup_qp(ieq, qp);
		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
			    "%s: restarting new partial\n", __func__);
		pfpdu->mode = false;
	}

	if (!pfpdu->mode) {
		i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ,
				"Q2 BUFFER", (u64 *)qp->q2_buf, 128);
		/* First_Partial_Sequence_Number check */
		pfpdu->rcv_nxt = fps;
		pfpdu->fps = fps;
		pfpdu->mode = true;
		pfpdu->max_fpdu_data = ieq->vsi->mss;
		pfpdu->pmode_count++;
		INIT_LIST_HEAD(rxlist);
		i40iw_ieq_check_first_buf(buf, fps);
	}

	/* drop buffers whose seqnum falls outside the receive window */
	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
		pfpdu->bad_seq_num++;
		goto error;
	}

	if (!list_empty(rxlist)) {
		/* walk rxlist to find the first entry with a larger seqnum
		 * (signed diff handles 32-bit sequence wraparound)
		 */
		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
		plist = &tmpbuf->list;
		while ((struct list_head *)tmpbuf != rxlist) {
			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
				break;
			tmpbuf = (struct i40iw_puda_buf *)plist->next;
		}
		/* Insert buf before tmpbuf */
		list_add_tail(&buf->list, &tmpbuf->list);
	} else {
		list_add_tail(&buf->list, rxlist);
	}

	i40iw_ieq_process_fpdus(qp, ieq);
	return;

error:
	i40iw_puda_ret_bufpool(ieq, buf);
}