static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    int offset, uint64_t ddp_flags)
{
	struct ddp_buffer *db = toep->db[db_idx];
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(db->tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(db->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)db->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF1_FLUSH(1) |
	    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PUSH_DISABLE_1(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1) |
	    V_TF_DDP_ACTIVE_BUF(1) | V_TF_DDP_INDICATE_OUT(1),
	    ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}
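The 16B rounding in the length computation above is what makes each ULPTX sub-command end on a 16-byte boundary, so the three SET_TCB_FIELDs and the RX_DATA_ACK pack back to back behind the 16B work request header. As a minimal sketch (the helper name here is illustrative, not the driver's roundup2() macro), the power-of-two rounding amounts to:

/*
 * Illustrative stand-in for roundup2(len, 16) as used above: round len up
 * to the next multiple of 16 (valid because 16 is a power of two).
 */
static inline int
ulptx_roundup16(int len)
{
	return ((len + 15) & ~15);
}

With every piece padded this way, the total handed to alloc_wrqe() is itself a multiple of 16, matching the boundary requirement described in the comment.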
/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate skb ..\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;

		/* Point the TCB's RSS info at the firmware event queue. */
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				     0, 1);

		/* Follow with an ABORT_REQ/ABORT_RPL pair for the filter's TID. */
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}
	return 0;

out_err:
	return -ENOMEM;
}
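For context, the delete is asynchronous: the function above only queues the compound work request, and the caller learns the outcome through the filter_ctx it passed in. The sketch below shows one way a caller might wait for that outcome. It is illustrative only; the completion helpers (t4_init_completion(), cxgbe_poll_for_completion()), the queue argument, and the poll interval/count are assumptions modeled on how other filter paths in this driver wait, not something defined by the code above.

/*
 * Illustrative caller sketch only. cxgbe_del_hash_filter() is static, and
 * the completion helpers, queue argument, and poll parameters used here
 * are assumptions, not part of the code above.
 */
static int example_del_hash_filter_sync(struct rte_eth_dev *dev,
					unsigned int filter_id)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_ctx ctx;
	int ret;

	t4_init_completion(&ctx.completion);

	ret = cxgbe_del_hash_filter(dev, filter_id, &ctx);
	if (ret)
		return ret;

	/* Wait for the firmware reply that marks the delete complete. */
	ret = cxgbe_poll_for_completion(&adapter->sge.fw_evtq, 10, 100,
					&ctx.completion);
	if (ret)
		return ret;

	/* ctx.result is filled in by the completion handler. */
	return ctx.result;
}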