static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;

	/* Size one composite work request carrying all three CPLs plus the
	 * alignment filler, rounded up to the 16-byte WR granularity.
	 */
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner)) +
			sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;

	/* Re-route RSS replies for this TID to the firmware event queue
	 * before issuing the abort.
	 */
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
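/*
 * Memory layout of the composite work request assembled above (descriptive
 * sketch derived from the pointer arithmetic; the mk_*_ulp() helpers wrap
 * each CPL for ULP_TX transmission):
 *
 *	+--------------------------+
 *	| work_request_hdr         |  ULP_TX WR header (INIT_ULPTX_WR)
 *	+--------------------------+
 *	| cpl_set_tcb_field        |  re-route RSS replies to fw_evtq
 *	+--------------------------+
 *	| ulptx_idata (aligner)    |  padding to keep the next CPL aligned
 *	+--------------------------+
 *	| cpl_abort_req            |  tear down the offloaded TID
 *	+--------------------------+
 *	| cpl_abort_rpl            |  pre-built abort reply
 *	+--------------------------+
 *
 * wrlen rounds the total up to a multiple of 16 bytes, the work-request
 * size granularity.
 */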
/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, such as the filter being locked or
 * currently pending in another operation.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
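/*
 * Hedged usage sketch (not part of the driver; the function name is
 * hypothetical): tearing down every normal and server filter with the
 * synchronous delete_filter() helper above, e.g. during adapter shutdown.
 */
static void example_flush_all_filters(struct adapter *adapter)
{
	unsigned int i;

	if (!adapter->tids.ftid_tab)
		return;

	for (i = 0; i < adapter->tids.nftids + adapter->tids.nsftids; i++)
		delete_filter(adapter, i); /* per-entry errors are ignored */
}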
/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	struct filter_entry *f;
	unsigned int max_fidx;
	int ret;

	if (fs && fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_del_hash_filter(dev, filter_id, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 f->fs.type ? PF_INET6 : PF_INET,
				 chip_ver);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return ret;
}
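/*
 * A minimal sketch of a synchronous wrapper around __cxgb4_del_filter()
 * (the function name and the 10 * HZ timeout are assumptions for
 * illustration; struct filter_ctx's completion and result fields are used
 * exactly as the completion path above signals them).
 */
static int example_cxgb4_del_filter_sync(struct net_device *dev,
					 int filter_id,
					 struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);
	ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
	if (ret)
		return ret;

	/* The reply handler fills ctx.result and signals ctx.completion. */
	if (!wait_for_completion_timeout(&ctx.completion, 10 * HZ))
		return -ETIMEDOUT;
	return ctx.result;
}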
/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int max_fidx, fidx;
	struct filter_entry *f;
	u32 iconf;
	int iq, ret;

	if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	/* IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries.  IPv4 filters only occupy a single slot and have no
	 * alignment requirements, but writing a new IPv4 filter into the
	 * middle of an existing IPv6 filter requires clearing the old IPv6
	 * filter, hence we prevent insertion.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* On T6, if our IPv4 filter isn't being written to a
		 * multiple-of-two filter index and there's an IPv6 filter
		 * at the multiple-of-2 base slot, then that IPv6 filter
		 * must be deleted first.  On adapters below T6, an IPv6
		 * filter occupies 4 entries, so the check is against the
		 * multiple-of-4 base slot instead.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		if (fidx != filter_id &&
		    adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		if (chip_ver < CHELSIO_T6) {
			/* Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
				return -EINVAL;
			}

			/* Check all except the base overlapping IPv4 filter
			 * slots.
			 */
			for (fidx = filter_id + 1; fidx < filter_id + 4;
			     fidx++) {
				f = &adapter->tids.ftid_tab[fidx];
				if (f->valid) {
					dev_err(adapter->pdev_dev,
						"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
						fidx);
					return -EBUSY;
				}
			}
		} else {
			/* On T6, with CLIP enabled, an IPv6 filter occupies
			 * 2 entries.
			 */
			if (filter_id & 0x1)
				return -EINVAL;
			/* Check the overlapping IPv4 filter slot */
			fidx = filter_id + 1;
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
				       __func__, fidx);
				return -EBUSY;
			}
		}
	}

	/* Make sure the provided filter index is not already in use by
	 * someone else.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = filter_id + adapter->tids.ftid_base;
	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
			     fs->type ? PF_INET6 : PF_INET,
			     chip_ver);
	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable ... */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		return ret;
	}

	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
		if (ret) {
			cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
					 chip_ver);
			return ret;
		}
	}

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, filter_id);
	if (ret) {
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		clear_filter(adapter, f);
	}

	return ret;
}
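/*
 * Worked example of the slot-alignment rules enforced above (hypothetical
 * helper, not part of the driver): pre-T6 chips store an IPv6 filter in
 * four consecutive slots rooted at a multiple-of-4 index, while T6 with
 * CLIP uses two slots rooted at a multiple-of-2 index.  So an IPv4 write
 * at index 6 must check the IPv6 entry (if any) rooted at index 4 on
 * pre-T6, but at index 6 itself on T6.
 */
static inline unsigned int example_ipv6_base_slot(unsigned int filter_id,
						  unsigned int chip_ver)
{
	return chip_ver < CHELSIO_T6 ? (filter_id & ~0x3U)
				     : (filter_id & ~0x1U);
}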
/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %u\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate mbuf\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);

		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;

		/* Re-route RSS replies for this TID to the firmware event
		 * queue before issuing the abort.
		 */
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				     0, 1);

		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);

		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);

		t4_mgmt_tx(ctrlq, mbuf);
	}
	return 0;

out_err:
	return -ENOMEM;
}
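/*
 * Hedged usage sketch (assumptions flagged inline): a caller that issues
 * the hash-filter delete above and then polls the firmware event queue for
 * the completion, since DPDK drivers do not block.  t4_init_completion(),
 * cxgbe_poll_for_completion() and the poll constants mirror upstream cxgbe
 * helpers but are assumptions here; the function name is hypothetical.
 */
static int example_cxgbe_del_hash_filter_sync(struct rte_eth_dev *dev,
					      unsigned int filter_id)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_ctx ctx;
	int ret;

	t4_init_completion(&ctx.completion);
	ret = cxgbe_del_hash_filter(dev, filter_id, &ctx);
	if (ret)
		return ret;

	/* Poll the fw event queue for the CPL reply (assumed helper and
	 * microsecond/count parameters).
	 */
	ret = cxgbe_poll_for_completion(&adapter->sge.fw_evtq, 10, 2000,
					&ctx.completion);
	if (ret)
		return -ETIMEDOUT;
	return ctx.result;
}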