/* Recursively validate an expression tree of filter nodes against the
 * supplied field definitions.
 *
 * Returns nonzero (true) when every node in the tree is valid, and zero
 * (false) as soon as any node fails validation.  An absent subtree (NULL)
 * is trivially valid.
 *
 * NOTE(review): the original header listed "0 Valid / -1 Invalid Field
 * Name / -2 Type Mismatch" -- those look like validate_node()'s return
 * codes rather than this function's boolean result; confirm against
 * validate_node() and its callers.
 */
int validate_filter(node_t *root, fielddefset_t *fields)
{
	int valid;

	/* Base case: an empty subtree is valid. */
	if (!root) {
		return 1;
	}

	/* Validate this node first; short-circuit on the first failure. */
	valid = validate_node(root, fields);
	if (!valid) {
		return 0;
	}

	/* Both children must also validate. */
	return (validate_filter(root->left_child, fields) &&
		validate_filter(root->right_child, fields));
}
// Verify that CopyFilter produces bit-identical output for each pixel type
// by checking the processed image against a known SHA-1 digest.
TEST(CopyFilterTest, test)
{
	const zimg::PixelType types[] = {
		zimg::PixelType::BYTE, zimg::PixelType::WORD,
		zimg::PixelType::HALF, zimg::PixelType::FLOAT
	};

	const unsigned w = 591;
	const unsigned h = 333;

	// One reference digest per pixel type; only the first of the three
	// per-plane slots is populated (the rest default to null).
	const char *expected_sha1[][3] = {
		{ "b7399d798c5f96b4c9ac4c6cccd4c979468bdc7a" },
		{ "43362943f1de4b51f45679a0c460f55c8bd8d2f2" },
		{ "e28cce115314b303454b427fde07a66ecbaff62f" },
		{ "078016e8752bcfb63b16c86b4ae212a51579f028" }
	};

	for (unsigned x = 0; x < 4; ++x) {
		SCOPED_TRACE(static_cast<int>(types[x]));

		zimg::CopyFilter copy{ w, h, types[x] };

		// BUG FIX: the address-of operator had been mangled into the
		// U+00A9 copyright sign (an HTML "&copy" entity converted to
		// "(c)"), which does not compile.  Pass the filter by address.
		validate_filter(&copy, w, h, types[x], expected_sha1[x]);
	}
}
// Check DepthConvert2 output digests for limited/full-range BYTE and WORD
// sources, converting to the default FLOAT format (chroma flag preserved).
TEST(DepthConvert2Test, test_non_full_integer)
{
	const unsigned w = 640;
	const unsigned h = 480;

	// Source formats: every combination of depth, fullrange and chroma
	// exercised by the reference digests below.
	zimg::PixelFormat src_format[] = {
		{ zimg::PixelType::BYTE, 1, true, false },
		{ zimg::PixelType::BYTE, 7, true, true },
		{ zimg::PixelType::WORD, 9, false, false },
		{ zimg::PixelType::WORD, 15, false, true },
		{ zimg::PixelType::WORD, 9, true, false },
		{ zimg::PixelType::WORD, 15, true, true }
	};
	const char *expected_sha1[][3] = {
		{ "6a6a49a71b307303c68ec76e1e196736acc41730" },
		{ "1ebf85f96a3d3cc00cfa6da71edbd9030e0d371e" },
		{ "1451f96e1221f3194dce3b972dfc40fad74c5f80" },
		{ "f28cfe65453b2c1bcb4988d1db9dd3c512d9bdb1" },
		{ "62ca57c53cab818046b537449ad5418e7988cc68" },
		{ "422e55781a043f20738685f22a8c8c3c116810dd" },
	};

	const size_t num_formats = sizeof(src_format) / sizeof(src_format[0]);

	for (size_t i = 0; i < num_formats; ++i) {
		const zimg::PixelFormat &format = src_format[i];

		SCOPED_TRACE(static_cast<int>(format.type));
		SCOPED_TRACE(format.depth);
		SCOPED_TRACE(format.fullrange);
		SCOPED_TRACE(format.chroma);

		zimg::PixelFormat dst_format =
			zimg::default_pixel_format(zimg::PixelType::FLOAT);
		dst_format.chroma = format.chroma;

		zimg::depth::DepthConvert2 convert{ w, h, format, dst_format,
						    zimg::CPUClass::CPU_NONE };

		validate_filter(&convert, w, h, format, expected_sha1[i]);
	}
}
/* Check a Chelsio Filter Request for validity, convert it into our internal * format and send it to the hardware. Return 0 on success, an error number * otherwise. We attach any provided filter operation context to the internal * filter specification in order to facilitate signaling completion of the * operation. */ int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); unsigned int max_fidx, fidx; struct filter_entry *f; u32 iconf; int iq, ret; if (fs->hash) { if (is_hashfilter(adapter)) return cxgb4_set_hash_filter(dev, fs, ctx); netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n", __func__); return -EINVAL; } max_fidx = adapter->tids.nftids; if (filter_id != (max_fidx + adapter->tids.nsftids - 1) && filter_id >= max_fidx) return -E2BIG; fill_default_mask(fs); ret = validate_filter(dev, fs); if (ret) return ret; iq = get_filter_steerq(dev, fs); if (iq < 0) return iq; /* IPv6 filters occupy four slots and must be aligned on * four-slot boundaries. IPv4 filters only occupy a single * slot and have no alignment requirements but writing a new * IPv4 filter into the middle of an existing IPv6 filter * requires clearing the old IPv6 filter and hence we prevent * insertion. */ if (fs->type == 0) { /* IPv4 */ /* For T6, If our IPv4 filter isn't being written to a * multiple of two filter index and there's an IPv6 * filter at the multiple of 2 base slot, then we need * to delete that IPv6 filter ... * For adapters below T6, IPv6 filter occupies 4 entries. * Hence we need to delete the filter in multiple of 4 slot. */ if (chip_ver < CHELSIO_T6) fidx = filter_id & ~0x3; else fidx = filter_id & ~0x1; if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) { f = &adapter->tids.ftid_tab[fidx]; if (f->valid) { dev_err(adapter->pdev_dev, "Invalid location. 
IPv6 requires 4 slots and is occupying slots %u to %u\n", fidx, fidx + 3); return -EINVAL; } } } else { /* IPv6 */ if (chip_ver < CHELSIO_T6) { /* Ensure that the IPv6 filter is aligned on a * multiple of 4 boundary. */ if (filter_id & 0x3) { dev_err(adapter->pdev_dev, "Invalid location. IPv6 must be aligned on a 4-slot boundary\n"); return -EINVAL; } /* Check all except the base overlapping IPv4 filter * slots. */ for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) { f = &adapter->tids.ftid_tab[fidx]; if (f->valid) { dev_err(adapter->pdev_dev, "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n", fidx); return -EBUSY; } } } else { /* For T6, CLIP being enabled, IPv6 filter would occupy * 2 entries. */ if (filter_id & 0x1) return -EINVAL; /* Check overlapping IPv4 filter slot */ fidx = filter_id + 1; f = &adapter->tids.ftid_tab[fidx]; if (f->valid) { pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n", __func__, fidx); return -EBUSY; } } } /* Check to make sure that provided filter index is not * already in use by someone else */ f = &adapter->tids.ftid_tab[filter_id]; if (f->valid) return -EBUSY; fidx = filter_id + adapter->tids.ftid_base; ret = cxgb4_set_ftid(&adapter->tids, filter_id, fs->type ? PF_INET6 : PF_INET, chip_ver); if (ret) return ret; /* Check t make sure the filter requested is writable ... */ ret = writable_filter(f); if (ret) { /* Clear the bits we have set above */ cxgb4_clear_ftid(&adapter->tids, filter_id, fs->type ? PF_INET6 : PF_INET, chip_ver); return ret; } if (is_t6(adapter->params.chip) && fs->type && ipv6_addr_type((const struct in6_addr *)fs->val.lip) != IPV6_ADDR_ANY) { ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1); if (ret) { cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6, chip_ver); return ret; } } /* Convert the filter specification into our internal format. 
* We copy the PF/VF specification into the Outer VLAN field * here so the rest of the code -- including the interface to * the firmware -- doesn't have to constantly do these checks. */ f->fs = *fs; f->fs.iq = iq; f->dev = dev; iconf = adapter->params.tp.ingress_config; if (iconf & VNIC_F) { f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf; f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; f->fs.val.ovlan_vld = fs->val.pfvf_vld; f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; } /* Attempt to set the filter. If we don't succeed, we clear * it and return the failure. */ f->ctx = ctx; f->tid = fidx; /* Save the actual tid */ ret = set_filter_wr(adapter, filter_id); if (ret) { cxgb4_clear_ftid(&adapter->tids, filter_id, fs->type ? PF_INET6 : PF_INET, chip_ver); clear_filter(adapter, f); } return ret; }
/* Build and send a hash (exact-match) filter request to the hardware.
 * Allocates the filter entry plus any required L2T/SMT/CLIP/ATID resources
 * and an act_open request skb; on any failure the goto chain below unwinds
 * exactly the resources acquired so far.  Returns 0 once the request has
 * been handed to t4_ofld_send() (completion is signaled asynchronously via
 * ctx), or a negative errno.
 */
static int cxgb4_set_hash_filter(struct net_device *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sk_buff *skb;
	int iq, atid, size;
	int ret = 0;
	u32 iconf;

	fill_default_mask(fs);
	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			/* Release the L2T entry inline and NULL it so the
			 * free_l2t label's release below is a no-op (no
			 * double release).
			 */
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			ret = -ENOMEM;
			goto free_l2t;
		}
	}

	atid = cxgb4_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto free_smt;
	}

	/* Fold the PF/VF specification into the Outer VLAN fields when the
	 * adapter's ingress configuration uses the VNIC encoding.
	 */
	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* NOTE(review): the same request size is used for both the IPv4 and
	 * IPv6 act_open requests here -- confirm sizeof(cpl_t6_act_open_req)
	 * is sufficient for the req6 message as well.
	 */
	size = sizeof(struct cpl_t6_act_open_req);
	if (f->fs.type) {
		/* IPv6: the local IP needs a CLIP table entry first. */
		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
		if (ret)
			goto free_atid;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mk_act_open_req6(f, skb,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mk_act_open_req(f, skb,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	/* Hand the request to the firmware; completion arrives later. */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;

	/* Error unwind: each label releases one resource, in reverse
	 * acquisition order.
	 */
free_clip:
	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
free_atid:
	cxgb4_free_atid(t, atid);
free_smt:
	if (f->smt) {
		cxgb4_smt_release(f->smt);
		f->smt = NULL;
	}
free_l2t:
	if (f->l2t) {
		cxgb4_l2t_release(f->l2t);
		f->l2t = NULL;
	}
out_err:
	kfree(f);
	return ret;
}
// Convenience overload: expand a bare PixelType into its default PixelFormat
// and delegate to the PixelFormat-based validator.
void validate_filter(const zimg::IZimgFilter *filter, unsigned src_width, unsigned src_height, zimg::PixelType src_type, const char * const sha1_str[3])
{
	const zimg::PixelFormat format = zimg::default_pixel_format(src_type);
	validate_filter(filter, src_width, src_height, format, sha1_str);
}