Example #1
/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	struct filter_entry *f;
	unsigned int max_fidx;
	int ret;

	if (fs && fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_del_hash_filter(dev, filter_id, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 f->fs.type ? PF_INET6 : PF_INET,
				 chip_ver);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return ret;
}
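A minimal caller sketch (not taken from the driver) showing how the completion context in the example above can be used: the caller embeds a struct filter_ctx, waits on its completion, and reads the final status from ctx.result. The helper name is hypothetical, and the snippet assumes the driver-internal headers defining struct filter_ctx and __cxgb4_del_filter() are visible.

/* Hypothetical helper, not part of the driver: delete a TCAM filter and
 * block until the reply path signals ctx.completion (see Example #1).
 */
static int example_delete_filter_sync(struct net_device *dev, int filter_id)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_del_filter(dev, filter_id, NULL, &ctx);
	if (ret)
		return ret;

	/* The delete work request completes asynchronously; the reply
	 * handler fills ctx.result and completes ctx.completion.
	 */
	wait_for_completion(&ctx.completion);
	return ctx.result;
}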
Example #2
bool is_filter_exact_match(struct adapter *adap,
			   struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u32 mask;

	if (!is_hashfilter(adap))
		return false;

	if (fs->type) {
		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
			return false;
	} else {
		if (is_inaddr_any(fs->val.fip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET))
			return false;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return false;

	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return false;

	if (tp->fcoe_shift >= 0) {
		mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
		if (mask && !fs->mask.fcoe)
			return false;
	}

	/* The remaining n-tuple fields (iport, vnic, vlan, tos, protocol,
	 * ethtype, MAC index, match type, frag) are checked against
	 * hash_filter_mask with the same shift-and-test pattern before the
	 * function reports an exact match.
	 */
	return true;
}
Example #3
bool is_filter_exact_match(struct adapter *adap,
			   struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	if (!is_hashfilter(adap))
		return false;

	if (fs->type) {
		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
			return false;
	} else {
		if (is_inaddr_any(fs->val.fip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET))
			return false;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return false;

	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return false;

	/* calculate tuple mask and compare with mask configured in hw */
	if (tp->fcoe_shift >= 0)
		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;

	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;

	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & VNIC_F))
			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
		else
			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
				tp->vnic_shift;
	}

	if (tp->vlan_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;

	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;

	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

	if (tp->matchtype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;

	if (tp->frag_shift >= 0)
		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;

	if (ntuple_mask != hash_filter_mask)
		return false;

	return true;
}
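One plausible way to use the exact-match test above (a sketch, not the driver's actual call site): decide whether a request can go to the hash region before committing to a filter path. is_filter_exact_match() returns true only when every field the hardware hashes on is fully specified and the resulting n-tuple mask equals the configured hash_filter_mask, so anything else must stay in the maskful TCAM region.

/* Hypothetical routing helper (assumed, not from the driver): mark a
 * specification for the hash (exact-match) region only when it passes
 * is_filter_exact_match(), then hand it to the normal set-filter path.
 */
static int example_choose_filter_path(struct net_device *dev, int filter_id,
				      struct ch_filter_specification *fs,
				      struct filter_ctx *ctx)
{
	struct adapter *adap = netdev2adap(dev);

	/* Exact matches may be hashed; everything else is handled by the
	 * TCAM branch of __cxgb4_set_filter().
	 */
	fs->hash = is_filter_exact_match(adap, fs);

	return __cxgb4_set_filter(dev, filter_id, fs, ctx);
}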
Example #4
static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
	unsigned int tcb_base, tcbaddr;
	unsigned int word_offset;
	struct filter_entry *f;
	__be64 be64_byte_count;
	int ret;

	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;
		} else {
			return -E2BIG;
		}
	} else {
		if ((fidx != (adapter->tids.nftids +
			      adapter->tids.nsftids - 1)) &&
		    fidx >= adapter->tids.nftids)
			return -E2BIG;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;
	}
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	spin_lock(&adapter->win0_lock);
	if (is_t4(adapter->params.chip)) {
		__be64 be64_count;

		/* T4 doesn't maintain byte counts in hw */
		*bytes = 0;

		/* Get pkts */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_count),
				   (__be32 *)&be64_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = be64_to_cpu(be64_count);
	} else {
		__be32 be32_count;

		/* Get bytes */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_byte_count),
				   &be64_byte_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*bytes = be64_to_cpu(be64_byte_count);

		/* Get pkts */
		word_offset = 6;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be32_count),
				   &be32_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = (u64)be32_to_cpu(be32_count);
	}

out:
	spin_unlock(&adapter->win0_lock);
	return ret;
}
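A short caller sketch (assumed, not from the driver) for the counter read above. Note that on T4 parts the byte count is always reported as 0, since that hardware keeps no byte counter in the TCB.

/* Hypothetical debug helper: print the hit counters of a TCAM filter.
 * The last argument selects the TCAM path (hash = false), matching the
 * non-hash branch of get_filter_count() above.
 */
static void example_show_filter_stats(struct adapter *adap, unsigned int fidx)
{
	u64 pkts = 0, bytes = 0;
	int ret;

	ret = get_filter_count(adap, fidx, &pkts, &bytes, false);
	if (ret)
		return;

	dev_info(adap->pdev_dev, "filter %u: %llu packets, %llu bytes\n",
		 fidx, pkts, bytes);
}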
Example #5
/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int max_fidx, fidx;
	struct filter_entry *f;
	u32 iconf;
	int iq, ret;

	if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	/* IPv6 filters occupy four slots and must be aligned on
	 * four-slot boundaries.  IPv4 filters only occupy a single
	 * slot and have no alignment requirements but writing a new
	 * IPv4 filter into the middle of an existing IPv6 filter
	 * requires clearing the old IPv6 filter and hence we prevent
	 * insertion.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* For T6, if our IPv4 filter isn't being written to a
		 * multiple-of-two filter index and there's an IPv6
		 * filter at the multiple-of-two base slot, then we
		 * need to delete that IPv6 filter ...
		 * For adapters below T6, an IPv6 filter occupies 4
		 * entries, so the check is done on the multiple-of-4
		 * base slot instead.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		if (fidx != filter_id &&
		    adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		if (chip_ver < CHELSIO_T6) {
			/* Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
				return -EINVAL;
			}

			/* Check all except the base overlapping IPv4 filter
			 * slots.
			 */
			for (fidx = filter_id + 1; fidx < filter_id + 4;
			     fidx++) {
				f = &adapter->tids.ftid_tab[fidx];
				if (f->valid) {
					dev_err(adapter->pdev_dev,
						"Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
						fidx);
					return -EBUSY;
				}
			}
		} else {
			/* For T6, with CLIP enabled, an IPv6 filter
			 * occupies 2 entries.
			 */
			if (filter_id & 0x1)
				return -EINVAL;
			/* Check overlapping IPv4 filter slot */
			fidx = filter_id + 1;
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
				       __func__, fidx);
				return -EBUSY;
			}
		}
	}

	/* Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = filter_id + adapter->tids.ftid_base;
	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
			     fs->type ? PF_INET6 : PF_INET,
			     chip_ver);
	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable ... */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		return ret;
	}

	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
		if (ret) {
			cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
					 chip_ver);
			return ret;
		}
	}

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, filter_id);
	if (ret) {
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		clear_filter(adapter, f);
	}

	return ret;
}
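To tie the pieces together, here is a hedged end-to-end sketch (not from the driver) that builds a simple IPv4 specification and programs it with __cxgb4_set_filter(). Field names follow the ch_filter_specification accesses seen in the examples above; fs.val.proto / fs.mask.proto are assumed by analogy with the fs->mask.proto handling in Example #3, and the synchronous wait mirrors the completion-context usage of Example #1.

/* Hypothetical example: match TCP traffic to local port 80 with an IPv4
 * TCAM filter, then wait for the firmware to acknowledge the write.
 */
static int example_add_tcp_port80_filter(struct net_device *dev, int filter_id)
{
	struct ch_filter_specification fs;
	struct filter_ctx ctx;
	int ret;

	memset(&fs, 0, sizeof(fs));
	init_completion(&ctx.completion);

	fs.type = 0;			/* IPv4; no 4-slot alignment needed */
	fs.val.proto = IPPROTO_TCP;	/* assumed field, mirrors fs->mask.proto */
	fs.mask.proto = 0xff;
	fs.val.lport = 80;		/* local (listening) port */
	fs.mask.lport = 0xffff;		/* exact match on the port */

	ret = __cxgb4_set_filter(dev, filter_id, &fs, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.completion);
	return ctx.result;
}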