Example #1
static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
			      const u8 *mac, struct station_info *sinfo)
{
	struct wlandevice *wlandev = dev->ml_priv;
	struct p80211msg_lnxreq_commsquality quality;
	int result;

	memset(sinfo, 0, sizeof(*sinfo));

	if (!wlandev || (wlandev->msdstate != WLAN_MSD_RUNNING))
		return -EOPNOTSUPP;

	/* build request message */
	quality.msgcode = DIDMSG_LNXREQ_COMMSQUALITY;
	quality.dbm.data = P80211ENUM_truth_true;
	quality.dbm.status = P80211ENUM_msgitem_status_data_ok;

	/* send message to nsd */
	if (!wlandev->mlmerequest)
		return -EOPNOTSUPP;

	result = wlandev->mlmerequest(wlandev, (struct p80211msg *)&quality);

	if (result == 0) {
		sinfo->txrate.legacy = quality.txrate.data;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
		sinfo->signal = quality.level.data;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
	}

	return result;
}
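Every example on this page turns on the BIT_ULL() macro. As a quick reference, here is a minimal userspace stand-in for it (the kernel's own definition lives in include/linux/bits.h); the point of the ULL suffix is that a plain 1 << n overflows a 32-bit int for n >= 31, while 1ULL << n is well defined up to n == 63:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel macro: bit n of a 64-bit value. */
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	/* 1 << 40 would be undefined with a 32-bit int; 1ULL << 40 is fine. */
	uint64_t flag = BIT_ULL(40);

	printf("BIT_ULL(40) = %#llx\n", (unsigned long long)flag);
	return 0;
}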
Example #2
/**
 * i40evf_get_rss_hash_opts - Get RSS hash Input Set for each flow type
 * @adapter: board private structure
 * @cmd: ethtool rxnfc command
 *
 * Returns Success if the flow is supported, else Invalid Input.
 **/
static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
				    struct ethtool_rxnfc *cmd)
{
	struct i40e_hw *hw = &adapter->hw;
	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);

	/* We always hash on IP src and dest addresses */
	cmd->data = RXH_IP_SRC | RXH_IP_DST;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;

	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;

	case TCP_V6_FLOW:
		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;

	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		cmd->data = 0;
		return -EINVAL;
	}

	return 0;
}
Example #3
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}
Example #4
static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev->ntb.pdev;

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(&pdev->dev, "fail to init isr.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}
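The closing line is a common idiom: BIT_ULL(ndev->db_count) - 1 yields a mask with the db_count lowest bits set. A small sketch of the idiom in isolation; note that it assumes the count is strictly less than 64, since shifting a 64-bit value by 64 is undefined in C (the i40e examples further down guard exactly this case):

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Mask with the n lowest bits set; only valid for 0 < n < 64. */
static uint64_t low_bits_mask(unsigned int n)
{
	assert(n > 0 && n < 64);
	return BIT_ULL(n) - 1;
}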
Example #5
/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
Example #6
static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
		u64 reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		u32 reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
			reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
		}
	}

	return 0;
}
Example #7
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable pom interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}
Example #8
/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	u64 features = 0;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 __iomem *in_features = _vop_vq_features(desc);
	int feature_len = ioread8(&desc->feature_len);

	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++)
		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
			features |= BIT_ULL(i);

	return features;
}
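vop_get_features() reassembles a feature bitmap stored as packed little-endian bytes into one u64, one bit at a time. The same loop as a self-contained sketch, with the MMIO accessor (ioread8) replaced by a plain memory read:

#include <stddef.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Gather up to 64 bits from a packed little-endian bit array. */
static uint64_t gather_bits(const uint8_t *bytes, size_t len)
{
	unsigned int i, bits = (unsigned int)(len < 8 ? len : 8) * 8;
	uint64_t out = 0;

	for (i = 0; i < bits; i++)
		if (bytes[i / 8] & (1u << (i % 8)))
			out |= BIT_ULL(i);

	return out;
}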
Example #9
/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= ~(CPU_TO_LE64(mask));

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}
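Stripped of the endianness handling and the memcpy helpers, the read path is: build a width mask (special-casing a full 64-bit field, where the shift would be a no-op on x86), shift the source down to bit zero, and mask. A minimal host-order sketch of that extraction:

#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Extract a field of 'width' bits starting at bit 'lsb' of 'word'. */
static uint64_t extract_field(uint64_t word, unsigned int lsb,
			      unsigned int width)
{
	uint64_t mask = (width < 64) ? BIT_ULL(width) - 1 : ~(uint64_t)0;

	return (word >> lsb) & mask;
}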
Example #10
/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}
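And the dual operation for the write path: clear the field's bits in the destination, then OR in the masked, shifted new value. Same simplifying assumptions as the extraction sketch above (host byte order, lsb + width <= 64):

#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Insert 'value' into a field of 'width' bits at bit 'lsb' of 'word'. */
static uint64_t insert_field(uint64_t word, uint64_t value,
			     unsigned int lsb, unsigned int width)
{
	uint64_t mask = (width < 64) ? BIT_ULL(width) - 1 : ~(uint64_t)0;

	return (word & ~(mask << lsb)) | ((value & mask) << lsb);
}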
Example #11
static void pp_init_flds(struct pp_ctx *pp)
{
	int pidx, lport, pcnt;

	/* Find global port index */
	lport = ntb_port_number(pp->ntb);
	pcnt = ntb_peer_port_count(pp->ntb);
	for (pidx = 0; pidx < pcnt; pidx++) {
		if (lport < ntb_peer_port_number(pp->ntb, pidx))
			break;
	}

	pp->in_db = BIT_ULL(lport);
	pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
	pp->nmask = GENMASK_ULL(pcnt - 1, pidx);

	dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
		pp->in_db, pp->pmask, pp->nmask);
}
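pp_init_flds() splits the peers into a 'previous' mask and a 'next' mask around the local port's sorted position. A worked sketch with hypothetical numbers: for pcnt == 4 peers and pidx == 2, GENMASK_ULL(2, 0) >> 1 gives 0x3 (the two lower peers) and GENMASK_ULL(3, 2) gives 0xc (the two at or above). The GENMASK_ULL stand-in below mirrors the kernel macro for 0 <= lo <= hi <= 63:

#include <stdio.h>
#include <stdint.h>

/* Bits lo..hi set, inclusive; userspace stand-in for the kernel macro. */
#define GENMASK_ULL(hi, lo) \
	((~0ULL >> (63 - (hi))) & (~0ULL << (lo)))

int main(void)
{
	int pidx = 2, pcnt = 4;	/* hypothetical port layout */
	uint64_t pmask = GENMASK_ULL(pidx, 0) >> 1;
	uint64_t nmask = GENMASK_ULL(pcnt - 1, pidx);

	printf("prev %#llx, next %#llx\n",
	       (unsigned long long)pmask, (unsigned long long)nmask);
	return 0;
}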
Example #12
static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
Example #13
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}
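This function, like the cn23xx enable/disable routines elsewhere on this page, walks the whole index range and skips entries whose bit is clear in the qmask. An equivalent pattern visits only the set bits directly by clearing the lowest set bit each iteration, as sketched below (using the GCC/Clang __builtin_ctzll intrinsic, which is undefined for a zero argument and is therefore only called while the mask is non-zero):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t qmask = 0x29;	/* hypothetical: queues 0, 3 and 5 active */

	while (qmask) {
		int idx = __builtin_ctzll(qmask);

		printf("servicing queue %d\n", idx);
		qmask &= qmask - 1;	/* clear the lowest set bit */
	}
	return 0;
}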
Example #14
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = AMD_MW_CNT;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(&ndev->ntb.pdev->dev,
			"AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}
Example #15
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}
Example #16
static int pp_find_next_peer(struct pp_ctx *pp)
{
	u64 link, out_db;
	int pidx;

	link = ntb_link_is_up(pp->ntb, NULL, NULL);

	/* Find next available peer */
	if (link & pp->nmask)
		pidx = __ffs64(link & pp->nmask);
	else if (link & pp->pmask)
		pidx = __ffs64(link & pp->pmask);
	else
		return -ENODEV;

	out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));

	spin_lock(&pp->lock);
	pp->out_pidx = pidx;
	pp->out_db = out_db;
	spin_unlock(&pp->lock);

	return 0;
}
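pp_find_next_peer() prefers the lowest set bit of the 'next' mask and falls back to the 'previous' mask, using __ffs64() (whose result is undefined for zero input, hence the explicit tests first). A portable sketch of the same selection:

#include <stdint.h>

/* Index of the lowest set bit; caller must ensure val != 0. */
static int ffs64(uint64_t val)
{
	return __builtin_ctzll(val);
}

/* Pick the next peer index: prefer the 'next' mask, else 'previous'. */
static int pick_peer(uint64_t link, uint64_t nmask, uint64_t pmask)
{
	if (link & nmask)
		return ffs64(link & nmask);
	if (link & pmask)
		return ffs64(link & pmask);
	return -1;	/* no link to any peer */
}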
Example #17
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
Example #18
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base)
		return -EINVAL;

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	clk_disable_unprepare(host->clk);
	return ret;
}
Example #19
static int
lio_cn23xx_pf_enable_io_queues(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/*
			 * IOQs are in reset by default in PEM2 mode,
			 * clearing reset bit
			 */
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val &
					LIO_CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
				       loop) {
					reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
					loop--;
				}
				if (!loop) {
					lio_dev_err(oct, "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						    q_no);
					return (-1);
				}
				reg_val = reg_val &
					~LIO_CN23XX_PKT_INPUT_CTL_RST;
				lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);

				reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
					lio_dev_err(oct, "clearing the reset failed for qno: %u\n",
						    q_no);
					return (-1);
				}
			}
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}
	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t	reg_val;
		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					reg_val);
		}
	}
	return (0);
}
Example #20
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}
Example #21
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}
Example #22
static void
lio_cn23xx_pf_disable_io_queues(struct octeon_device *oct)
{
	volatile uint64_t	d64;
	volatile uint32_t	d32;
	int			loop;
	unsigned int		q_no;
	uint32_t		ern, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	/* Disable Input Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/* start the Reset for a particular ring */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 &= ~LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
		d64 |= LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Input Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);
		while (((lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_DOORBELL(q_no))) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}
	}

	/* Disable Output Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/*
		 * Wait until hardware indicates that this particular queue
		 * is out of reset; SLI_PKT_RING_RST is shared by both the
		 * IQs and the OQs.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Output Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				0xFFFFFFFF);
		while ((lio_read_csr64(oct,
				       LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}

		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		d32 = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no));
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no),	d32);
	}
}
Example #23
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}
Example #24
/**
 * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @adapter: board private structure
 * @nfc: ethtool rxnfc command
 *
 * Returns Success if the flow input set is supported.
 **/
static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
				   struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &adapter->hw;

	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
		break;
	case IPV6_FLOW:
		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
		break;
	default:
		return -EINVAL;
	}

	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
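Both RSS examples (this one and Example #2) reassemble the 64-bit hash-enable value from two 32-bit registers and split it back on write. The round-trip in isolation, with hypothetical rd32/wr32 stand-ins backed by a plain array instead of the device's I40E_VFQF_HENA registers:

#include <stdint.h>

static uint32_t regs[2];	/* stand-ins for two 32-bit device registers */

static uint32_t rd32(int i) { return regs[i]; }
static void wr32(int i, uint32_t v) { regs[i] = v; }

/* Read the 64-bit value spread across two 32-bit registers. */
static uint64_t read_hena(void)
{
	return (uint64_t)rd32(0) | ((uint64_t)rd32(1) << 32);
}

/* Write it back, low word first, as the driver does. */
static void write_hena(uint64_t hena)
{
	wr32(0, (uint32_t)hena);
	wr32(1, (uint32_t)(hena >> 32));
}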