Example #1
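/**
 * i40e_set_coalesce - Set interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure
 *
 * Change the current coalescing settings for every queue on this VSI.
 **/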
static int i40e_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_q_vector *q_vector;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i;

	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	switch (ec->rx_coalesce_usecs) {
	case 0:
		vsi->rx_itr_setting = 0;
		break;
	case 1:
		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
		break;
	default:
		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
			return -EINVAL;
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
		break;
	}

	switch (ec->tx_coalesce_usecs) {
	case 0:
		vsi->tx_itr_setting = 0;
		break;
	case 1:
		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
		break;
	default:
		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
			return -EINVAL;
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
		break;
	}

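	/* write the new ITR values into each queue vector's registers */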
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
		i40e_flush(hw);
	}

	return 0;
}
Example #2
/**
 * i40evf_set_coalesce - Set interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure
 *
 * Change current coalescing settings.
 **/
static int i40evf_set_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_vsi *vsi = &adapter->vsi;
	struct i40e_q_vector *q_vector;
	int i;

	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		return -EINVAL;

	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
	else if (ec->use_adaptive_tx_coalesce)
		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
	else
		return -EINVAL;

	if (ec->use_adaptive_rx_coalesce)
		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;

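	/* write the updated ITR values into each MSI-X queue vector */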
	for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
		q_vector = &adapter->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
		i40e_flush(hw);
	}

	return 0;
}
Example #3
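/*
 * i40e_xmit - Map an mbuf chain to the TX descriptor ring and
 * hand the frame to the hardware for transmit.
 *
 * Returns 0 on success, or a positive errno on failure.
 */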
static int
i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
{
	struct i40e_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct i40e_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int             	i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[I40E_MAX_TSO_SEGS];

	cmd = off = 0;
	m_head = *m_headp;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = I40E_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = I40E_MAX_TSO_SEGS;
		if (i40e_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				/* defrag failed, drop the packet */
				que->mbuf_defrag_failed++;
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = i40e_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

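	/* build one data descriptor for each DMA segment */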
	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag  << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	i40e_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
Example #4
/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
 *
 * Returns 0 if the flow input set is supported, -EINVAL otherwise.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

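	/* commit the updated hash-enable mask to the hardware */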
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
Example #5
/**
 * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @adapter: board private structure
 * @nfc: ethtool rxnfc command
 *
 * Returns 0 if the flow input set is supported, -EINVAL otherwise.
 **/
static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
				   struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 flags = adapter->vf_res->vf_offload_flags;
	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
				hena |=
			   BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);

			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
		} else {
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
				hena |=
			   BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);

			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
				hena |=
			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);

			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
				hena |=
			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);

			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
		} else {
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
		break;
	case IPV6_FLOW:
		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
		break;
	default:
		return -EINVAL;
	}

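	/* commit the updated hash-enable mask to the VF registers */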
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}