Example #1
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

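	/* skb_put_padto() zero-pads runts to ETH_ZLEN and frees the
	 * skb on failure, so a failed call must be answered with
	 * NETDEV_TX_OK without touching the skb again.
	 */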
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* Critical Section */
	spin_lock_irqsave(&lp->lock, flags);

	/* TX resource check */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, "no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Set TX descriptor & Transmit it */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	descptr->len = skb->len;
	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor */
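	/* With xmit_more set the stack has more frames queued for us,
	 * so the doorbell write is deferred unless the queue is being
	 * stopped and no follow-up transmit will arrive.
	 */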
	if (!skb->xmit_more || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* If no tx resource, stop */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
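All three examples on this page center on skb_put_padto(), which zero-pads a frame up to the requested length and, on allocation failure, frees the skb itself, so the caller must neither free nor reuse the buffer after a failed call. A minimal sketch of the resulting transmit-path pattern, assuming a hypothetical example_xmit() handler (everything except skb_put_padto(), ETH_ZLEN, and the netdev_tx_t return codes is illustrative):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Pad runts to the 60-byte Ethernet minimum.  On failure the
	 * skb has already been freed, so report NETDEV_TX_OK and do
	 * not touch skb again.
	 */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ... map skb->data and hand the frame to the hardware ... */

	return NETDEV_TX_OK;
}

Returning NETDEV_TX_OK on failure is deliberate: the packet has been consumed, and NETDEV_TX_BUSY would make the stack requeue a buffer that no longer exists.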
Example #2
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor. This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: The RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and the read pointer is then advanced, the RX queue may
 * underflow (if the firmware has not yet updated the write
 * pointer).  Therefore we use slightly ugly conditional code
 * below to handle the differences.  We may, in the future, update the
 * NFP-3200 firmware to behave the same as the firmware on the
 * NFP-6000.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	int avail = 0, pkts_polled = 0;
	struct sk_buff *skb, *new_skb;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	u32 qcp_wr_p;
	int idx;

	if (nn->is_nfp3200) {
		/* Work out how many packets arrived */
		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
		idx = rx_ring->rd_p % rx_ring->cnt;

		if (qcp_wr_p == idx)
			/* No new packets */
			return 0;

		if (qcp_wr_p > idx)
			avail = qcp_wr_p - idx;
		else
			avail = qcp_wr_p + rx_ring->cnt - idx;
	} else {
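		/* NFP-6000: there is no RX write-pointer queue, so rely
		 * on the DD bit in each descriptor and let the NAPI
		 * budget bound the loop.
		 */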
		avail = budget + 1;
	}

	while (avail > 0 && pkts_polled < budget) {
		idx = rx_ring->rd_p % rx_ring->cnt;

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
			if (nn->is_nfp3200)
				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
				       rx_ring->idx, idx,
				       rxd->vals[0], rxd->vals[1]);
			break;
		}
		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		rx_ring->rd_p++;
		pkts_polled++;
		avail--;

		skb = rx_ring->rxbufs[idx].skb;

		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
					       nn->fl_bufsz);
		if (!new_skb) {
			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
					    rx_ring->rxbufs[idx].dma_addr);
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->rx_drops++;
			u64_stats_update_end(&r_vec->rx_sync);
			continue;
		}

		dma_unmap_single(&nn->pdev->dev,
				 rx_ring->rxbufs[idx].dma_addr,
				 nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);

		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, meta_len);
		else
			skb_reserve(skb, nn->rx_offset);
		skb_put(skb, data_len - meta_len);

		nfp_net_set_hash(nn->netdev, skb, rxd);

		/* Pad small frames to the 60-byte Ethernet minimum
		 * (ETH_ZLEN); skb_put_padto() frees the skb on failure.
		 */
		if (skb_put_padto(skb, ETH_ZLEN))
			break;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		u64_stats_update_end(&r_vec->rx_sync);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, nn->netdev);

		nfp_net_rx_csum(nn, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (nn->is_nfp3200)
		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);

	return pkts_polled;
}
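The NFP-3200 branch above computes how many descriptors are ready as the circular distance between the hardware write pointer and the driver's read index. A self-contained sketch of that wrap-around arithmetic, with illustrative names (ring_avail() is not part of the driver):

/* Entries ready in a circular ring of cnt slots, given a hardware
 * write index wr and a driver read index rd (both already reduced
 * modulo cnt).  Mirrors the qcp_wr_p/idx computation above.
 */
static unsigned int ring_avail(unsigned int wr, unsigned int rd,
			       unsigned int cnt)
{
	if (wr >= rd)
		return wr - rd;		/* no wrap between rd and wr */
	return wr + cnt - rd;		/* wr has wrapped past the end */
}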
Example #3
static void send_hsr_supervision_frame(struct hsr_port *master,
		u8 type, u8 hsrVer)
{
	struct sk_buff *skb;
	int hlen, tlen;
	struct hsr_tag *hsr_tag;
	struct hsr_sup_tag *hsr_stag;
	struct hsr_sup_payload *hsr_sp;
	unsigned long irqflags;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	skb = dev_alloc_skb(
			sizeof(struct hsr_tag) +
			sizeof(struct hsr_sup_tag) +
			sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (!skb)
		return;

	skb_reserve(skb, hlen);

	skb->dev = master->dev;
	skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP);
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP),
			    master->hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;
	skb_reset_mac_header(skb);

	if (hsrVer > 0) {
		hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
		hsr_tag->encap_proto = htons(ETH_P_PRP);
		set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
	}

	hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
	set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	if (hsrVer > 0) {
		hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
		hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sup_sequence_nr++;
		master->hsr->sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sequence_nr++;
	}
	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_stag->HSR_TLV_Type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA */
	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);

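	/* skb_put_padto() frees the skb on failure, so the frame must
	 * not be forwarded (or freed again) if padding fails.
	 */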
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return;

	hsr_forward_skb(skb, master);
	return;

out:
	WARN_ONCE(1, "HSR: Could not send supervision frame\n");
	kfree_skb(skb);
}
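The supervision frame above is built by hand: allocate with room for the device's headroom and tailroom, reserve the headroom, then skb_put() each tag in order. A compact sketch of that allocate-and-reserve step, assuming a hypothetical alloc_ll_frame() helper (LL_RESERVED_SPACE(), needed_tailroom, dev_alloc_skb(), and skb_reserve() are the kernel APIs used above):

static struct sk_buff *alloc_ll_frame(struct net_device *dev,
				      unsigned int payload)
{
	int hlen = LL_RESERVED_SPACE(dev);	/* link-layer headroom */
	int tlen = dev->needed_tailroom;	/* device tailroom */
	struct sk_buff *skb;

	skb = dev_alloc_skb(payload + hlen + tlen);
	if (!skb)
		return NULL;

	/* Reserve the headroom so dev_hard_header() and subsequent
	 * skb_put() calls have space to work with.
	 */
	skb_reserve(skb, hlen);
	skb->dev = dev;
	return skb;
}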