Example #1
/**
 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: Board private structure.
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 **/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;
	int adjust = 0;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	/* adjust timestamp for the TX latency based on link speed */
	if (adapter->hw.mac.type == e1000_i210) {
		switch (adapter->link_speed) {
		case SPEED_10:
			adjust = IGB_I210_TX_LATENCY_10;
			break;
		case SPEED_100:
			adjust = IGB_I210_TX_LATENCY_100;
			break;
		case SPEED_1000:
			adjust = IGB_I210_TX_LATENCY_1000;
			break;
		}
	}

	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);

	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
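The i210-specific block above subtracts a fixed per-link-speed transmit latency from the hardware timestamp before handing it to the stack. A minimal sketch of that correction in isolation is shown below; the MY_TX_LATENCY_* values are placeholders for whatever the datasheet specifies, not the driver's real IGB_I210_TX_LATENCY_* constants.

/* Hedged sketch: per-link-speed Tx latency correction.
 * MY_TX_LATENCY_* are illustrative placeholders (nanoseconds);
 * a real driver would take these from its hardware datasheet.
 */
#define MY_TX_LATENCY_10	0	/* placeholder */
#define MY_TX_LATENCY_100	0	/* placeholder */
#define MY_TX_LATENCY_1000	0	/* placeholder */

static ktime_t my_adjust_tx_latency(ktime_t hwtstamp, u32 link_speed)
{
	u64 adjust = 0;

	switch (link_speed) {
	case SPEED_10:
		adjust = MY_TX_LATENCY_10;
		break;
	case SPEED_100:
		adjust = MY_TX_LATENCY_100;
		break;
	case SPEED_1000:
		adjust = MY_TX_LATENCY_1000;
		break;
	}

	/* Apply the per-speed correction to the raw hardware timestamp,
	 * mirroring the ktime_sub_ns() call in the example above.
	 */
	return ktime_sub_ns(hwtstamp, adjust);
}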
Example #2
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb = pf->ptp_tx_skb;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
		return;

	/* don't attempt to timestamp if we don't have an skb */
	if (!pf->ptp_tx_skb)
		return;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;
	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);

	/* Clear the bit lock as soon as possible after reading the register,
	 * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
	 * applications might wake up and attempt to request another transmit
	 * timestamp prior to the bit lock being cleared.
	 */
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);

	/* Notify the stack and free the skb after we've unlocked */
	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
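The comment in the i40e version above captures the one subtle ordering issue in this pattern: the driver's single-outstanding-timestamp lock must be released before skb_tstamp_tx() wakes the application, or the next timestamp request can race with the cleanup. A stripped-down sketch of that ordering follows, using a hypothetical struct my_dev and my_read_tx_stamp_ns() helper rather than any real driver's API.

/* Hedged sketch of the generic Tx hardware-timestamp completion path.
 * struct my_dev, my_read_tx_stamp_ns() and __MY_PTP_TX_IN_PROGRESS are
 * placeholders; dev->state is assumed to be a DECLARE_BITMAP() field.
 */
static void my_tx_hwtstamp(struct my_dev *dev)
{
	struct skb_shared_hwtstamps shhwtstamps = { };
	struct sk_buff *skb = dev->ptp_tx_skb;

	/* Nothing to do if no packet is waiting for a timestamp. */
	if (!skb)
		return;

	shhwtstamps.hwtstamp = ns_to_ktime(my_read_tx_stamp_ns(dev));

	/* Drop our reference and release the one-timestamp-at-a-time lock
	 * before notifying the stack, so an application woken by
	 * skb_tstamp_tx() can immediately request the next Tx timestamp.
	 */
	dev->ptp_tx_skb = NULL;
	clear_bit_unlock(__MY_PTP_TX_IN_PROGRESS, dev->state);

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}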
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			netdev_err(netdev, "timestamp the TX packet failed\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = timecounter_cyc2time(&lp->clock,
					regval);
			timecompare_update(&lp->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&lp->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);

			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
		}
	}
}
Example #4
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
		return;

	/* don't attempt to timestamp if we don't have an skb */
	if (!pf->ptp_tx_skb)
		return;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;

	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(pf->ptp_tx_skb);
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
Example #5
/**
 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: Board private structure.
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 **/
void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
}
/**
 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: Board private structure.
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 **/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: the private adapter struct
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0;

	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

	ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);

	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
}
Example #8
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;

	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(pf->ptp_tx_skb);
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
Example #9
File: tx.c Project: avagin/linux
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	union skb_shared_tx *shtx = skb_tx(skb);

	if (shtx->hardware) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		shtx->in_progress = 1;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			printk(KERN_ERR DRV_NAME
					": fails to timestamp the TX packet\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = timecounter_cyc2time(&lp->clock,
					regval);
			timecompare_update(&lp->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&lp->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);

			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
		}
	}
}
Example #11
/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0, ns;
	u32 tsynctxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

	/*
	 * if TX timestamp is not valid, exit after clearing the
	 * timestamp registers
	 */
	if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);
}
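The TSYNCTXCTL valid-bit test above is the guard that keeps a stale or never-latched timestamp from being reported. In isolation, and with hypothetical my_rd32()/MY_* register names standing in for any particular NIC's register map, the check looks roughly like this:

/* Hedged sketch: read the latched Tx timestamp only if the hardware
 * flags it as valid. my_rd32() and the MY_* register/bit names are
 * placeholders, not a real device's registers.
 */
static bool my_read_valid_tx_stamp(struct my_hw *hw, u64 *stamp)
{
	u32 ctl = my_rd32(hw, MY_TSYNCTXCTL);
	u64 lo = my_rd32(hw, MY_TXSTMPL);
	u64 hi = my_rd32(hw, MY_TXSTMPH);

	/* Read both halves unconditionally, as the example above does, so
	 * the timestamp registers are cleared even when the value is stale.
	 */
	if (!(ctl & MY_TSYNCTXCTL_VALID))
		return false;

	*stamp = (hi << 32) | lo;
	return true;
}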
Example #12
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			netdev_err(netdev, "timestamp the TX packet failed\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = regval << lp->shift;
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
Example #13
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, ebdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
Example #14
File: en_tx.c Project: 8l/akaros
static uint32_t mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, uint8_t owner, uint64_t timestamp)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	void *end = ring->buf + ring->buf_size;
	struct block *block = tx_info->block;
	int nr_maps = tx_info->nr_maps;
	int i;

#if 0 // AKAROS_PORT
	/* We do not touch skb here, so prefetch skb->users location
	 * to speedup consume_skb()
	 */
	prefetchw(&skb->users);

	if (unlikely(timestamp)) {
		struct skb_shared_hwtstamps hwts;

		mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp);
		skb_tstamp_tx(skb, &hwts);
	}
#endif

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear)
				dma_unmap_single(priv->ddev,
						tx_info->map0_dma,
						tx_info->map0_byte_count,
						PCI_DMA_TODEVICE);
			else
				dma_unmap_page(priv->ddev,
					       tx_info->map0_dma,
					       tx_info->map0_byte_count,
					       PCI_DMA_TODEVICE);
			for (i = 1; i < nr_maps; i++) {
				data++;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
			}
		}
	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear)
				dma_unmap_single(priv->ddev,
						tx_info->map0_dma,
						tx_info->map0_byte_count,
						PCI_DMA_TODEVICE);
			else
				dma_unmap_page(priv->ddev,
					       tx_info->map0_dma,
					       tx_info->map0_byte_count,
					       PCI_DMA_TODEVICE);
			for (i = 1; i < nr_maps; i++) {
				data++;
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
			}
		}
	}
	freeb(block);
	return tx_info->nr_txbb;
}
Example #15
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			/* Read the timestamp */
			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.syststamp = ptp_to_ktime(ns);
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5_cqe64 *cqe;
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_TX_TIMEOUT, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	cqe = mlx5e_get_cqe(cq);

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		u16 wqe_counter;
		bool last_wqe;

		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);
		mlx5e_prefetch_cqe(cq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];
			wi = &sq->wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(MLX5E_TX_HW_STAMP(sq->channel->priv,
						       skb))) {
				struct skb_shared_hwtstamps hwts;

				mlx5e_fill_hwstamp(&sq->cq.channel->priv->tstamp,
						   &hwts, get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			dev_kfree_skb(skb);
		} while (!last_wqe);

		cqe = mlx5e_get_cqe(cq);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.queue_wake++;
	}
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}