Example #1
0
int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
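	/* Mark the device present again; the TX lock serializes this with
	 * any concurrent transmit path.
	 */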
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
Example #2
0
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
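		/* Send with the TX lock held; if the device is stopped or the
		 * driver reports busy, requeue the skb and retry later from
		 * the delayed work.
		 */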
		netif_tx_lock(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}
Example #3
0
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}
Example #4
0
/**
 * rmnet_map_send_ack() - Send N/ACK message for MAP commands
 * @skb: Socket buffer containing the MAP command message
 * @type: N/ACK message selector
 * @config: Physical end-point configuration of ingress device
 *
 * skb is modified to contain the message type selector. The message is then
 * transmitted on skb->dev. Note that this function grabs the global Tx lock on
 * skb->dev for latency reasons.
 *
 * Return:
 *      - void
 */
static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_map_control_command_s *cmd;
	int xmit_status;

	if (unlikely(!skb))
		BUG();

	skb->protocol = htons(ETH_P_MAP);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
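		/* Checksum offload appends a trailer; verify it is actually
		 * present before trimming it off the command skb.
		 */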
		if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s)
		    + RMNET_MAP_GET_LENGTH(skb)
		    + sizeof(struct rmnet_map_dl_checksum_trailer_s)))) {
			rmnet_stats_dl_checksum(
			  RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER);
			return;
		}

		skb_trim(skb, skb->len -
			 sizeof(struct rmnet_map_dl_checksum_trailer_s));
	}

	cmd = RMNET_MAP_GET_CMD_START(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(skb->dev);
	xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
	netif_tx_unlock(skb->dev);

	LOGD("MAP command ACK=%hhu sent with rc: %d", type & 0x03, xmit_status);
}
Example #5
0
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
#if 0 // Fix QA-Bug #8011 on Agile.
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
#endif
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Example #6
0
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status;
	struct netpoll_info *npinfo;

	if (!np || !dev || !netif_running(dev)) {
		__kfree_skb(skb);
		return;
	}

	npinfo = dev->npinfo;

	/* avoid recursion */
	if (npinfo->poll_owner == smp_processor_id() ||
	    np->dev->xmit_lock_owner == smp_processor_id()) {
		if (np->drop)
			np->drop(skb);
		else
			__kfree_skb(skb);
		return;
	}

	do {
		npinfo->tries--;
		netif_tx_lock(dev);

		/*
		 * network drivers do not expect to be called if the queue is
		 * stopped.
		 */
		status = NETDEV_TX_BUSY;
		if (!netif_queue_stopped(dev)) {
			dev->priv_flags |= IFF_IN_NETPOLL;
			status = dev->hard_start_xmit(skb, dev);
			dev->priv_flags &= ~IFF_IN_NETPOLL;
		}

		netif_tx_unlock(dev);

		/* success */
		if (!status) {
			npinfo->tries = MAX_RETRIES; /* reset */
			return;
		}

		/* transmit busy */
		netpoll_poll_dev(dev);
		udelay(50);
	} while (npinfo->tries > 0);
}
Example #7
0
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
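				/* Flag the queue as timed out if it is stopped
				 * and has not transmitted within the watchdog
				 * timeout.
				 */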
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Example #8
0
static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = skb->dev;

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		skb_trim(skb,
			 skb->len - sizeof(struct rmnet_map_dl_csum_trailer));

	skb->protocol = htons(ETH_P_MAP);

	cmd = RMNET_MAP_GET_CMD_START(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
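	/* Drive the driver's ndo_start_xmit directly, bypassing the qdisc
	 * layer, while holding the TX lock.
	 */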
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}
Example #9
0
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Example #10
0
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
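	/* Detach under the TX lock so any in-flight transmit completes
	 * before the device is marked absent.
	 */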
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
Example #11
0
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 val;

	netif_tx_lock(dev);
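	/* The TX lock keeps this reclaim loop from racing with the transmit
	 * path, which uses the same txq ring and tx_fifo_used_cnt counter.
	 */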

	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
	while (val < priv->tx_fifo_used_cnt) {
		skb = txq->skb[txq->tail];
		if (unlikely(!skb)) {
			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
				   val, priv->tx_fifo_used_cnt);
			break;
		}
		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb_any(skb);

		priv->tx_fifo_used_cnt--;

		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
		netif_wake_queue(dev);

	netif_tx_unlock(dev);
}
Example #12
0
/**
 * stmmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
				priv->hw->desc->tx_status(&priv->dev->stats,
							  &priv->xstats, p,
							  priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
		if (unlikely(p->des3))
			p->des3 = 0;

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the recycle queue (bounded by the
			 * RX ring size), add this skb back into the pool,
			 * provided it is the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
				priv->dma_rx_size) &&
				skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		entry = (++priv->dirty_tx) % txsize;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
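		/* Re-check under the TX lock: the xmit path may have changed
		 * the queue state since the unlocked test above.
		 */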
		if (netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}
}