Example No. 1
/* This function is used only in no_bxm mode; it's not implemented
 * in netdevice.h, so we define it here.
 * Based on netif_tx_lock().
 */
static inline int vnic_netif_tx_trylock(struct net_device *dev)
{
	int i;

	spin_lock(&dev->tx_global_lock);
	for (i = 0; i < dev->num_tx_queues; ++i) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (__netif_tx_trylock(txq)) {
			set_bit(__QUEUE_STATE_FROZEN, &txq->state);
			__netif_tx_unlock(txq);
		} else {
			goto unlock;
		}
	}

	return 1;

unlock:
	/* based on netif_tx_unlock() */
	for (--i; i >= 0; --i) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!(txq->state & QUEUE_STATE_ANY_XOFF))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);

	return 0;
}
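Only the lock side is shown above; a caller that gets 1 back holds tx_global_lock with every queue frozen, so it also needs a matching release helper mirroring the cleanup path. A minimal sketch of such a counterpart, modeled on the kernel's netif_tx_unlock() (the name vnic_netif_tx_unlock is an assumption, not part of the driver shown):

/* Hypothetical counterpart to vnic_netif_tx_trylock(), modeled on
 * netif_tx_unlock(): unfreeze every queue, reschedule any queue that
 * is not stopped, then drop the global TX lock taken by the trylock.
 */
static inline void vnic_netif_tx_unlock(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; ++i) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!(txq->state & QUEUE_STATE_ANY_XOFF))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);
}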
Example No. 2
int mpodp_clean_tx(struct mpodp_if_priv *priv, unsigned budget)
{
	int i;
	int worked = 0;

	for (i = 0; i < priv->n_txqs; ++i) {
		struct netdev_queue *txq = priv->txqs[i].txq;

		/* skip a contended queue; it will be cleaned on a later call */
		if (__netif_tx_trylock(txq)) {
			worked += mpodp_clean_tx_unlocked(priv, &priv->txqs[i],
							  MPODP_MAX_TX_RECLAIM);
			__netif_tx_unlock(txq);
		}
	}
	return worked;
}
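Note that the per-queue reclaim here is capped by MPODP_MAX_TX_RECLAIM rather than by the budget argument. A reclaim routine like this is typically driven from a NAPI poll callback; a minimal sketch of such a caller, assuming a priv->napi field and the mpodp_poll name (neither appears in the snippet above):

/* Hypothetical NAPI poll handler driving mpodp_clean_tx().  The
 * priv->napi field and the TX-only budget accounting are assumptions
 * made for illustration.
 */
static int mpodp_poll(struct napi_struct *napi, int budget)
{
	struct mpodp_if_priv *priv =
		container_of(napi, struct mpodp_if_priv, napi);
	int worked = mpodp_clean_tx(priv, budget);

	if (worked < budget) {
		/* all pending TX completions reclaimed: stop polling */
		napi_complete(napi);
	}

	return min(worked, budget);
}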
Example No. 3
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
Example No. 4
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
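For context, this tasklet is kicked from ifb's transmit path, which appends the redirected skb to dp->rq and schedules the tasklet if it is not already pending. The unlocked __skb_queue helpers are safe here precisely because the xmit path runs under the TX queue lock that the tasklet takes with __netif_tx_trylock(). A condensed sketch of that producer side (simplified for illustration; validation and stats updates are elided):

/* Simplified producer side of the ifb tasklet: queue the redirected
 * skb and schedule ri_tasklet() unless it is already pending.
 */
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
		netif_stop_queue(dev);

	__skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}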
Example No. 5
static inline void
dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
	struct netdev_queue *txq;
	int cpu, clean_done = 0;
	u32 cpu_ptr, dma_ptr, cpu_idx;
#if defined (CONFIG_RAETH_BQL)
	u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
	u32 bytes_sent_ge2 = 0;
#endif
#endif

	spin_lock(&ei_local->page_lock);

	cpu_ptr = sysRegRead(QTX_CRX_PTR);
	dma_ptr = sysRegRead(QTX_DRX_PTR);

	/* get current CPU TXD index */
	cpu_idx = get_txd_offset(ei_local, cpu_ptr);

	while (cpu_ptr != dma_ptr) {
		struct QDMA_txdesc *txd;
		struct sk_buff *skb;

		txd = &ei_local->txd_pool[cpu_idx];

		/* check TXD not owned by DMA */
		if (!(ACCESS_ONCE(txd->txd_info3) & TX3_QDMA_OWN))
			break;

		/* hold next TXD ptr */
		cpu_ptr = ACCESS_ONCE(txd->txd_info2);

		/* release current TXD */
		put_free_txd(ei_local, cpu_idx);

		/* get next TXD index */
		cpu_idx = get_txd_offset(ei_local, cpu_ptr);

		/* free skb */
		skb = ei_local->txd_buff[cpu_idx];
		if (skb) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (skb->dev == ei_local->PseudoDev)
				bytes_sent_ge2 += skb->len;
			else
#endif
				bytes_sent_ge1 += skb->len;
#endif
			ei_local->txd_buff[cpu_idx] = NULL;
			dev_kfree_skb(skb);
		}

		clean_done++;

		/* prevent an infinite loop if something went wrong */
		if (clean_done > (NUM_TX_DESC-4))
			break;
	}

	if (clean_done)
		sysRegWrite(QTX_CRX_PTR, cpu_ptr);

	spin_unlock(&ei_local->page_lock);

	if (!clean_done)
		return;

	cpu = smp_processor_id();

	if (netif_running(dev)) {
		txq = netdev_get_tx_queue(dev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

#if defined (CONFIG_PSEUDO_SUPPORT)
	if (netif_running(ei_local->PseudoDev)) {
		txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
#endif
}
Example No. 6
static inline void
dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
	struct netdev_queue *txq;
	int cpu, clean_done = 0;
	u32 txd_free_idx;
#if defined (CONFIG_RAETH_BQL)
	u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
	u32 bytes_sent_ge2 = 0;
#endif
#endif

	spin_lock(&ei_local->page_lock);

	txd_free_idx = ei_local->txd_free_idx;

	while (clean_done < (NUM_TX_DESC-2)) {
		struct PDMA_txdesc *txd;
		struct sk_buff *skb;

		skb = ei_local->txd_buff[txd_free_idx];
		if (!skb)
			break;

		txd = &ei_local->txd_ring[txd_free_idx];

		/* check TXD not owned by DMA */
		if (!(ACCESS_ONCE(txd->txd_info2) & TX2_DMA_DONE))
			break;

		/* 0xFFFFFFFF is a placeholder for descriptors with no skb
		 * of their own (likely fragment TXDs), so there is nothing
		 * to free or account for them */
		if (skb != (struct sk_buff *)0xFFFFFFFF) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (skb->dev == ei_local->PseudoDev)
				bytes_sent_ge2 += skb->len;
			else
#endif
				bytes_sent_ge1 += skb->len;
#endif
			dev_kfree_skb(skb);
		}

		ei_local->txd_buff[txd_free_idx] = NULL;

		txd_free_idx = (txd_free_idx + 1) % NUM_TX_DESC;

		clean_done++;
	}

	if (ei_local->txd_free_idx != txd_free_idx)
		ei_local->txd_free_idx = txd_free_idx;

	spin_unlock(&ei_local->page_lock);

	if (!clean_done)
		return;

	cpu = smp_processor_id();

	if (netif_running(dev)) {
		txq = netdev_get_tx_queue(dev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

#if defined (CONFIG_PSEUDO_SUPPORT)
	if (netif_running(ei_local->PseudoDev)) {
		txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
#endif
}
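Examples 5 and 6 only show the completion half of the BQL accounting: netdev_tx_completed_queue() can only shrink an in-flight byte count that the transmit path previously reported. A minimal sketch of that xmit-side half of the pairing (the dma_xmit name and the elided descriptor setup are assumptions):

/* Hypothetical xmit-side half of the BQL accounting: bytes handed to
 * the DMA engine are reported with netdev_tx_sent_queue() so that the
 * netdev_tx_completed_queue() calls in dma_xmit_clean() can drain the
 * in-flight byte count back down.
 */
static netdev_tx_t dma_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... fill TX descriptors and kick the DMA engine here ... */

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif
	return NETDEV_TX_OK;
}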
Example No. 7
static void ri_tasklet(unsigned long dev)
{
    struct net_device *_dev = (struct net_device *)dev;
    struct ifb_private *dp = netdev_priv(_dev);
    struct net_device_stats *stats = &_dev->stats;
    struct netdev_queue *txq;
    struct sk_buff *skb;

    txq = netdev_get_tx_queue(_dev, 0);
    dp->st_task_enter++;
    if ((skb = skb_peek(&dp->tq)) == NULL) {
        dp->st_txq_refl_try++;
        if (__netif_tx_trylock(txq)) {
            dp->st_rxq_enter++;
            while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                skb_queue_tail(&dp->tq, skb);
                dp->st_rx2tx_tran++;
            }
            __netif_tx_unlock(txq);
        } else {
            /* reschedule */
            dp->st_rxq_notenter++;
            goto resched;
        }
    }

    while ((skb = skb_dequeue(&dp->tq)) != NULL) {
        u32 from = G_TC_FROM(skb->tc_verd);

        skb->tc_verd = 0;
        skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        skb->dev = dev_get_by_index(&init_net, skb->iif);
        if (!skb->dev) {
            dev_kfree_skb(skb);
            stats->tx_dropped++;
            break;
        }
        /* the reference is dropped immediately; later kernels instead
         * use dev_get_by_index_rcu() under rcu_read_lock(), as in the
         * ri_tasklet() variant of Example No. 4 */
        dev_put(skb->dev);
        skb->iif = _dev->ifindex;

        if (from & AT_EGRESS) {
            dp->st_rx_frm_egr++;
            dev_queue_xmit(skb);
        } else if (from & AT_INGRESS) {
            dp->st_rx_frm_ing++;
            skb_pull(skb, skb->dev->hard_header_len);
            netif_rx(skb);
        } else
            BUG();
    }

    if (__netif_tx_trylock(txq)) {
        dp->st_rxq_check++;
        if ((skb = skb_peek(&dp->rq)) == NULL) {
            dp->tasklet_pending = 0;
            if (netif_queue_stopped(_dev))
                netif_wake_queue(_dev);
        } else {
            dp->st_rxq_rsch++;
            __netif_tx_unlock(txq);
            goto resched;
        }
        __netif_tx_unlock(txq);
    } else {
resched:
        dp->tasklet_pending = 1;
        tasklet_schedule(&dp->ifb_tasklet);
    }

}
Example No. 8
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
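The per-queue counters in this example are updated under txp->tsync, so a reader (for instance the device's ndo_get_stats64 handler) needs the usual u64_stats fetch/retry loop. A minimal sketch of such a reader, assuming the txp fields shown above (the ifb_q_read_stats name is hypothetical):

/* Hypothetical reader for the counters ifb_ri_tasklet() updates above,
 * using the standard u64_stats fetch/retry loop so a 32-bit reader
 * never observes a torn 64-bit counter.
 */
static void ifb_q_read_stats(struct ifb_q_private *txp,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&txp->tsync);
		*packets = txp->tx_packets;
		*bytes = txp->tx_bytes;
	} while (u64_stats_fetch_retry(&txp->tsync, start));
}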