Code example #1
File: sch_generic.c  Project: johnny/CobraDroidBeta
static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc_sleeping != &noop_qdisc)
			return false;
	}
	return true;
}
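
The loop above is the standard way to visit every TX queue of a device. For comparison, a similar read-side helper in include/net/sch_generic.h walks the same queues to check whether every root qdisc is empty; the version below is reconstructed from memory and should be treated as a sketch rather than the exact upstream text.

static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = qdisc_root(txq->qdisc);

		/* any queued packet means the device is not idle */
		if (q->q.qlen)
			return false;
	}
	return true;
}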
Code example #2
File: txrx.c  Project: 03199618/linux
/*
 * Packet send completion callback handler.
 *
 * It either frees the buffer directly or forwards it to another
 * completion callback which checks conditions, updates statistics,
 * wakes up stalled traffic queue if required, and then frees the buffer.
 */
int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
				struct sk_buff *skb, int aggr, int status)
{
	struct mwifiex_private *priv;
	struct mwifiex_txinfo *tx_info;
	struct netdev_queue *txq;
	int index;

	if (!skb)
		return 0;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
				      tx_info->bss_type);
	if (!priv)
		goto done;

	if (adapter->iface_type == MWIFIEX_USB)
		adapter->data_sent = false;

	mwifiex_set_trans_start(priv->netdev);
	if (!status) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		if (priv->tx_timeout_cnt)
			priv->tx_timeout_cnt = 0;
	} else {
		priv->stats.tx_errors++;
	}

	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
		atomic_dec_return(&adapter->pending_bridged_pkts);

	if (aggr)
		/* For skb_aggr, do not wake up tx queue */
		goto done;

	atomic_dec(&adapter->tx_pending);

	index = mwifiex_1d_to_wmm_queue[skb->priority];
	if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
		txq = netdev_get_tx_queue(priv->netdev, index);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_wake_queue(txq);
			dev_dbg(adapter->dev, "wake queue: %d\n", index);
		}
	}
done:
	dev_kfree_skb_any(skb);

	return 0;
}
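
The wake-up at the tail of this handler is one half of a watermark scheme: code example #19 below stops the same subqueue once wmm_tx_pending reaches MAX_TX_PENDING, and this completion path wakes it only after the count drains below LOW_TX_PENDING. A stripped-down illustration of that pattern follows; my_priv, my_tx_watermark, MY_HIGH and MY_LOW are made-up names for illustration, not mwifiex API.

struct my_priv {
	struct net_device *netdev;
};

#define MY_HIGH	400	/* stop the subqueue at this many pending frames */
#define MY_LOW	380	/* wake it once completions drain below this */

static void my_tx_watermark(struct my_priv *priv, int index, int pending)
{
	struct netdev_queue *txq = netdev_get_tx_queue(priv->netdev, index);

	if (pending >= MY_HIGH && !netif_tx_queue_stopped(txq))
		netif_tx_stop_queue(txq);
	else if (pending < MY_LOW && netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
}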
Code example #3
File: vrf.c  Project: ReneNyffenegger/linux
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}
Code example #4
File: netdev.c  Project: pfq/PFQ
int pfq_dev_queue_get(struct net *net, int ifindex, int queue, struct pfq_dev_queue *dq)
{
	struct net_device *dev = dev_get_by_index(net, ifindex);
	if (dev == NULL) {
		*dq = (struct pfq_dev_queue){.dev = NULL, .queue = NULL, .mapping = 0};
		return -EFAULT;
	}

	dq->dev = dev;
	dq->mapping = __pfq_dev_cap_txqueue(dev, queue);
	dq->queue = netdev_get_tx_queue(dev, dq->mapping);
	return 0;
}
Code example #5
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
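
dev_trans_start() folds the per-queue timestamps back into the legacy device-wide field; the per-queue side is written on every successful transmit. The inline that does that write lives in linux/netdevice.h and, reconstructed from memory as a sketch, looks roughly like:

static inline void txq_trans_update(struct netdev_queue *txq)
{
	/* only touch the timestamp while the xmit lock is held */
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}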
Code example #6
File: sch_mq.c  Project: 19Dan01/linux
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

	if (!dev_queue) {
		struct net_device *dev = qdisc_dev(sch);

		return netdev_get_tx_queue(dev, 0);
	}
	return dev_queue;
}
Code example #7
File: en_main.c  Project: Kirill2013/kasan
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map     = sq->uar.map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

	sq->pdev    = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc      = tc;
	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	priv->txq_to_sq_map[txq_ix] = sq;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
Code example #8
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @nn:		NFP Net device
 * @tx_ring:	TX ring structure
 *
 * Assumes that the device is stopped
 */
static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	struct pci_dev *pdev = nn->pdev;

	while (tx_ring->rd_p != tx_ring->wr_p) {
		int nr_frags, fidx, idx;
		struct sk_buff *skb;

		idx = tx_ring->rd_p % tx_ring->cnt;
		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(&pdev->dev,
					 tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(&pdev->dev,
				       tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}
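
The netdev_tx_reset_queue() call at the end matters when byte queue limits (BQL) are enabled: it clears the stack's XOFF bit and resets the DQL accounting so a reused ring does not start life throttled. Reconstructed from memory (treat as a sketch), the helper in linux/netdevice.h is roughly:

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}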
Code example #9
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ve_struct *env;
	int i = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
		unsigned int orig_headroom = skb_headroom(skb);
		u16 vlan_tci;

		vlan_dev_info(dev)->cnt_encap_on_xmit++;

		vlan_tci = vlan_dev_info(dev)->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
		skb = __vlan_put_tag(skb, vlan_tci);
		if (!skb) {
			txq->tx_dropped++;
			return NETDEV_TX_OK;
		}

		if (orig_headroom < VLAN_HLEN)
			vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
	}

	skb->dev = vlan_dev_info(dev)->real_dev;
	len = skb->len;
	skb->owner_env = skb->dev->owner_env;
	env = set_exec_env(skb->owner_env);
	ret = dev_queue_xmit(skb);
	set_exec_env(env);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else
		txq->tx_dropped++;

	return NETDEV_TX_OK;
}
Code example #10
File: vlan_dev.c  Project: ndmsystems/linux-2.6.36
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	struct rtnl_link_stats64 hw_nat_stats;
	memset(&hw_nat_stats, 0, sizeof(hw_nat_stats));
	if (ra_sw_nat_hook_get_stats)
		if (!ra_sw_nat_hook_get_stats(dev->name, &hw_nat_stats)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			txq->tx_bytes += hw_nat_stats.tx_bytes;
			txq->tx_packets += hw_nat_stats.tx_packets;
		}
#endif

	dev_txq_stats_fold(dev, stats);

	if (vlan_dev_info(dev)->vlan_rx_stats) {
		struct vlan_rx_stats *p, accum = {0};
		int i;

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
		p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, 0);
		p->rx_packets += hw_nat_stats.rx_packets;
		p->rx_bytes += hw_nat_stats.rx_bytes;
#endif
		for_each_possible_cpu(i) {
			u64 rxpackets, rxbytes, rxmulticast;
			unsigned int start;

			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
			do {
				start = u64_stats_fetch_begin_bh(&p->syncp);
				rxpackets	= p->rx_packets;
				rxbytes		= p->rx_bytes;
				rxmulticast	= p->rx_multicast;
			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
			accum.rx_packets += rxpackets;
			accum.rx_bytes   += rxbytes;
			accum.rx_multicast += rxmulticast;
			/* rx_errors is an ulong, not protected by syncp */
			accum.rx_errors  += p->rx_errors;
		}
		stats->rx_packets = accum.rx_packets;
		stats->rx_bytes   = accum.rx_bytes;
		stats->rx_errors  = accum.rx_errors;
		stats->multicast  = accum.rx_multicast;
	}
	return stats;
}
Code example #11
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	struct netdev_queue *net_queue =
		netdev_get_tx_queue(queue->vif->dev, queue->id);

	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
	 * the carrier went down and this queue was previously blocked
	 */
	if (unlikely(netif_tx_queue_stopped(net_queue) ||
		     (!netif_carrier_ok(queue->vif->dev) &&
		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}
Code example #12
File: vlan_dev.c  Project: AppEngine/linux-2.6
static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	u16 vlan_tci;

	vlan_tci = vlan_dev_info(dev)->vlan_id;
	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);

	txq->tx_packets++;
	txq->tx_bytes += skb->len;

	skb->dev = vlan_dev_info(dev)->real_dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}
Code example #13
File: xmit_linux.c  Project: Adri9102/rtl8188eu
static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u16	queue;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		/* No free space for Tx, tx_worker is too slow */
		if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
			netif_stop_subqueue(padapter->pnetdev, queue);
	} else {
		if (pxmitpriv->free_xmitframe_cnt <= 4) {
			if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
				netif_stop_subqueue(padapter->pnetdev, queue);
		}
	}
}
Code example #14
File: init.c  Project: 3null/linux
/*
 * This function stops all queues in net_device
 */
void mwifiex_stop_net_dev_queue(struct net_device *netdev,
					struct mwifiex_adapter *adapter)
{
	unsigned long dev_queue_flags;
	unsigned int i;

	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);

	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);

		if (!netif_tx_queue_stopped(txq))
			netif_tx_stop_queue(txq);
	}

	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
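
The same driver pairs this with a wake-up helper that reverses the operation under the same lock. The version below is reconstructed from memory from mwifiex of the same vintage, so treat it as a sketch rather than the exact upstream text:

void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
					struct mwifiex_adapter *adapter)
{
	unsigned long dev_queue_flags;
	unsigned int i;

	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);

	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);

		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}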
Code example #15
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}
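
The __QDISC_STATE_RUNNING guarantee mentioned in the comment is enforced by the caller, which loops on qdisc_restart() and yields when it has held the CPU for too long. In kernels of this vintage the caller looks roughly like this (reconstructed from memory, a sketch rather than exact upstream text):

void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/* yield if another task wants the CPU or a jiffy has passed */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);	/* older kernels: clear __QDISC_STATE_RUNNING */
}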
Code example #16
File: sch_generic.c  Project: ReneNyffenegger/linux
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Code example #17
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
			q->gso_skb = NULL;
		else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}
Code example #18
File: lpc_eth.c  Project: guanhe0/kernel
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
Code example #19
File: main.c  Project: 383530895/linux
/*
 * Add buffer into wmm tx queue and queue work to transmit it.
 */
int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
{
	struct netdev_queue *txq;
	int index = mwifiex_1d_to_wmm_queue[skb->priority];

	if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) {
		txq = netdev_get_tx_queue(priv->netdev, index);
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
		}
	}

	atomic_inc(&priv->adapter->tx_pending);
	mwifiex_wmm_add_buf_txqueue(priv, skb);

	queue_work(priv->adapter->workqueue, &priv->adapter->main_work);

	return 0;
}
Code example #20
File: sch_generic.c  Project: sim0629/linux-openwrt
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	if (unlikely(skb)) {
		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq))
			skb = q->dequeue(q);
	}

	return skb;
}
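
Compared with code examples #17 and #25, this variant also honors TCQ_F_ONETXQUEUE, set per-queue by mq_init() in code example #26: when a qdisc feeds exactly one TX queue there is no point dequeuing while that queue is unusable. The state test it relies on is a simple bit check; reconstructed from memory as a sketch:

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}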
Code example #21
File: sch_generic.c  Project: 513855417/linux
static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
}
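
For the single-queue/IFF_NO_QUEUE branch, the per-queue callback that netdev_for_each_tx_queue() invokes creates the default qdisc and parks it in qdisc_sleeping. Reconstructed from memory for the same kernel vintage (treat as a sketch):

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	dev_queue->qdisc_sleeping = qdisc;
}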
Code example #22
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Code example #23
static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct netdev_queue *dat_queue;
	struct sk_buff *skb;

	skb = __skb_dequeue(&dat->q);
	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
	if (skb == NULL) {
		struct net_device *m = qdisc_dev(dat_queue->qdisc);
		if (m) {
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	} else {
		qdisc_bstats_update(sch, skb);
	}
	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
	return skb;
}
Code example #24
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
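
A driver opts into the ndo_select_queue path shown above by supplying its own hook and deferring to __netdev_pick_tx (passed here as the fallback argument) when it has no opinion. A hypothetical hook for this kernel vintage, where my_select_queue is a made-up name:

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	/* pin control traffic to queue 0, defer everything else */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;
	return fallback(dev, skb);
}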
Code example #25
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) &&
		    !netif_tx_queue_frozen(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}
Code example #26
File: sch_mq.c  Project: 19Dan01/linux
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (qdisc == NULL)
			goto err;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mq_destroy(sch);
	return -ENOMEM;
}
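
mq_init() only pre-allocates; the qdiscs are grafted onto their queues later in the attach callback, after which the scratch array is dropped. Reconstructed from memory from the same sch_mq.c (a sketch, not the exact upstream text):

static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}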
Code example #27
File: ifb.c  Project: AlexShiLucky/linux
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
Code example #28
File: sch_mq.c  Project: 19Dan01/linux
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}
Code example #29
static void
teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	prev = master->slaves;
	if (prev) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						struct netdev_queue *txq;
						spinlock_t *root_lock;

						txq = netdev_get_tx_queue(master->dev, 0);
						master->slaves = NULL;

						root_lock = qdisc_root_sleeping_lock(txq->qdisc);
						spin_lock_bh(root_lock);
						qdisc_reset(txq->qdisc);
						spin_unlock_bh(root_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}

		} while ((prev = q) != master->slaves);
	}
}
Code example #30
File: ccmni.c  Project: Elnter/j608_kernel
static void ccmni_dump(int md_id, int rx_ch, unsigned int flag)
{
	ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
	ccmni_instance_t *ccmni = NULL;
	ccmni_instance_t *ccmni_tmp = NULL;
	int ccmni_idx = 0;
	struct net_device *dev = NULL;
	struct netdev_queue *dev_queue = NULL;
	
	if (ctlb == NULL)
		return;
		
	ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
	if (unlikely(ccmni_idx < 0)) {
		CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch);
		return;
	}	
	
	ccmni_tmp = ctlb->ccmni_inst[ccmni_idx];
	if (unlikely(ccmni_tmp == NULL))
		return;
	
	if ((ccmni_tmp->dev->stats.rx_packets == 0) && (ccmni_tmp->dev->stats.tx_packets == 0))
		return;

	dev = ccmni_tmp->dev;
	/* ccmni may differ from ccmni_tmp for MD IRAT */
	ccmni = (ccmni_instance_t *)netdev_priv(dev);
	dev_queue = netdev_get_tx_queue(dev, 0);
	CCMNI_INF_MSG(md_id, "CCMNI%d(%d,%d), rx=(%ld,%ld), tx=(%ld,%ld), txq_len=%d, tx_busy=%ld, dev_sta=(0x%lx,0x%lx,0x%x)\n", \
		ccmni->index, atomic_read(&ccmni->usage), atomic_read(&ccmni_tmp->usage), dev->stats.rx_packets, \
		dev->stats.rx_bytes, dev->stats.tx_packets, dev->stats.tx_bytes, dev->qdisc->q.qlen, \
		ccmni->tx_busy_cnt, dev->state, dev_queue->state, dev->flags);

	return;
}