Example 1
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();		/* drain this CPU's deferred-free skb queue */
	refill_skbs();			/* top up the preallocated pool */
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);	/* fall back to the pool */

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);	/* reap skbs, then retry */
			goto repeat;
		}
		return NULL;	/* give up after ten attempts */
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
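
A rough sketch of how a transmit path would drive find_skb() (hedged: the helper below is illustrative, not from the source; the real netpoll caller goes on to skb_push() UDP/IP/Ethernet headers into the reserved headroom before handing off):

static void send_msg_sketch(struct netpoll *np, const char *msg,
			    int len, int header_len)
{
	int total_len = len + header_len;
	struct sk_buff *skb;

	/* allocate payload plus headroom; NULL means find_skb() gave up */
	skb = find_skb(np, total_len, header_len);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), msg, len);	/* payload lands after the headroom */

	/* ... skb_push() the protocol headers here ... */
	netpoll_send_skb(np, skb);
}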
Example 2
void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}
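
The ->poll_controller hook invoked above is supplied by the NIC driver. The usual pattern (sketched below with made-up driver names; many drivers' *_netpoll helpers follow it) is to mask the device interrupt, run the interrupt handler by hand, then unmask:

static void mydrv_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	mydrv_interrupt(dev->irq, dev);	/* the driver's regular ISR */
	enable_irq(dev->irq);
}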
Example 3
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

	zap_completion_queue();
repeat:
	if (nr_skbs < MAX_SKBS)
		refill_skbs();	/* keep the private pool topped up */

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		/* fall back to the private free list */
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		count++;
		if (once && (count == 1000000)) {
			printk(KERN_WARNING "out of netpoll skbs!\n");
			once = 0;	/* warn only once */
		}
		netpoll_poll(np);	/* try to reclaim skbs, then spin again */
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
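
For context, the refill_skbs() that feeds this hand-rolled pool would look roughly like the sketch below (a hedged reconstruction against the same skbs/nr_skbs/skb_list_lock globals; MAX_SKB_SIZE stands for the pool's fixed buffer size):

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_list_lock, flags);
	while (nr_skbs < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/* push onto the singly linked free list */
		skb->next = skbs;
		skbs = skb;
		nr_skbs++;
	}
	spin_unlock_irqrestore(&skb_list_lock, flags);
}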
Example 4
void netpoll_poll_dev(struct net_device *dev)
{
	if (!dev || !netif_running(dev))
		return;

	/* nothing to do if the driver provides no netpoll hook */
	if (!dev->poll_controller)
		return;

	dev->poll_controller(dev);

	if (dev->poll) {
		/* avoid deadlocking against a NAPI poll this CPU already owns */
		if (dev->npinfo->poll_owner != smp_processor_id() &&
		    spin_trylock(&dev->npinfo->poll_lock)) {
			poll_one_napi(dev->npinfo, dev, 16);	/* budget of 16 packets */
			spin_unlock(&dev->npinfo->poll_lock);
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
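
netpoll_poll_dev() exists so transmit paths can make forward progress with interrupts disabled: when the queue is full, polling the device reaps completions and frees descriptors. A hedged, illustrative retry loop (the real logic lives in netpoll_send_skb(); the function name and bounds here are made up):

static void xmit_with_retry_sketch(struct net_device *dev, struct sk_buff *skb)
{
	int tries;

	for (tries = 0; tries < 4; tries++) {
		if (!netif_queue_stopped(dev) &&
		    dev->hard_start_xmit(skb, dev) == NETDEV_TX_OK)
			return;

		/* queue busy: service the NIC, wait briefly, retry */
		netpoll_poll_dev(dev);
		udelay(50);
	}

	kfree_skb(skb);	/* give up and drop the packet */
}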