static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

	zap_completion_queue();
repeat:
	if (nr_skbs < MAX_SKBS)
		refill_skbs();

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		count++;
		if (once && (count == 1000000)) {
			printk("out of netpoll skbs!\n");
			once = 0;
		}
		netpoll_poll(np);
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
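For orientation, here is a minimal sketch of how a netpoll client such as netconsole would typically drive this API in a kernel of this vintage: fill in a struct netpoll, call netpoll_setup() at module init, and netpoll_cleanup() on exit. The module name, device name, ports, and addresses are illustrative, and the remote_ip/local_port/remote_port/remote_mac fields are assumed members of struct netpoll in this era; only name, dev_name, local_ip, local_mac, and rx_hook appear in the listing above.

#include <linux/module.h>
#include <linux/netpoll.h>

/* Illustrative netpoll client; values below are placeholders. */
static struct netpoll example_np = {
	.name        = "example",
	.dev_name    = "eth0",
	.local_port  = 6665,
	.remote_port = 6666,
	.remote_mac  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};

static int __init example_init(void)
{
	/* local_ip/remote_ip are host-order IPv4 addresses here,
	 * matching the ntohl() conversion in netpoll_setup(). */
	example_np.remote_ip = 0x0a000001;	/* 10.0.0.1, assumed peer */

	/* Binds the netpoll instance to eth0, bringing the device up
	 * and filling in local_ip/local_mac if they were left zero. */
	return netpoll_setup(&example_np);
}

static void __exit example_exit(void)
{
	netpoll_cleanup(&example_np);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Once setup succeeds, the client can emit packets with netpoll_send_udp(&example_np, buf, len), whose buffer allocations go through the find_skb() helper shown earlier.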