static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	struct ifb_private *dp;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
			       ifb_setup);
	if (!dev_ifb)
		return -ENOMEM;

	dp = netdev_priv(dev_ifb);
	u64_stats_init(&dp->rsync);
	u64_stats_init(&dp->tsync);

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}
/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}
/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;

err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}
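/*
 * Hedged sketch, not fm10k's actual datapath: it only shows why each ring's
 * syncp is initialized above.  Once u64_stats_init() has run, the writer
 * brackets its 64-bit counter updates with u64_stats_update_begin()/
 * u64_stats_update_end() so that readers on 32-bit hosts see a consistent
 * pair of values.  struct example_ring_stats and
 * example_ring_update_stats() are hypothetical names used for illustration
 * only; they do not exist in the fm10k driver.
 */
#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* guards packets/bytes on 32-bit */
};

static void example_ring_update_stats(struct example_ring_stats *stats,
				      unsigned int bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}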
static int nlmon_dev_init(struct net_device *dev)
{
	int i;

	dev->lstats = alloc_percpu(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_lstats *nlmstats;

		nlmstats = per_cpu_ptr(dev->lstats, i);
		u64_stats_init(&nlmstats->syncp);
	}

	return 0;
}
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
static int veth_dev_init(struct net_device *dev)
{
	int i;

	dev->vstats = alloc_percpu(struct pcpu_vstats);
	if (!dev->vstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_vstats *veth_stats;

		veth_stats = per_cpu_ptr(dev->vstats, i);
		u64_stats_init(&veth_stats->syncp);
	}

	return 0;
}
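/*
 * Hedged sketch of the reader side of the per-cpu pattern used by nlmon and
 * veth above (not their actual code): a 64-bit statistics dump retries each
 * per-cpu snapshot under u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * and sums the results.  struct example_pcpu_stats and example_sum_stats()
 * are hypothetical names used for illustration only.
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_sum_stats(struct example_pcpu_stats __percpu *pcpu,
			      u64 *tot_packets, u64 *tot_bytes)
{
	int cpu;

	*tot_packets = 0;
	*tot_bytes = 0;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *stats = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 packets, bytes;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*tot_packets += packets;
		*tot_bytes += bytes;
	}
}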
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int i, err;

	dev->destructor = ip_tunnel_dev_free;
	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *ipt_stats;

		ipt_stats = per_cpu_ptr(dev->tstats, i);
		u64_stats_init(&ipt_stats->syncp);
	}

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	return 0;
}