void usbnet_disconnect (struct usb_interface *intf) { struct usbnet *dev; struct usb_device *xdev; struct net_device *net; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; xdev = interface_to_usbdev (intf); if (netif_msg_probe (dev)) devinfo (dev, "unregister '%s' usb-%s-%s, %s", intf->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description); net = dev->net; unregister_netdev (net); /* we don't hold rtnl here ... */ flush_scheduled_work (); if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); free_netdev(net); usb_put_dev (xdev); }
/*
 * xgbe_config_irqs - configure the device's interrupt resources.
 *
 * Attempts multi MSI/MSI-X allocation first; if that fails, falls back
 * to a single MSI or legacy interrupt shared by all interrupt sources.
 * Returns 0 on success or a negative errno when no interrupt could be
 * allocated at all.
 */
static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
{
	int ret = xgbe_config_multi_msi(pdata);

	if (ret) {
		/* Multi-vector setup failed; fall back to one vector */
		ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
					    PCI_IRQ_LEGACY | PCI_IRQ_MSI);
		if (ret < 0) {
			dev_info(pdata->dev, "single IRQ enablement failed\n");
			return ret;
		}

		pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
		pdata->irq_count = 1;
		pdata->channel_irq_count = 1;

		/* All interrupt sources share vector 0 */
		pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
		pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
		pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
		pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev, "single %s interrupt enabled\n",
				pdata->pcidev->msi_enabled ? "MSI" : "legacy");
	}

	if (netif_msg_probe(pdata)) {
		unsigned int i;

		dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
		dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
		dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
		dev_dbg(pdata->dev, " an irq=%d\n", pdata->an_irq);
		for (i = 0; i < pdata->channel_irq_count; i++)
			dev_dbg(pdata->dev, " dma%u irq=%d\n",
				i, pdata->channel_irq[i]);
	}

	return 0;
}
/*
 * pl_reset - toggle the QuickLink reset/enable feature bits.
 *
 * some units seem to need this reset, others reject it utterly.
 * FIXME be more like "naplink" or windows drivers.
 *
 * A failed feature write is only logged; the return value is
 * always 0 so probing continues regardless.
 */
static int pl_reset(struct usbnet *dev)
{
	int ret = pl_set_QuickLink_features(dev,
			PL_S_EN | PL_RESET_OUT | PL_RESET_IN | PL_PEER_E);

	if (ret && netif_msg_probe(dev))
		netif_dbg(dev, link, dev->net, "pl_reset --> %d\n", ret);

	return 0;
}
/*
 * pl_reset - issue a QuickLink reset/enable sequence on the device.
 *
 * A failed feature write is logged (at "link" debug level, when probe
 * messages are enabled) but deliberately ignored; the function always
 * returns 0 so binding continues.
 */
static int pl_reset(struct usbnet *dev)
{
	int status;

	/* presumably some units need this reset while others reject it;
	 * failures are treated as non-fatal -- TODO confirm */
	status = pl_set_QuickLink_features(dev,
			PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
	if (status != 0 && netif_msg_probe(dev))
		netif_dbg(dev, link, dev->net, "pl_reset --> %d\n", status);
	return 0;
}
/*
 * unregister_networkdev - detach the adapter's net_device from the stack.
 *
 * Optionally logs the USB bus name and device path being torn down,
 * then calls unregister_netdev() on the adapter's net_device.
 */
void unregister_networkdev(struct bcm_mini_adapter *Adapter)
{
	struct bcm_interface_adapter *intf_ad = Adapter->pvInterfaceAdapter;
	struct usb_interface *usb_intf = intf_ad->interface;
	struct usb_device *usb_dev = intf_ad->udev;
	struct net_device *ndev = Adapter->dev;

	if (netif_msg_probe(Adapter))
		dev_info(&usb_intf->dev, PFX "%s: unregister usb-%s%s\n",
			 ndev->name, usb_dev->bus->bus_name,
			 usb_dev->devpath);

	unregister_netdev(ndev);
}
int register_networkdev(struct bcm_mini_adapter *Adapter) { struct net_device *net = Adapter->dev; struct bcm_interface_adapter *IntfAdapter = Adapter->pvInterfaceAdapter; struct usb_interface *udev = IntfAdapter->interface; struct usb_device *xdev = IntfAdapter->udev; int result; net->netdev_ops = &bcmNetDevOps; net->ethtool_ops = &bcm_ethtool_ops; net->mtu = MTU_SIZE; /* 1400 Bytes */ net->tx_queue_len = TX_QLEN; net->flags |= IFF_NOARP; netif_carrier_off(net); SET_NETDEV_DEVTYPE(net, &wimax_type); /* Read the MAC Address from EEPROM */ result = ReadMacAddressFromNVM(Adapter); if (result != STATUS_SUCCESS) { dev_err(&udev->dev, PFX "Error in Reading the mac Address: %d", result); return -EIO; } result = register_netdev(net); if (result) return result; gblpnetdev = Adapter->dev; if (netif_msg_probe(Adapter)) dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n", net->name, xdev->bus->bus_name, xdev->devpath, net->dev_addr); return 0; }
static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata) { unsigned int vector_count; unsigned int i, j; int ret; vector_count = XGBE_MSI_BASE_COUNT; vector_count += max(pdata->rx_ring_count, pdata->tx_ring_count); ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT, vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (ret < 0) { dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n"); return ret; } pdata->isr_as_tasklet = 1; pdata->irq_count = ret; pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0); pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1); pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2); pdata->an_irq = pci_irq_vector(pdata->pcidev, 3); for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++) pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i); pdata->channel_irq_count = j; pdata->per_channel_irq = 1; pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; if (netif_msg_probe(pdata)) dev_dbg(pdata->dev, "multi %s interrupts enabled\n", pdata->pcidev->msix_enabled ? "MSI-X" : "MSI"); return 0; }
/*
 * kevent - deferred (process-context) event handler for a usbnet device.
 *
 * Handles the EVENT_* bits set elsewhere in the driver: clears stalled
 * bulk endpoints (usb_clear_halt() needs a thread context), retries a
 * failed rx URB allocation, and invokes the driver's link_reset hook.
 * NOTE(review): the two signatures below are selected by a
 * LINUX_VERSION_CODE #if that precedes this block (pre-2.6.20
 * workqueues passed a bare void *) -- confirm against the full file.
 */
static void kevent(void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent(struct work_struct *work)
{
	struct usbnet *dev = container_of(work, struct usbnet, kevent);
#endif
	int status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				deverr(dev, "can't clear tx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				deverr(dev, "can't clear rx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb *urb = NULL;

		if (netif_running(dev->net))
			urb = usb_alloc_urb(0, GFP_KERNEL);
		else
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	/* run the driver-specific link reset, logging any failure */
	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info *info = dev->driver_info;
		int retval = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		if (info->link_reset) {
			retval = info->link_reset(dev);
			if (retval < 0) {
				devinfo(dev,
					"link reset failed (%d) usbnet usb-%s-%s, %s",
					retval,
					dev->udev->bus->bus_name,
					dev->udev->devpath,
					info->description);
			}
		}
	}

	if (dev->flags)
		devdbg(dev, "kevent done, flags = 0x%lx", dev->flags);
}

/*-------------------------------------------------------------------------*/

/*
 * tx_complete - URB completion callback for transmitted packets.
 *
 * Updates tx statistics, defers endpoint-halt handling to kevent or
 * arms the throttle timer on controller i/o faults, then hands the skb
 * to the bottom half (via dev->txq) for freeing.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void tx_complete(struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete(struct urb *urb)
#endif
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct skb_data *entry = (struct skb_data *) skb->cb;
	struct usbnet *dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:	/* async unlink */
		case -ESHUTDOWN:	/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays */
		/* and so it uses the same throttling mechanism. */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay,
					  jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev))
					devdbg(dev, "tx throttle %d",
					       urb->status);
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev))
				devdbg(dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

/*
 * axusbnet_tx_timeout - netdev watchdog hook: unlink all pending tx
 * URBs and kick the bottom half.
 */
static void axusbnet_tx_timeout(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	/* FIXME: device recovery -- reset? */
}

/*-------------------------------------------------------------------------*/

/*
 * axusbnet_start_xmit - queue one skb for bulk-out transmission.
 *
 * Applies the driver's tx_fixup framing hook, allocates and submits a
 * bulk URB, appends a one-byte pad in place of a ZLP when the length is
 * an exact multiple of the endpoint max packet size, and stops the
 * queue when it fills or the endpoint stalls.  Always returns
 * NETDEV_TX_OK; dropped packets are counted in tx_dropped.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	struct driver_info *info = dev->driver_info;
	unsigned long flags;
	int retval;

	/* some devices want funky USB-level framing, for */
	/* win32 driver (usually) and/or hardware quirks */
	if (info->tx_fixup) {
		skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev))
				devdbg(dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data,
			  skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE: strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) &&
	    (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue(net);
		axusbnet_defer_kevent(dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err(dev))
			devdbg(dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail(&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN(dev))
			netif_stop_queue(net);
	}
	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else if (netif_msg_tx_queued(dev)) {
		devdbg(dev, "> tx, len %d, type 0x%x",
		       length, skb->protocol);
	}

	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

/* tasklet (work deferred from completions, in_irq) or timer */
static void axusbnet_bh(unsigned long param)
{
	struct usbnet *dev = (struct usbnet *) param;
	struct sk_buff *skb;
	struct skb_data *entry;

	/* drain the done queue: process received frames, free tx/rx URBs */
	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			devdbg(dev, "bogus skb state %d", entry->state);
		}
	}

	/* waiting for all pending urbs to complete? */
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0)
			wake_up(dev->wait);

	/* or are we maybe short a few urbs? */
	} else if (netif_running(dev->net) &&
		   netif_device_present(dev->net) &&
		   !timer_pending(&dev->delay) &&
		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int temp = dev->rxq.qlen;
		int qlen = RX_QLEN(dev);

		if (temp < qlen) {
			struct urb *urb;
			int i;

			/* don't refill the queue all at once */
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
					urb->transfer_flags |=
						URB_ASYNC_UNLINK;
#endif
					rx_submit(dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link(dev))
				devdbg(dev, "rxqlen %d --> %d", temp,
				       dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule(&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN(dev))
			netif_wake_queue(dev->net);
	}
}

/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */
static void axusbnet_disconnect(struct usb_interface *intf)
{
	struct usbnet *dev;
	struct usb_device *xdev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev(intf);

	if (netif_msg_probe(dev))
		devinfo(dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev(net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);
	free_netdev(net);
	usb_put_dev(xdev);
}

/*-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */
static int
axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet *dev;
	struct net_device *net;
	struct usb_host_interface *interface;
	struct driver_info *info;
	struct usb_device *xdev;
	int status;
	const char *name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk(KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev(udev);
	interface = udev->cur_altsetting;

	usb_get_dev(xdev);

	status = -ENOMEM;

	/* set up our own records */
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		printk(KERN_ERR "can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	INIT_WORK(&dev->kevent, kevent, dev);
#else
	INIT_WORK(&dev->kevent, kevent);
#endif
	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer(&dev->delay);
	/* mutex_init(&dev->phy_mutex); */

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	/* dma_supported() is deeply broken on almost all architectures */
	/* possible with some EHCI controllers */
	if (dma_supported(&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	net->open = axusbnet_open,
	net->stop = axusbnet_stop,
	net->hard_start_xmit = axusbnet_start_xmit,
	net->tx_timeout = axusbnet_tx_timeout,
	net->get_stats = axusbnet_get_stats;
#endif
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	/* allow device-specific bind/init procedures */
	/* NOTE net->name still not usable ... */
	status = info->bind(dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status(dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev(net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe(dev))
		devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	/* ok, it's ready to go. */
	usb_set_intfdata(udev, dev);

	/* start as if the link is up */
	netif_device_attach(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind(dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */
static int axusbnet_suspend(struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10)
			    pm_message_t message)
#else
			    u32 message)
#endif
{
	struct usbnet *dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach(dev->net);
		(void) unlink_urbs(dev, &dev->rxq);
		(void) unlink_urbs(dev, &dev->txq);
		usb_kill_urb(dev->interrupt);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach(dev->net);
	}
	return 0;
}

/*
 * axusbnet_resume - counterpart to axusbnet_suspend: on the last
 * resume, kick the bottom half and resubmit the interrupt URB.
 */
static int axusbnet_resume(struct usb_interface *intf)
{
	struct usbnet *dev = usb_get_intfdata(intf);
	int retval = 0;

	if (!--dev->suspend_count)
		tasklet_schedule(&dev->bh);

	retval = init_status(dev, intf);
	if (retval < 0)
		return retval;

	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0 && netif_msg_ifup(dev))
			deverr(dev, "intr submit %d", retval);
	}
	return retval;
}
/*
 * usbnet_probe - bind a usbnet minidriver instance to a USB interface.
 *
 * Allocates the net_device, initializes the queues, work item, timer
 * and mutex, runs the minidriver's bind hook (or derives the bulk
 * endpoints directly from driver_info), then registers the netdev.
 * Returns 0 on success or a negative errno; on error all acquired
 * resources are released via the out/out1/out3 labels.
 */
int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	//net = alloc_etherdev(sizeof(*dev));
	net = alloc_netdev( sizeof(*dev), "usbeth%d", ether_setup );
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);
	SET_MODULE_OWNER (net);

	dev->net = net;
	strcpy (net->name, "usbeth%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_64BIT_MASK))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->change_mtu = usbnet_change_mtu;
	net->get_stats = usbnet_get_stats;
	net->hard_start_xmit = usbnet_start_xmit;
	net->open = usbnet_open;
	net->stop = usbnet_stop;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->tx_timeout = usbnet_tx_timeout;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		// NOTE(review): the assignment below actually uses
		// "usbeth%d", same as the alloc_netdev() template above;
		// confirm whether the "usb%d"/"eth%d" scheme described
		// here was intentionally replaced.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0
				&& (net->dev_addr [0] & 0x02) == 0)
			strcpy (net->name, "usbeth%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		// driver_info names the endpoints directly
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;
	}

	if (status == 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status)
		goto out3;
	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, "
			"%02x:%02x:%02x:%02x:%02x:%02x",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr [0], net->dev_addr [1],
			net->dev_addr [2], net->dev_addr [3],
			net->dev_addr [4], net->dev_addr [5]);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}
/*
 * generic_rndis_bind - shared RNDIS binding for usbnet minidrivers.
 *
 * Performs the CDC bind, negotiates the RNDIS session (INIT), validates
 * the device's max transfer size against our MTU, optionally enforces a
 * wireless / non-wireless physical medium (per @flags), fetches the
 * permanent MAC address, and enables data transfers by setting a
 * nonzero packet filter.  Returns 0 on success or a negative errno;
 * on failure after INIT, a HALT is sent and the data interface is
 * released.
 */
int
generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
{
	int retval;
	struct net_device *net = dev->net;
	struct cdc_state *info = (void *) &dev->data;
	/* one control buffer, viewed as whichever message is in flight */
	union {
		void *buf;
		struct rndis_msg_hdr *header;
		struct rndis_init *init;
		struct rndis_init_c *init_c;
		struct rndis_query *get;
		struct rndis_query_c *get_c;
		struct rndis_set *set;
		struct rndis_set_c *set_c;
		struct rndis_halt *halt;
	} u;
	u32 tmp;
	__le32 phym_unspec, *phym;
	int reply_len;
	unsigned char *bp;

	/* we can't rely on i/o from stack working, or stack allocation */
	u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!u.buf)
		return -ENOMEM;
	retval = usbnet_generic_cdc_bind(dev, intf);
	if (retval < 0)
		goto fail;

	u.init->msg_type = RNDIS_MSG_INIT;
	u.init->msg_len = cpu_to_le32(sizeof *u.init);
	u.init->major_version = cpu_to_le32(1);
	u.init->minor_version = cpu_to_le32(0);

	/* max transfer (in spec) is 0x4000 at full speed, but for
	 * TX we'll stick to one Ethernet packet plus RNDIS framing.
	 * For RX we handle drivers that zero-pad to end-of-packet.
	 * Don't let userspace change these settings.
	 *
	 * NOTE: there still seems to be wierdness here, as if we need
	 * to do some more things to make sure WinCE targets accept this.
	 * They default to jumbograms of 8KB or 16KB, which is absurd
	 * for such low data rates and which is also more than Linux
	 * can usually expect to allocate for SKB data...
	 */
	net->hard_header_len += sizeof (struct rndis_data_hdr);
	dev->hard_mtu = net->mtu + net->hard_header_len;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
	if (dev->maxpacket == 0) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n");
		retval = -EINVAL;
		goto fail_and_release;
	}

	/* round the rx buffer up to a whole number of max-size packets */
	dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
	dev->rx_urb_size &= ~(dev->maxpacket - 1);
	u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);

	net->netdev_ops = &rndis_netdev_ops;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		/* it might not even be an RNDIS device!! */
		dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
		goto fail_and_release;
	}
	tmp = le32_to_cpu(u.init_c->max_transfer_size);
	if (tmp < dev->hard_mtu) {
		if (tmp <= net->hard_header_len) {
			dev_err(&intf->dev,
				"dev can't take %u byte packets (max %u)\n",
				dev->hard_mtu, tmp);
			retval = -EINVAL;
			goto halt_fail_and_release;
		}
		dev_warn(&intf->dev,
			 "dev can't take %u byte packets (max %u), "
			 "adjusting MTU to %u\n",
			 dev->hard_mtu, tmp, tmp - net->hard_header_len);
		dev->hard_mtu = tmp;
		net->mtu = dev->hard_mtu - net->hard_header_len;
	}

	/* REVISIT: peripheral "alignment" request is ignored ... */
	dev_dbg(&intf->dev,
		"hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
		dev->hard_mtu, tmp, dev->rx_urb_size,
		1 << le32_to_cpu(u.init_c->packet_alignment));

	/* module has some device initialization code needs to be done right
	 * after RNDIS_INIT */
	if (dev->driver_info->early_init &&
	    dev->driver_info->early_init(dev) != 0)
		goto halt_fail_and_release;

	/* Check physical medium */
	phym = NULL;
	reply_len = sizeof *phym;
	retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM,
			     0, (void **) &phym, &reply_len);
	if (retval != 0 || !phym) {
		/* OID is optional so don't fail here. */
		phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED;
		phym = &phym_unspec;
	}
	if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
	    *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "driver requires wireless "
				"physical medium, but device is not.\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}
	if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
	    *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "driver requires non-wireless "
				"physical medium, but device is wireless.\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}

	/* Get designated host ethernet address */
	reply_len = ETH_ALEN;
	retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
			     48, (void **) &bp, &reply_len);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
		goto halt_fail_and_release;
	}
	/* NOTE(review): reply_len is not re-checked against ETH_ALEN after
	 * the query; confirm rndis_query() guarantees the requested length
	 * before relying on all six bytes of bp. */
	memcpy(net->dev_addr, bp, ETH_ALEN);
	memcpy(net->perm_addr, bp, ETH_ALEN);

	/* set a nonzero filter to enable data transfers */
	memset(u.set, 0, sizeof *u.set);
	u.set->msg_type = RNDIS_MSG_SET;
	u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
	u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
	u.set->len = cpu_to_le32(4);
	u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
	*(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis set packet filter, %d\n", retval);
		goto halt_fail_and_release;
	}

	retval = 0;

	kfree(u.buf);
	return retval;

halt_fail_and_release:
	/* tell the device the session is over before releasing it */
	memset(u.halt, 0, sizeof *u.halt);
	u.halt->msg_type = RNDIS_MSG_HALT;
	u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
	(void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
fail_and_release:
	usb_set_intfdata(info->data, NULL);
	usb_driver_release_interface(driver_of(intf), info->data);
	info->data = NULL;
fail:
	kfree(u.buf);
	return retval;
}
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct xgbe_prv_data *pdata; struct device *dev = &pdev->dev; void __iomem * const *iomap_table; struct pci_dev *rdev; unsigned int ma_lo, ma_hi; unsigned int reg; int bar_mask; int ret; pdata = xgbe_alloc_pdata(dev); if (IS_ERR(pdata)) { ret = PTR_ERR(pdata); goto err_alloc; } pdata->pcidev = pdev; pci_set_drvdata(pdev, pdata); /* Get the version data */ pdata->vdata = (struct xgbe_version_data *)id->driver_data; ret = pcim_enable_device(pdev); if (ret) { dev_err(dev, "pcim_enable_device failed\n"); goto err_pci_enable; } /* Obtain the mmio areas for the device */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME); if (ret) { dev_err(dev, "pcim_iomap_regions failed\n"); goto err_pci_enable; } iomap_table = pcim_iomap_table(pdev); if (!iomap_table) { dev_err(dev, "pcim_iomap_table failed\n"); ret = -ENOMEM; goto err_pci_enable; } pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR]; if (!pdata->xgmac_regs) { dev_err(dev, "xgmac ioremap failed\n"); ret = -ENOMEM; goto err_pci_enable; } pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET; pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET; if (netif_msg_probe(pdata)) { dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs); dev_dbg(dev, "xi2c_regs = %p\n", pdata->xi2c_regs); } pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR]; if (!pdata->xpcs_regs) { dev_err(dev, "xpcs ioremap failed\n"); ret = -ENOMEM; goto err_pci_enable; } if (netif_msg_probe(pdata)) dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); /* Set the PCS indirect addressing definition registers */ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) { pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; } else { 
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; } pci_dev_put(rdev); /* Configure the PCS indirect addressing support */ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); pdata->xpcs_window <<= 6; pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; if (netif_msg_probe(pdata)) { dev_dbg(dev, "xpcs window def = %#010x\n", pdata->xpcs_window_def_reg); dev_dbg(dev, "xpcs window sel = %#010x\n", pdata->xpcs_window_sel_reg); dev_dbg(dev, "xpcs window = %#010x\n", pdata->xpcs_window); dev_dbg(dev, "xpcs window size = %#010x\n", pdata->xpcs_window_size); dev_dbg(dev, "xpcs window mask = %#010x\n", pdata->xpcs_window_mask); } pci_set_master(pdev); /* Enable all interrupts in the hardware */ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); /* Retrieve the MAC address */ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); pdata->mac_addr[0] = ma_lo & 0xff; pdata->mac_addr[1] = (ma_lo >> 8) & 0xff; pdata->mac_addr[2] = (ma_lo >> 16) & 0xff; pdata->mac_addr[3] = (ma_lo >> 24) & 0xff; pdata->mac_addr[4] = ma_hi & 0xff; pdata->mac_addr[5] = (ma_hi >> 8) & 0xff; if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) || !is_valid_ether_addr(pdata->mac_addr)) { dev_err(dev, "invalid mac address\n"); ret = -EINVAL; goto err_pci_enable; } /* Clock settings */ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ; pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ; /* Set the DMA coherency values */ pdata->coherent = 1; pdata->arcr = XGBE_DMA_PCI_ARCR; pdata->awcr = XGBE_DMA_PCI_AWCR; pdata->awarcr = XGBE_DMA_PCI_AWARCR; /* Set the maximum channels and queues */ reg = XP_IOREAD(pdata, XP_PROP_1); pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, 
MAX_RX_DMA); pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); if (netif_msg_probe(pdata)) { dev_dbg(dev, "max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, pdata->tx_max_channel_count); dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); } /* Set the hardware channel and queue counts */ xgbe_set_counts(pdata); /* Set the maximum fifo amounts */ reg = XP_IOREAD(pdata, XP_PROP_2); pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); pdata->tx_max_fifo_size *= 16384; pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, pdata->vdata->tx_max_fifo_size); pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); pdata->rx_max_fifo_size *= 16384; pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, pdata->vdata->rx_max_fifo_size); if (netif_msg_probe(pdata)) dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n", pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); /* Configure interrupt support */ ret = xgbe_config_irqs(pdata); if (ret) goto err_pci_enable; /* Configure the netdev resource */ ret = xgbe_config_netdev(pdata); if (ret) goto err_irq_vectors; netdev_notice(pdata->netdev, "net device enabled\n"); return 0; err_irq_vectors: pci_free_irq_vectors(pdata->pcidev); err_pci_enable: xgbe_free_pdata(pdata); err_alloc: dev_notice(dev, "net device not enabled\n"); return ret; }
static int xgbe_probe(struct platform_device *pdev) { struct xgbe_prv_data *pdata; struct net_device *netdev; struct device *dev = &pdev->dev, *phy_dev; struct platform_device *phy_pdev; struct resource *res; const char *phy_mode; unsigned int i, phy_memnum, phy_irqnum; enum dev_dma_attr attr; int ret; DBGPR("--> xgbe_probe\n"); netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data), XGBE_MAX_DMA_CHANNELS); if (!netdev) { dev_err(dev, "alloc_etherdev failed\n"); ret = -ENOMEM; goto err_alloc; } SET_NETDEV_DEV(netdev, dev); pdata = netdev_priv(netdev); pdata->netdev = netdev; pdata->pdev = pdev; pdata->adev = ACPI_COMPANION(dev); pdata->dev = dev; platform_set_drvdata(pdev, netdev); spin_lock_init(&pdata->lock); spin_lock_init(&pdata->xpcs_lock); mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); pdata->msg_enable = netif_msg_init(debug, default_msg_level); set_bit(XGBE_DOWN, &pdata->dev_state); /* Check if we should use ACPI or DT */ pdata->use_acpi = dev->of_node ? 0 : 1; phy_pdev = xgbe_get_phy_pdev(pdata); if (!phy_pdev) { dev_err(dev, "unable to obtain phy device\n"); ret = -EINVAL; goto err_phydev; } phy_dev = &phy_pdev->dev; if (pdev == phy_pdev) { /* New style device tree or ACPI: * The XGBE and PHY resources are grouped together with * the PHY resources listed last */ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3; phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1; } else { /* Old style device tree: * The XGBE and PHY resources are separate */ phy_memnum = 0; phy_irqnum = 0; } /* Set and validate the number of descriptors for a ring */ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); pdata->tx_desc_count = XGBE_TX_DESC_CNT; if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { dev_err(dev, "tx descriptor count (%d) is not valid\n", pdata->tx_desc_count); ret = -EINVAL; goto err_io; } BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT); pdata->rx_desc_count = XGBE_RX_DESC_CNT; if (pdata->rx_desc_count & (pdata->rx_desc_count 
- 1)) { dev_err(dev, "rx descriptor count (%d) is not valid\n", pdata->rx_desc_count); ret = -EINVAL; goto err_io; } /* Obtain the mmio areas for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdata->xgmac_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->xgmac_regs)) { dev_err(dev, "xgmac ioremap failed\n"); ret = PTR_ERR(pdata->xgmac_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); pdata->xpcs_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->xpcs_regs)) { dev_err(dev, "xpcs ioremap failed\n"); ret = PTR_ERR(pdata->xpcs_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->rxtx_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->rxtx_regs)) { dev_err(dev, "rxtx ioremap failed\n"); ret = PTR_ERR(pdata->rxtx_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->sir0_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->sir0_regs)) { dev_err(dev, "sir0 ioremap failed\n"); ret = PTR_ERR(pdata->sir0_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs); res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); pdata->sir1_regs = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->sir1_regs)) { dev_err(dev, "sir1 ioremap failed\n"); ret = PTR_ERR(pdata->sir1_regs); goto err_io; } if (netif_msg_probe(pdata)) dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs); /* Retrieve the MAC address */ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, pdata->mac_addr, sizeof(pdata->mac_addr)); if (ret || !is_valid_ether_addr(pdata->mac_addr)) { dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY); if (!ret) 
ret = -EINVAL; goto err_io; } /* Retrieve the PHY mode - it must be "xgmii" */ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY, &phy_mode); if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) { dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY); if (!ret) ret = -EINVAL; goto err_io; } pdata->phy_mode = PHY_INTERFACE_MODE_XGMII; /* Check for per channel interrupt support */ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) pdata->per_channel_irq = 1; /* Retrieve the PHY speedset */ ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY, &pdata->speed_set); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY); goto err_io; } switch (pdata->speed_set) { case XGBE_SPEEDSET_1000_10000: case XGBE_SPEEDSET_2500_10000: break; default: dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY); ret = -EINVAL; goto err_io; } /* Retrieve the PHY configuration properties */ if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_BLWC_PROPERTY, pdata->serdes_blwc, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_BLWC_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_blwc, xgbe_serdes_blwc, sizeof(pdata->serdes_blwc)); } if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_CDR_RATE_PROPERTY, pdata->serdes_cdr_rate, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_CDR_RATE_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate, sizeof(pdata->serdes_cdr_rate)); } if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_PQ_SKEW_PROPERTY, pdata->serdes_pq_skew, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_PQ_SKEW_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew, sizeof(pdata->serdes_pq_skew)); } if 
(device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_TX_AMP_PROPERTY, pdata->serdes_tx_amp, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_TX_AMP_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp, sizeof(pdata->serdes_tx_amp)); } if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_DFE_CFG_PROPERTY, pdata->serdes_dfe_tap_cfg, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_DFE_CFG_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg, sizeof(pdata->serdes_dfe_tap_cfg)); } if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) { ret = device_property_read_u32_array(phy_dev, XGBE_DFE_ENA_PROPERTY, pdata->serdes_dfe_tap_ena, XGBE_SPEEDS); if (ret) { dev_err(dev, "invalid %s property\n", XGBE_DFE_ENA_PROPERTY); goto err_io; } } else { memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena, sizeof(pdata->serdes_dfe_tap_ena)); } /* Obtain device settings unique to ACPI/OF */ if (pdata->use_acpi) ret = xgbe_acpi_support(pdata); else ret = xgbe_of_support(pdata); if (ret) goto err_io; /* Set the DMA coherency values */ attr = device_get_dma_attr(dev); if (attr == DEV_DMA_NOT_SUPPORTED) { dev_err(dev, "DMA is not supported"); goto err_io; } pdata->coherent = (attr == DEV_DMA_COHERENT); if (pdata->coherent) { pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; pdata->arcache = XGBE_DMA_OS_ARCACHE; pdata->awcache = XGBE_DMA_OS_AWCACHE; } else { pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN; pdata->arcache = XGBE_DMA_SYS_ARCACHE; pdata->awcache = XGBE_DMA_SYS_AWCACHE; } /* Get the device interrupt */ ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "platform_get_irq 0 failed\n"); goto err_io; } pdata->dev_irq = ret; /* Get the auto-negotiation interrupt */ ret = platform_get_irq(phy_pdev, phy_irqnum++); if (ret < 0) { dev_err(dev, 
"platform_get_irq phy 0 failed\n"); goto err_io; } pdata->an_irq = ret; netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); /* Set all the function pointers */ xgbe_init_all_fptrs(pdata); /* Issue software reset to device */ pdata->hw_if.exit(pdata); /* Populate the hardware features */ xgbe_get_all_hw_features(pdata); /* Set default configuration data */ xgbe_default_config(pdata); /* Set the DMA mask */ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(pdata->hw_feat.dma_width)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed\n"); goto err_io; } /* Calculate the number of Tx and Rx rings to be created * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set * the number of Tx queues to the number of Tx channels * enabled * -Rx (DMA) Channels do not map 1-to-1 so use the actual * number of Rx queues */ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), pdata->hw_feat.tx_ch_cnt); pdata->tx_q_count = pdata->tx_ring_count; ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); if (ret) { dev_err(dev, "error setting real tx queue count\n"); goto err_io; } pdata->rx_ring_count = min_t(unsigned int, netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt); pdata->rx_q_count = pdata->hw_feat.rx_q_cnt; ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); if (ret) { dev_err(dev, "error setting real rx queue count\n"); goto err_io; } /* Initialize RSS hash key and lookup table */ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, i % pdata->rx_ring_count); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ pdata->phy_if.phy_init(pdata); /* Set device operations */ 
netdev->netdev_ops = xgbe_get_netdev_ops(); netdev->ethtool_ops = xgbe_get_ethtool_ops(); #ifdef CONFIG_AMD_XGBE_DCB netdev->dcbnl_ops = xgbe_get_dcbnl_ops(); #endif /* Set device features */ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER; if (pdata->hw_feat.rss) netdev->hw_features |= NETIF_F_RXHASH; netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; netdev->features |= netdev->hw_features; pdata->netdev_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; /* Use default watchdog timeout */ netdev->watchdog_timeo = 0; xgbe_init_rx_coalesce(pdata); xgbe_init_tx_coalesce(pdata); netif_carrier_off(netdev); ret = register_netdev(netdev); if (ret) { dev_err(dev, "net device registration failed\n"); goto err_io; } /* Create the PHY/ANEG name based on netdev name */ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", netdev_name(netdev)); /* Create workqueues */ pdata->dev_workqueue = create_singlethread_workqueue(netdev_name(netdev)); if (!pdata->dev_workqueue) { netdev_err(netdev, "device workqueue creation failed\n"); ret = -ENOMEM; goto err_netdev; } pdata->an_workqueue = create_singlethread_workqueue(pdata->an_name); if (!pdata->an_workqueue) { netdev_err(netdev, "phy workqueue creation failed\n"); ret = -ENOMEM; goto err_wq; } xgbe_ptp_register(pdata); xgbe_debugfs_init(pdata); platform_device_put(phy_pdev); netdev_notice(netdev, "net device enabled\n"); DBGPR("<-- xgbe_probe\n"); return 0; err_wq: destroy_workqueue(pdata->dev_workqueue); err_netdev: unregister_netdev(netdev); err_io: platform_device_put(phy_pdev); err_phydev: free_netdev(netdev); err_alloc: dev_notice(dev, "net device not enabled\n"); return ret; }
/* Probe routine for the CYRF6936 SPI wireless device.
 *
 * Allocates the net device, verifies the chip is present by reading a
 * known default register value, then either requests the SPI IRQ or
 * falls back to polled operation before registering the netdev.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit cyrf6936_probe(struct spi_device *spi)
{
	struct net_device *dev;
	struct cyrf6936_net *p;
	int ret;

	dev = alloc_netdev(sizeof(*p), "cwu%d", cyrf6936_net_setup);
	if (!dev) {
		dev_err(&spi->dev, "failed to allocate device\n");
		return -ENOMEM;
	}

	p = netdev_priv(dev);
	mutex_init(&p->lock);
	p->spi = spi;
	p->netdev = dev;
	p->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV);

	INIT_WORK(&p->poll_work, cyrf6936_poll_work);
	INIT_WORK(&p->tx_work, cyrf6936_tx_work);

	SET_NETDEV_DEV(dev, &spi->dev);
	spi_set_drvdata(spi, p);

	/* initialize pre-made spi message */
	p->spi_xfer.len = 2;
	p->spi_xfer.bits_per_word = 16;
	p->spi_xfer.rx_buf = &p->spi_rxbuf;
	p->spi_xfer.tx_buf = &p->spi_txbuf;
	spi_message_init(&p->spi_msg);
	spi_message_add_tail(&p->spi_xfer, &p->spi_msg);

	/* reset device */
	cyrf6936_wreg(p, MODE_OVERRIDE, RST);

	/* check if chip is connected: a probe read of EOP_CTRL must
	 * return its documented power-on default
	 */
	if (cyrf6936_rreg(p, EOP_CTRL) != DEFVAL_EOP_CTRL) {
		if (netif_msg_probe(p))
			dev_err(&spi->dev, "chip not found\n");
		ret = -ENODEV;
		goto err_chk;
	}

	/* switch irq/poll mode */
	if (spi->irq) {
		p->netdev->irq = spi->irq;
		ret = request_irq(spi->irq, cyrf6936_irq, 0, dev->name, p);
		if (ret < 0) {
			if (netif_msg_probe(p))
				dev_err(&spi->dev, "request irq failed");
			goto err_irq;
		}
	} else {
		p->pollmode = 1;
		init_timer(&p->poll_timer);
	}

	/* setup wireless extensions */
	cyrf6936_iw_setup(p);

	ret = register_netdev(dev);
	if (ret) {
		if (netif_msg_probe(p))
			dev_err(&spi->dev, "register netdev error\n");
		goto err_reg;
	}

	return 0;

err_reg:
	/* FIX: only free the IRQ if one was actually requested; in poll
	 * mode (spi->irq == 0) no IRQ was ever registered and the old
	 * unconditional free_irq() was wrong.
	 */
	if (!p->pollmode)
		free_irq(spi->irq, p);
err_chk:
err_irq:
	free_netdev(dev);
	return ret;
}
int generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) { int retval; struct net_device *net = dev->net; struct cdc_state *info = (void *) &dev->data; union { void *buf; struct rndis_msg_hdr *header; struct rndis_init *init; struct rndis_init_c *init_c; struct rndis_query *get; struct rndis_query_c *get_c; struct rndis_set *set; struct rndis_set_c *set_c; struct rndis_halt *halt; } u; u32 tmp; __le32 phym_unspec, *phym; int reply_len; unsigned char *bp; u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (!u.buf) return -ENOMEM; retval = usbnet_generic_cdc_bind(dev, intf); if (retval < 0) goto fail; u.init->msg_type = RNDIS_MSG_INIT; u.init->msg_len = cpu_to_le32(sizeof *u.init); u.init->major_version = cpu_to_le32(1); u.init->minor_version = cpu_to_le32(0); net->hard_header_len += sizeof (struct rndis_data_hdr); dev->hard_mtu = net->mtu + net->hard_header_len; dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); if (dev->maxpacket == 0) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n"); retval = -EINVAL; goto fail_and_release; } dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1); dev->rx_urb_size &= ~(dev->maxpacket - 1); u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); net->netdev_ops = &rndis_netdev_ops; retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); goto fail_and_release; } tmp = le32_to_cpu(u.init_c->max_transfer_size); if (tmp < dev->hard_mtu) { if (tmp <= net->hard_header_len) { dev_err(&intf->dev, "dev can't take %u byte packets (max %u)\n", dev->hard_mtu, tmp); retval = -EINVAL; goto halt_fail_and_release; } dev_warn(&intf->dev, "dev can't take %u byte packets (max %u), " "adjusting MTU to %u\n", dev->hard_mtu, tmp, tmp - net->hard_header_len); dev->hard_mtu = tmp; net->mtu = dev->hard_mtu - net->hard_header_len; } dev_dbg(&intf->dev, "hard mtu %u (%u from dev), rx buflen %Zu, align 
%d\n", dev->hard_mtu, tmp, dev->rx_urb_size, 1 << le32_to_cpu(u.init_c->packet_alignment)); if (dev->driver_info->early_init && dev->driver_info->early_init(dev) != 0) goto halt_fail_and_release; phym = NULL; reply_len = sizeof *phym; retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM, 0, (void **) &phym, &reply_len); if (retval != 0 || !phym) { phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED; phym = &phym_unspec; } if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "driver requires wireless " "physical medium, but device is not.\n"); retval = -ENODEV; goto halt_fail_and_release; } if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "driver requires non-wireless " "physical medium, but device is wireless.\n"); retval = -ENODEV; goto halt_fail_and_release; } reply_len = ETH_ALEN; retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, 48, (void **) &bp, &reply_len); if (unlikely(retval< 0)) { dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto halt_fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); memcpy(net->perm_addr, bp, ETH_ALEN); memset(u.set, 0, sizeof *u.set); u.set->msg_type = RNDIS_MSG_SET; u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; u.set->len = cpu_to_le32(4); u.set->offset = cpu_to_le32((sizeof *u.set) - 8); *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { dev_err(&intf->dev, "rndis set packet filter, %d\n", retval); goto halt_fail_and_release; } retval = 0; kfree(u.buf); return retval; halt_fail_and_release: memset(u.halt, 0, sizeof *u.halt); u.halt->msg_type = RNDIS_MSG_HALT; u.halt->msg_len = cpu_to_le32(sizeof *u.halt); (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); 
fail_and_release: usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver_of(intf), info->data); info->data = NULL; fail: kfree(u.buf); return retval; }
static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { struct net_device *net = NULL; struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; int ret; net = alloc_etherdev(sizeof(struct net_device_context)); if (!net) return -ENOMEM; netif_carrier_off(net); net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); if (netif_msg_probe(net_device_ctx)) netdev_dbg(net, "netvsc msg_enable: %d\n", net_device_ctx->msg_enable); hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast, (void *)&net_device_ctx->work); #if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE > 1291) net->netdev_ops = &device_ops; #else net->open = netvsc_open; net->hard_start_xmit = netvsc_start_xmit; net->stop = netvsc_close; net->get_stats = netvsc_get_stats; net->set_multicast_list = netvsc_set_multicast_list; net->change_mtu = netvsc_change_mtu; #endif #if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE > 1291) net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; #endif net->features = NETIF_F_HW_VLAN_TX | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); nvdev = hv_get_drvdata(dev); #ifdef NOTYET netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); dev_info(&dev->device, "real num tx,rx queues:%u, %u\n", net->real_num_tx_queues, nvdev->num_chn); #endif ret = 
register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev); free_netdev(net); } else { schedule_delayed_work(&net_device_ctx->dwork.work, 0); } return ret; }
static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) { int retval; struct net_device *net = dev->net; struct cdc_state *info = (void *) &dev->data; union { void *buf; struct rndis_msg_hdr *header; struct rndis_init *init; struct rndis_init_c *init_c; struct rndis_query *get; struct rndis_query_c *get_c; struct rndis_set *set; struct rndis_set_c *set_c; } u; u32 tmp; int reply_len; unsigned char *bp; /* we can't rely on i/o from stack working, or stack allocation */ u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (!u.buf) return -ENOMEM; retval = usbnet_generic_cdc_bind(dev, intf); if (retval < 0) goto fail; u.init->msg_type = RNDIS_MSG_INIT; u.init->msg_len = ccpu2(sizeof *u.init); u.init->major_version = ccpu2(1); u.init->minor_version = ccpu2(0); /* max transfer (in spec) is 0x4000 at full speed, but for * TX we'll stick to one Ethernet packet plus RNDIS framing. * For RX we handle drivers that zero-pad to end-of-packet. * Don't let userspace change these settings. * * NOTE: there still seems to be wierdness here, as if we need * to do some more things to make sure WinCE targets accept this. * They default to jumbograms of 8KB or 16KB, which is absurd * for such low data rates and which is also more than Linux * can usually expect to allocate for SKB data... */ net->hard_header_len += sizeof (struct rndis_data_hdr); dev->hard_mtu = net->mtu + net->hard_header_len; dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); if (dev->maxpacket == 0) { if (netif_msg_probe(dev)) dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n"); retval = -EINVAL; goto fail_and_release; } dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1); dev->rx_urb_size &= ~(dev->maxpacket - 1); u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); net->change_mtu = NULL; retval = rndis_command(dev, u.header); if (unlikely(retval < 0)) { /* it might not even be an RNDIS device!! 
*/ dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); goto fail_and_release; } tmp = le32_to_cpu(u.init_c->max_transfer_size); if (tmp < dev->hard_mtu) { dev_err(&intf->dev, "dev can't take %u byte packets (max %u)\n", dev->hard_mtu, tmp); retval = -EINVAL; goto fail_and_release; } /* REVISIT: peripheral "alignment" request is ignored ... */ dev_dbg(&intf->dev, "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n", dev->hard_mtu, tmp, dev->rx_urb_size, 1 << le32_to_cpu(u.init_c->packet_alignment)); /* Get designated host ethernet address */ reply_len = ETH_ALEN; retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, 48, (void **) &bp, &reply_len); if (unlikely(retval< 0)) { dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); u.set->msg_type = RNDIS_MSG_SET; u.set->msg_len = ccpu2(4 + sizeof *u.set); u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; u.set->len = ccpu2(4); u.set->offset = ccpu2((sizeof *u.set) - 8); *(__le32 *)(u.buf + sizeof *u.set) = ccpu2(DEFAULT_FILTER); retval = rndis_command(dev, u.header); if (unlikely(retval < 0)) { dev_err(&intf->dev, "rndis set packet filter, %d\n", retval); goto fail_and_release; } retval = 0; kfree(u.buf); return retval; fail_and_release: usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver_of(intf), info->data); info->data = NULL; fail: kfree(u.buf); return retval; }
static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { struct net_device *net = NULL; struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; int ret; u32 max_needed_headroom; net = alloc_etherdev_mq(sizeof(struct net_device_context), num_online_cpus()); if (!net) return -ENOMEM; max_needed_headroom = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; netif_carrier_off(net); net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); if (netif_msg_probe(net_device_ctx)) netdev_dbg(net, "netvsc msg_enable: %d\n", net_device_ctx->msg_enable); net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); if (!net_device_ctx->tx_stats) { free_netdev(net); return -ENOMEM; } net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); if (!net_device_ctx->rx_stats) { free_percpu(net_device_ctx->tx_stats); free_netdev(net); return -ENOMEM; } hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); net->netdev_ops = &device_ops; net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); /* * Request additional head room in the skb. * We will use this space to build the rndis * heaser and other state we need to maintain. 
*/ net->needed_headroom = max_needed_headroom; /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; device_info.max_num_vrss_chns = max_num_vrss_chns; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); netvsc_free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); nvdev = hv_get_drvdata(dev); netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev); netvsc_free_netdev(net); } else { schedule_delayed_work(&net_device_ctx->dwork, 0); } return ret; }