static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);

	return 0;
}
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* 68 is the minimum MTU required by IPv4 */
	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	/* Zero-initialize so fields other than ring_size are not passed
	 * to rndis_filter_device_add() as uninitialized stack data.
	 */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_wake_queue(ndev);

	return 0;
}
static int ccmni_close(struct net_device *dev)
{
	ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
	ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];
	ccmni_instance_t *ccmni_tmp = NULL;

	if (unlikely(ccmni_ctl == NULL)) {
		CCMNI_ERR_MSG(ccmni->md_id, "%s_Close: MD%d ctlb is NULL\n",
			      dev->name, ccmni->md_id);
		return -1;
	}

	atomic_dec(&ccmni->usage);

	/* drop the reference on the shared (iRAT) instance as well */
	ccmni_tmp = ccmni_ctl->ccmni_inst[ccmni->index];
	if (ccmni != ccmni_tmp)
		atomic_dec(&ccmni_tmp->usage);

	/* multi-queue devices must disable every TX queue */
	if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
		netif_tx_disable(dev);
	else
		netif_stop_queue(dev);

	if (unlikely(ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_NAPI))
		napi_disable(&ccmni->napi);

	CCMNI_INF_MSG(ccmni->md_id, "%s_Close: cnt=(%d, %d)\n", dev->name,
		      atomic_read(&ccmni->usage),
		      atomic_read(&ccmni_tmp->usage));

	return 0;
}
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret = 0;
	u32 aread, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		goto out;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

out:
	return ret;
}
static void epping_stop_adapter(epping_adapter_t *pAdapter)
{
	if (pAdapter && pAdapter->started) {
		EPPING_LOG(LOG1, FL("Disabling queues"));
		netif_tx_disable(pAdapter->dev);
		netif_carrier_off(pAdapter->dev);
		pAdapter->started = false;
#if defined(MSM_PLATFORM) && defined(HIF_PCI) && defined(CONFIG_CNSS)
		cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_LOW);
#endif
	}
}
static void s6gmac_adjust_link(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	struct phy_device *phydev = pd->phydev;

	/* link lost or parameters changed: stop TX before reconfiguring */
	if (pd->link.isup &&
	    (!phydev->link ||
	     (pd->link.mbit != phydev->speed) ||
	     (pd->link.full != phydev->duplex))) {
		pd->link.isup = 0;
		netif_tx_disable(dev);
		if (!phydev->link) {
			netif_carrier_off(dev);
			phy_print_status(phydev);
		}
	}

	if (!pd->link.isup && phydev->link) {
		/* update the MAC duplex setting to match the PHY */
		if (pd->link.full != phydev->duplex) {
			u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);

			if (phydev->duplex)
				maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
			else
				maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
			writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
		}

		/* switch between gigabit byte mode and 10/100 nibble mode */
		if (pd->link.giga != (phydev->speed == 1000)) {
			u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
			u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);

			maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
				    << S6_GMAC_MACCONF2_IFMODE);
			if (phydev->speed == 1000) {
				fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
				maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
					  << S6_GMAC_MACCONF2_IFMODE;
			} else {
				fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
				maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
					  << S6_GMAC_MACCONF2_IFMODE;
			}
			writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
			writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
		}

		if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
			netif_wake_queue(dev);
		s6gmac_linkisup(dev, 1);
	}
}
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}
epping_adapter_t *epping_add_adapter(epping_context_t *pEpping_ctx,
				     tSirMacAddr macAddr,
				     tCDF_CON_MODE device_mode)
{
	struct net_device *dev;
	epping_adapter_t *pAdapter;

	dev = alloc_netdev(sizeof(epping_adapter_t), "wifi%d",
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
			   NET_NAME_UNKNOWN,
#endif
			   ether_setup);
	if (dev == NULL) {
		EPPING_LOG(CDF_TRACE_LEVEL_FATAL,
			   "%s: Cannot allocate epping_adapter_t\n", __func__);
		return NULL;
	}

	pAdapter = netdev_priv(dev);
	cdf_mem_zero(pAdapter, sizeof(*pAdapter));
	pAdapter->dev = dev;
	pAdapter->pEpping_ctx = pEpping_ctx;
	pAdapter->device_mode = device_mode;	/* station, SAP, etc */
	cdf_mem_copy(dev->dev_addr, (void *)macAddr, sizeof(tSirMacAddr));
	cdf_mem_copy(pAdapter->macAddressCurrent.bytes,
		     macAddr, sizeof(tSirMacAddr));
	cdf_spinlock_init(&pAdapter->data_lock);
	cdf_nbuf_queue_init(&pAdapter->nodrop_queue);
	pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
	cdf_softirq_timer_init(epping_get_cdf_ctx(),
			       &pAdapter->epping_timer,
			       epping_timer_expire, dev, CDF_TIMER_TYPE_SW);
	dev->type = ARPHRD_IEEE80211;
	dev->netdev_ops = &epping_drv_ops;
	dev->watchdog_timeo = 5 * HZ;	/* XXX */
	dev->tx_queue_len = EPPING_TXBUF - 1;	/* 1 for mgmt frame */

	if (epping_register_adapter(pAdapter) == 0) {
		EPPING_LOG(LOG1, FL("Disabling queues"));
		netif_tx_disable(dev);
		netif_carrier_off(dev);
		return pAdapter;
	} else {
		epping_destroy_adapter(pAdapter);
		return NULL;
	}
}
static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	/* release the coherent DMA ring buffers */
	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	u8 i;

	clear_bit(HFI1_VNIC_UP, &vinfo->flags);
	netif_carrier_off(vinfo->netdev);
	netif_tx_disable(vinfo->netdev);
	idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);

	/* ensure irqs see the change */
	hfi1_vnic_synchronize_irq(dd);

	/* remove unread skbs */
	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		napi_disable(&rxq->napi);
		skb_queue_purge(&rxq->skbq);
	}
}
static int ccmni_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int md_id, md_id_irat, usage_cnt;
	ccmni_instance_t *ccmni_irat;
	ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
	ccmni_instance_t *ccmni_tmp = NULL;
	ccmni_ctl_block_t *ctlb = NULL;
	ccmni_ctl_block_t *ctlb_irat = NULL;
	unsigned int timeout = 0;

	switch (cmd) {
	case SIOCSTXQSTATE:
		/* ifru_ivalue[3~0]:start/stop; ifru_ivalue[7~4]:reserve; */
		/* ifru_ivalue[15~8]:user id, bit8=rild, bit9=thermal */
		/* ifru_ivalue[31~16]: watchdog timeout value */
		ctlb = ccmni_ctl_blk[ccmni->md_id];
		if ((ifr->ifr_ifru.ifru_ivalue & 0xF) == 0) {
			if (atomic_read(&ccmni->usage) > 0) {
				atomic_dec(&ccmni->usage);
				if (ctlb->ccci_ops->md_ability &
				    MODEM_CAP_CCMNI_MQ)
					netif_tx_disable(dev);
				else
					netif_stop_queue(dev);

				/* stop queue won't stop Tx watchdog
				 * (ndo_tx_timeout)
				 */
				timeout = (ifr->ifr_ifru.ifru_ivalue &
					   0xFFFF0000) >> 16;
				if (timeout == 0)
					dev->watchdog_timeo = 60 * HZ;
				else
					dev->watchdog_timeo = timeout * HZ;

				ccmni_tmp = ctlb->ccmni_inst[ccmni->index];
				if (ccmni_tmp != ccmni) {
					/* iRAT ccmni */
					usage_cnt = atomic_read(&ccmni->usage);
					atomic_set(&ccmni_tmp->usage,
						   usage_cnt);
				}
			}
		} else {
			if (atomic_read(&ccmni->usage) <= 0) {
/*
 * Set the current state of a WiMAX device [unlocking version of
 * wimax_state_change()].
 */
void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
{
	struct device *dev = wimax_dev_to_dev(wimax_dev);
	enum wimax_st old_state = wimax_dev->state;
	struct sk_buff *stch_skb;
	void *header;

	d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
		  wimax_dev, new_state, old_state);

	if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
		dev_err(dev, "SW BUG: requesting invalid state %u\n",
			new_state);
		goto out;
	}
	if (old_state == new_state)
		goto out;
	header = NULL;	/* gcc complains? can't grok why */
	stch_skb = wimax_gnl_re_state_change_alloc(
		wimax_dev, new_state, old_state, &header);

	/* Verify the state transition and do exit-from-state actions */
	switch (old_state) {
	case __WIMAX_ST_NULL:
		__check_new_state(old_state, new_state,
				  1 << WIMAX_ST_DOWN);
		break;
	case WIMAX_ST_DOWN:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_UNINITIALIZED
				  | 1 << WIMAX_ST_RADIO_OFF);
		break;
	case __WIMAX_ST_QUIESCING:
		__check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
		break;
	case WIMAX_ST_UNINITIALIZED:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF);
		break;
	case WIMAX_ST_RADIO_OFF:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_READY);
		break;
	case WIMAX_ST_READY:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_SCANNING
				  | 1 << WIMAX_ST_CONNECTING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_SCANNING:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY
				  | 1 << WIMAX_ST_CONNECTING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_CONNECTING:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY
				  | 1 << WIMAX_ST_SCANNING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_CONNECTED:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY);
		netif_tx_disable(wimax_dev->net_dev);
		netif_carrier_off(wimax_dev->net_dev);
		break;
	case __WIMAX_ST_INVALID:
	default:
		dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
			wimax_dev, wimax_dev->state);
		WARN_ON(1);
		goto out;
	}

	/* Execute the actions of entry to the new state */
	switch (new_state) {
	case __WIMAX_ST_NULL:
		dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
			"from %u\n", wimax_dev, wimax_dev->state);
		WARN_ON(1);	/* Nobody can enter this state */
		break;
	case WIMAX_ST_DOWN:
	case __WIMAX_ST_QUIESCING:
	case WIMAX_ST_UNINITIALIZED:
	case WIMAX_ST_RADIO_OFF:
	case WIMAX_ST_READY:
	case WIMAX_ST_SCANNING:
	case WIMAX_ST_CONNECTING:
		break;
	case WIMAX_ST_CONNECTED:
		netif_carrier_on(wimax_dev->net_dev);
		netif_wake_queue(wimax_dev->net_dev);
		break;
	case __WIMAX_ST_INVALID:
	default:
		BUG();
	}
	__wimax_state_set(wimax_dev, new_state);
	if (stch_skb)
		wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
out:
	d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
		wimax_dev, new_state, old_state);
	return;
}