int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
    struct net_device *netdev = wdev->netdev;
    struct qtnf_vif *vif;

    if (WARN_ON(!netdev))
        return -EFAULT;

    vif = qtnf_netdev_get_priv(wdev->netdev);

    if (qtnf_cmd_send_del_intf(vif))
        pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
               vif->vifid);

    /* Stop data */
    netif_tx_stop_all_queues(netdev);
    if (netif_carrier_ok(netdev))
        netif_carrier_off(netdev);

    if (netdev->reg_state == NETREG_REGISTERED)
        unregister_netdevice(netdev);

    vif->netdev->ieee80211_ptr = NULL;
    vif->netdev = NULL;
    vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
    eth_zero_addr(vif->mac_addr);

    return 0;
}
int rtw_hw_suspend23a(struct rtw_adapter *padapter)
{
    struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
    struct net_device *pnetdev = padapter->pnetdev;
    struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

    if ((!padapter->bup) || (padapter->bDriverStopped) ||
        (padapter->bSurpriseRemoved)) {
        DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
                  padapter->bup, padapter->bDriverStopped,
                  padapter->bSurpriseRemoved);
        goto error_exit;
    }

    if (padapter) { /* system suspend */
        LeaveAllPowerSaveMode23a(padapter);

        DBG_8723A("==> rtw_hw_suspend23a\n");
        down(&pwrpriv->lock);
        pwrpriv->bips_processing = true;
        /* padapter->net_closed = true; */
        /* s1. */
        if (pnetdev) {
            netif_carrier_off(pnetdev);
            netif_tx_stop_all_queues(pnetdev);
        }
        /* s2. */
        rtw_disassoc_cmd23a(padapter, 500, false);

        /* s2-2. indicate disconnect to os */
        /* rtw_indicate_disconnect23a(padapter); */
        if (check_fwstate(pmlmepriv, _FW_LINKED)) {
            _clr_fwstate_(pmlmepriv, _FW_LINKED);

            rtw_led_control(padapter, LED_CTL_NO_LINK);

            rtw_os_indicate_disconnect23a(padapter);

            /* do not enqueue cmd */
            rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 0);
        }
        /* s2-3. */
        rtw_free_assoc_resources23a(padapter, 1);

        /* s2-4. */
        rtw_free_network_queue23a(padapter);
        rtw_ips_dev_unload23a(padapter);
        pwrpriv->rf_pwrstate = rf_off;
        pwrpriv->bips_processing = false;

        up(&pwrpriv->lock);
    } else {
        goto error_exit;
    }

    return 0;

error_exit:
    DBG_8723A("%s, failed\n", __func__);
    return -1;
}
static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
    struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
    struct net_device *netdev = pdata->netdev;
    struct xlgmac_channel *channel;
    struct netdev_queue *txq;
    unsigned int i;

    netif_tx_stop_all_queues(netdev);
    xlgmac_stop_timers(pdata);
    hw_ops->disable_tx(pdata);
    hw_ops->disable_rx(pdata);
    xlgmac_free_irqs(pdata);
    xlgmac_napi_disable(pdata, 1);
    hw_ops->exit(pdata);

    channel = pdata->channel_head;
    for (i = 0; i < pdata->channel_count; i++, channel++) {
        if (!channel->tx_ring)
            continue;

        txq = netdev_get_tx_queue(netdev, channel->queue_index);
        netdev_tx_reset_queue(txq);
    }
}
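/*
 * For reference, a minimal sketch of the matching start path. The helper
 * names (hw_ops->init, xlgmac_napi_enable, xlgmac_request_irqs,
 * xlgmac_start_timers) are assumed by symmetry with xlgmac_stop() above,
 * not quoted from the driver: bring the hardware and NAPI up first, and
 * only then let the stack queue packets.
 */
static int xlgmac_start(struct xlgmac_pdata *pdata)
{
    struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
    struct net_device *netdev = pdata->netdev;
    int ret;

    hw_ops->init(pdata);
    xlgmac_napi_enable(pdata, 1);

    ret = xlgmac_request_irqs(pdata);
    if (ret < 0)
        goto err_napi;

    hw_ops->enable_tx(pdata);
    hw_ops->enable_rx(pdata);
    xlgmac_start_timers(pdata);
    netif_tx_start_all_queues(netdev);

    return 0;

err_napi:
    xlgmac_napi_disable(pdata, 1);
    hw_ops->exit(pdata);
    return ret;
}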
/* called from irq-context */
void r92su_disconnect_bss_event(struct r92su *r92su)
{
    netif_tx_stop_all_queues(r92su->wdev.netdev);
    netif_carrier_off(r92su->wdev.netdev);

    queue_work(r92su->wq, &r92su->disconnect_work);
}
/***********************************************************
 * mv_eth_start --                                         *
 *   start a network device. connect and enable interrupts *
 *   set hw defaults. fill rx buffers. restart phy link    *
 *   auto neg. set device link flags. report status.       *
 ***********************************************************/
static int mv_eth_start(struct net_device *dev)
{
    struct eth_port *priv = MV_ETH_PRIV(dev);
    int group;

    /* by default the link is down */
    netif_carrier_off(dev);

    /* Stop the TX queue - it will be enabled upon PHY status change
     * after link-up interrupt/timer */
    netif_tx_stop_all_queues(dev);

    /* fill rx buffers, start rx/tx activity, set coalescing */
    if (mv_eth_start_internals(priv, dev->mtu) != 0) {
        printk(KERN_ERR "%s: start internals failed\n", dev->name);
        goto error;
    }

    /* enable polling on the port, must be used after netif_poll_disable */
    if (priv->flags & MV_ETH_F_CONNECT_LINUX)
        for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
            napi_enable(priv->napiGroup[group]);

    if ((priv->flags & MV_ETH_F_LINK_UP) &&
        !(priv->flags & MV_ETH_F_EXT_SWITCH)) {
        if (mv_eth_ctrl_is_tx_enabled(priv)) {
            netif_carrier_on(dev);
            netif_tx_wake_all_queues(dev);
        }
        printk(KERN_NOTICE "%s: link up\n", dev->name);
    }

#ifdef CONFIG_MV_ETH_SWITCH_LINK
    if (priv->flags & MV_ETH_F_EXT_SWITCH) {
        struct eth_netdev *dev_priv = MV_DEV_PRIV(dev);

        dev_priv->link_map = 0;
        mv_switch_link_update_event(dev_priv->port_map, 1);
    }
#endif /* CONFIG_MV_ETH_SWITCH_LINK */

    if (priv->flags & MV_ETH_F_CONNECT_LINUX) {
        /* connect to port interrupt line */
        if (request_irq(dev->irq, mv_eth_isr,
                        (IRQF_DISABLED | IRQF_SAMPLE_RANDOM),
                        "mv_eth", priv)) {
            printk(KERN_ERR "cannot request irq %d for %s port %d\n",
                   dev->irq, dev->name, priv->port);
            if (priv->flags & MV_ETH_F_CONNECT_LINUX)
                napi_disable(priv->napiGroup[CPU_GROUP_DEF]);
            goto error;
        }

        /* unmask interrupts */
        mv_eth_interrupts_unmask(priv);
        smp_call_function_many(cpu_online_mask,
                               (smp_call_func_t)mv_eth_interrupts_unmask,
                               (void *)priv, 1);

        printk(KERN_NOTICE "%s: started\n", dev->name);
    }

    return 0;

error:
    printk(KERN_ERR "%s: start failed\n", dev->name);
    return -1;
}
/***********************************************************
 * mv_eth_stop --                                          *
 *   stop interface with linux core. stop port activity.   *
 *   free skb's from rings.                                *
 ***********************************************************/
int mv_eth_stop(struct net_device *dev)
{
    struct eth_port *priv = MV_ETH_PRIV(dev);
    struct cpu_ctrl *cpuCtrl;
    int group, cpu;

    /* first make sure that the port finished its Rx polling - see tg3 */
    for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
        napi_disable(priv->napiGroup[group]);

    /* stop upper layer */
    netif_carrier_off(dev);
    netif_tx_stop_all_queues(dev);

    /* stop tx/rx activity, mask all interrupts, release skb in rings */
    mv_eth_stop_internals(priv);

    for_each_possible_cpu(cpu) {
        cpuCtrl = priv->cpu_config[cpu];
        del_timer(&cpuCtrl->tx_done_timer);
        clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
        del_timer(&cpuCtrl->cleanup_timer);
        clear_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags));
    }

    if (dev->irq != 0)
        free_irq(dev->irq, priv);

    printk(KERN_NOTICE "%s: stopped\n", dev->name);

    return 0;
}
/*----------------------------------------------------------------------------*/
BOOLEAN kalUninitBowDevice(IN P_GLUE_INFO_T prGlueInfo)
{
    P_ADAPTER_T prAdapter;

    ASSERT(prGlueInfo);
    prAdapter = prGlueInfo->prAdapter;
    ASSERT(prAdapter);

    /* ASSERT(prGlueInfo->rBowInfo.fgIsRegistered == TRUE); */

    if (prGlueInfo->rBowInfo.fgIsNetRegistered == TRUE) {
        prGlueInfo->rBowInfo.fgIsNetRegistered = FALSE;

        bowUninit(prAdapter);

        if (netif_carrier_ok(prGlueInfo->rBowInfo.prDevHandler))
            netif_carrier_off(prGlueInfo->rBowInfo.prDevHandler);

        netif_tx_stop_all_queues(prGlueInfo->rBowInfo.prDevHandler);

        /* netdevice unregistration & free */
        unregister_netdev(prGlueInfo->rBowInfo.prDevHandler);
        free_netdev(prGlueInfo->rBowInfo.prDevHandler);
        prGlueInfo->rBowInfo.prDevHandler = NULL;

        return TRUE;
    } else {
        return FALSE;
    }
}
static void cpmac_check_status(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
    int rx_channel = (macstatus >> 8) & 7;
    int rx_code = (macstatus >> 12) & 15;
    int tx_channel = (macstatus >> 16) & 7;
    int tx_code = (macstatus >> 20) & 15;

    if (rx_code || tx_code) {
        if (netif_msg_drv(priv) && net_ratelimit()) {
            /* Can't find any documentation on what these
             * error codes actually are. So just log them and hope..
             */
            if (rx_code)
                printk(KERN_WARNING "%s: host error %d on rx "
                       "channel %d (macstatus %08x), resetting\n",
                       dev->name, rx_code, rx_channel, macstatus);
            if (tx_code)
                printk(KERN_WARNING "%s: host error %d on tx "
                       "channel %d (macstatus %08x), resetting\n",
                       dev->name, tx_code, tx_channel, macstatus);
        }

        netif_tx_stop_all_queues(dev);
        cpmac_hw_stop(dev);
        if (schedule_work(&priv->reset_work))
            atomic_inc(&priv->reset_pending);
        if (unlikely(netif_msg_hw(priv)))
            cpmac_dump_regs(dev);
    }
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
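/*
 * A sketch of the reset worker that cpmac_check_status() schedules; the
 * exact body is assumed, but it must undo both effects above: restart the
 * hardware, drop reset_pending, and wake the queues that were stopped.
 * The ring-cleanup helpers (cpmac_clear_rx/cpmac_clear_tx) are assumed.
 */
static void cpmac_hw_error(struct work_struct *work)
{
    struct cpmac_priv *priv =
        container_of(work, struct cpmac_priv, reset_work);

    spin_lock(&priv->rx_lock);
    cpmac_clear_rx(priv->dev);
    spin_unlock(&priv->rx_lock);
    cpmac_clear_tx(priv->dev);
    cpmac_hw_start(priv->dev);
    barrier();
    atomic_dec(&priv->reset_pending);

    netif_tx_wake_all_queues(priv->dev);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}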
static int xenvif_close(struct net_device *dev)
{
    struct xenvif *vif = netdev_priv(dev);

    if (netif_carrier_ok(dev))
        xenvif_down(vif);

    netif_tx_stop_all_queues(dev);

    return 0;
}
static int xenvif_close(struct net_device *dev)
{
    struct xenvif *vif = netdev_priv(dev);

    if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
        xenvif_down(vif);

    netif_tx_stop_all_queues(dev);

    return 0;
}
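/*
 * The open path is the mirror image (a sketch assuming the same
 * VIF_STATUS_CONNECTED convention as the second xenvif_close() variant
 * above): resume the backend machinery first, then let the stack transmit.
 */
static int xenvif_open(struct net_device *dev)
{
    struct xenvif *vif = netdev_priv(dev);

    if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
        xenvif_up(vif);

    netif_tx_start_all_queues(dev);

    return 0;
}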
/*
 * This function stops all queues in net_device
 */
void mwifiex_stop_net_dev_queue(struct net_device *netdev,
                                struct mwifiex_adapter *adapter)
{
    unsigned long dev_queue_flags;

    spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
    netif_tx_stop_all_queues(netdev);
    spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
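/*
 * Illustrative counterpart (a minimal sketch, name and shape assumed by
 * symmetry with the stop helper above): waking the queues under the same
 * adapter->queue_lock, so stop and wake cannot interleave.
 */
void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
                                   struct mwifiex_adapter *adapter)
{
    unsigned long dev_queue_flags;

    spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
    netif_tx_wake_all_queues(netdev);
    spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}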
void wil_link_off(struct wil6210_priv *wil)
{
    struct net_device *ndev = wil_to_ndev(wil);

    wil_dbg_misc(wil, "%s()\n", __func__);
    netif_tx_stop_all_queues(ndev);
    netif_carrier_off(ndev);
}
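/*
 * For contrast, the link-up path reverses the order (a sketch assuming
 * the same wil_to_ndev() helper): carrier on before waking the queues,
 * so a woken queue never sees a carrier-off device.
 */
void wil_link_on(struct wil6210_priv *wil)
{
    struct net_device *ndev = wil_to_ndev(wil);

    netif_carrier_on(ndev);
    netif_tx_wake_all_queues(ndev);
}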
static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
{
    if (data) {
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
    } else {
        netif_tx_stop_all_queues(netdev);
        netdev->features &= ~NETIF_F_TSO;
        netdev->features &= ~NETIF_F_TSO6;
        netif_tx_start_all_queues(netdev);
    }

    return 0;
}
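/*
 * Hypothetical variant (not from the driver): the stop/flip/start bracket
 * could guard the enable path as well, so an in-flight xmit never sees a
 * half-updated feature set. The name ixgbevf_set_tso_guarded is invented
 * for illustration.
 */
static int ixgbevf_set_tso_guarded(struct net_device *netdev, u32 data)
{
    netif_tx_stop_all_queues(netdev);
    if (data)
        netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
    else
        netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
    netif_tx_start_all_queues(netdev);

    return 0;
}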
int netdev_close(struct net_device *pnetdev)
{
    struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
    struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));

    if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
        if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
            padapter->pwrctrlpriv.ps_flag = true;
    }
    padapter->net_closed = true;

    if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
        DBG_88E("(2)88eu_drv - drv_close, bup =%d, hw_init_completed =%d\n",
                padapter->bup, padapter->hw_init_completed);

        /* s1. */
        if (pnetdev) {
            if (!rtw_netif_queue_stopped(pnetdev))
                netif_tx_stop_all_queues(pnetdev);
        }

        /* s2. */
        LeaveAllPowerSaveMode(padapter);
        rtw_disassoc_cmd(padapter, 500, false);
        /* s2-2. indicate disconnect to os */
        rtw_indicate_disconnect(padapter);
        /* s2-3. */
        rtw_free_assoc_resources(padapter, 1);
        /* s2-4. */
        rtw_free_network_queue(padapter, true);
        /* Close LED */
        rtw_led_control(padapter, LED_CTL_POWER_OFF);
    }

    nat25_db_cleanup(padapter);

#ifdef CONFIG_88EU_P2P
    rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
#endif /* CONFIG_88EU_P2P */

    kfree(dvobj->firmware.szFwBuffer);
    dvobj->firmware.szFwBuffer = NULL;

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
    DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
    return 0;
}
/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
    struct fjes_adapter *adapter = netdev_priv(netdev);
    struct fjes_hw *hw = &adapter->hw;
    unsigned long flags;
    int epidx;

    netif_tx_stop_all_queues(netdev);
    netif_carrier_off(netdev);

    fjes_hw_raise_epstop(hw);

    napi_disable(&adapter->napi);

    spin_lock_irqsave(&hw->rx_status_lock, flags);
    for (epidx = 0; epidx < hw->max_epid; epidx++) {
        if (epidx == hw->my_epid)
            continue;

        if (fjes_hw_get_partner_ep_status(hw, epidx) ==
            EP_PARTNER_SHARED)
            adapter->hw.ep_shm_info[epidx]
                       .tx.info->v1i.rx_status &=
                ~FJES_RX_POLL_WORK;
    }
    spin_unlock_irqrestore(&hw->rx_status_lock, flags);

    fjes_free_irq(adapter);

    cancel_delayed_work_sync(&adapter->interrupt_watch_task);
    cancel_work_sync(&adapter->unshare_watch_task);
    adapter->unshare_watch_bitmask = 0;
    cancel_work_sync(&adapter->raise_intr_rxdata_task);
    cancel_work_sync(&adapter->tx_stall_task);

    cancel_work_sync(&hw->update_zone_task);
    cancel_work_sync(&hw->epstop_task);

    fjes_hw_wait_epstop(hw);

    fjes_free_resources(adapter);

    return 0;
}
int netdev_close(struct net_device *pnetdev)
{
    struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
    struct hal_data_8188e *rtlhal = GET_HAL_DATA(padapter);

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));

    if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
        if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
            padapter->pwrctrlpriv.ps_flag = true;
    }
    padapter->net_closed = true;

    if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
        DBG_88E("(2)88eu_drv - drv_close, bup =%d, hw_init_completed =%d\n",
                padapter->bup, padapter->hw_init_completed);

        /* s1. */
        if (pnetdev) {
            if (!rtw_netif_queue_stopped(pnetdev))
                netif_tx_stop_all_queues(pnetdev);
        }

        /* s2. */
        LeaveAllPowerSaveMode(padapter);
        rtw_disassoc_cmd(padapter, 500, false);
        /* s2-2. indicate disconnect to os */
        rtw_indicate_disconnect(padapter);
        /* s2-3. */
        rtw_free_assoc_resources(padapter, 1);
        /* s2-4. */
        rtw_free_network_queue(padapter, true);
        /* Close LED */
        rtw_led_control(padapter, LED_CTL_POWER_OFF);
    }

    kfree(rtlhal->pfirmware);
    rtlhal->pfirmware = NULL;

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
    DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
    return 0;
}
static void qtnf_vif_reset_handler(struct work_struct *work)
{
    struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);

    rtnl_lock();

    if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
        rtnl_unlock();
        return;
    }

    /* stop tx completely */
    netif_tx_stop_all_queues(vif->netdev);
    if (netif_carrier_ok(vif->netdev))
        netif_carrier_off(vif->netdev);

    qtnf_cfg80211_vif_reset(vif);

    rtnl_unlock();
}
static int netdev_close(struct net_device *pnetdev)
{
    struct rtw_adapter *padapter = netdev_priv(pnetdev);

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n"));

    padapter->net_closed = true;

    if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
        DBG_8723A("(2)871x_drv - drv_close, bup =%d, "
                  "hw_init_completed =%d\n",
                  padapter->bup, padapter->hw_init_completed);

        /* s1. */
        if (pnetdev) {
            if (!rtw_netif_queue_stopped(pnetdev))
                netif_tx_stop_all_queues(pnetdev);
        }

        /* s2. */
        LeaveAllPowerSaveMode23a(padapter);
        rtw_disassoc_cmd23a(padapter, 500, false);
        /* s2-2. indicate disconnect to os */
        rtw_indicate_disconnect23a(padapter);
        /* s2-3. */
        rtw_free_assoc_resources23a(padapter, 1);
        /* s2-4. */
        rtw_free_network_queue23a(padapter);
        /* Close LED */
        rtw_led_control(padapter, LED_CTL_POWER_OFF);
    }

    rtw_scan_abort23a(padapter);

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
    DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);

    return 0;
}
/*----------------------------------------------------------------------------*/
static int bowStop(IN struct net_device *prDev)
{
    P_GLUE_INFO_T prGlueInfo = NULL;
    P_ADAPTER_T prAdapter = NULL;

    ASSERT(prDev);

    prGlueInfo = *((P_GLUE_INFO_T *) netdev_priv(prDev));
    ASSERT(prGlueInfo);

    prAdapter = prGlueInfo->prAdapter;
    ASSERT(prAdapter);

    /* 1. stop TX queue */
    netif_tx_stop_all_queues(prDev);

    /* 2. turn off carrier */
    if (netif_carrier_ok(prDev))
        netif_carrier_off(prDev);

    return 0;
}
static void sreset_stop_adapter(struct rtw_adapter *padapter)
{
    struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
    struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

    if (padapter == NULL)
        return;

    DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);

    if (!rtw_netif_queue_stopped(padapter->pnetdev))
        netif_tx_stop_all_queues(padapter->pnetdev);

    rtw_cancel_all_timer23a(padapter);

    /* TODO: OS and HCI independent */
    tasklet_kill(&pxmitpriv->xmit_tasklet);

    if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
        rtw_scan_abort23a(padapter);

    if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
        rtw23a_join_to_handler((unsigned long)padapter);
}
void *wil_if_alloc(struct device *dev)
{
    struct net_device *ndev;
    struct wireless_dev *wdev;
    struct wil6210_priv *wil;
    struct ieee80211_channel *ch;
    int rc = 0;

    wdev = wil_cfg80211_init(dev);
    if (IS_ERR(wdev)) {
        dev_err(dev, "wil_cfg80211_init failed\n");
        return wdev;
    }

    wil = wdev_to_wil(wdev);
    wil->wdev = wdev;

    wil_dbg_misc(wil, "%s()\n", __func__);

    rc = wil_priv_init(wil);
    if (rc) {
        dev_err(dev, "wil_priv_init failed\n");
        goto out_wdev;
    }

    wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
    /* default monitor channel */
    ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
    cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);

    ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
    if (!ndev) {
        dev_err(dev, "alloc_netdev_mqs failed\n");
        rc = -ENOMEM;
        goto out_priv;
    }

    ndev->netdev_ops = &wil_netdev_ops;
    wil_set_ethtoolops(ndev);
    ndev->ieee80211_ptr = wdev;
    ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                        NETIF_F_SG | NETIF_F_GRO |
                        NETIF_F_TSO | NETIF_F_TSO6 |
                        NETIF_F_RXHASH;

    ndev->features |= ndev->hw_features;
    SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
    wdev->netdev = ndev;

    netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
                   WIL6210_NAPI_BUDGET);
    netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
                      WIL6210_NAPI_BUDGET);

    netif_tx_stop_all_queues(ndev);

    return wil;

out_priv:
    wil_priv_deinit(wil);

out_wdev:
    wil_wdev_free(wil);

    return ERR_PTR(rc);
}
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
    struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
    struct qtnf_pcie_bus_priv *priv = &ps->base;
    dma_addr_t txbd_paddr, skb_paddr;
    struct qtnf_pearl_tx_bd *txbd;
    unsigned long flags;
    int len, i;
    u32 info;
    int ret = 0;

    spin_lock_irqsave(&priv->tx_lock, flags);

    if (!qtnf_tx_queue_ready(ps)) {
        if (skb->dev) {
            netif_tx_stop_all_queues(skb->dev);
            priv->tx_stopped = 1;
        }

        spin_unlock_irqrestore(&priv->tx_lock, flags);
        return NETDEV_TX_BUSY;
    }

    i = priv->tx_bd_w_index;
    priv->tx_skb[i] = skb;
    len = skb->len;

    skb_paddr = pci_map_single(priv->pdev, skb->data,
                               skb->len, PCI_DMA_TODEVICE);
    if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
        pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
        ret = -ENOMEM;
        goto tx_done;
    }

    txbd = &ps->tx_bd_vbase[i];
    txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
    txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

    info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
    txbd->info = cpu_to_le32(info);

    /* sync up all descriptor updates before passing them to EP */
    dma_wmb();

    /* write new TX descriptor to PCIE_RX_FIFO on EP */
    txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
    writel(QTN_HOST_HI32(txbd_paddr),
           PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
    writel(QTN_HOST_LO32(txbd_paddr),
           PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

    if (++i >= priv->tx_bd_num)
        i = 0;

    priv->tx_bd_w_index = i;

tx_done:
    if (ret && skb) {
        pr_err_ratelimited("drop skb\n");
        if (skb->dev)
            skb->dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
    }

    priv->tx_done_count++;
    spin_unlock_irqrestore(&priv->tx_lock, flags);

    qtnf_pearl_data_tx_reclaim(ps);

    return NETDEV_TX_OK;
}
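/*
 * A sketch of what qtnf_tx_queue_ready() above is assumed to check: free
 * space in the descriptor ring, with one reclaim attempt before giving up.
 * CIRC_SPACE() is the kernel's circular-buffer helper; the tx_full_count
 * field is an assumption by analogy with tx_done_count.
 */
static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
{
    struct qtnf_pcie_bus_priv *priv = &ps->base;

    if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                    priv->tx_bd_num)) {
        qtnf_pearl_data_tx_reclaim(ps);

        if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                        priv->tx_bd_num)) {
            pr_warn_ratelimited("reclaim full Tx queue\n");
            priv->tx_full_count++;
            return 0;
        }
    }

    return 1;
}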
static int ipheth_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
{
    struct usb_device *udev = interface_to_usbdev(intf);
    struct usb_host_interface *hintf;
    struct usb_endpoint_descriptor *endp;
    struct ipheth_device *dev;
    struct net_device *netdev;
    int i;
    int retval;

    netdev = alloc_etherdev(sizeof(struct ipheth_device));
    if (!netdev)
        return -ENOMEM;

    netdev->netdev_ops = &ipheth_netdev_ops;
    netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
    strcpy(netdev->name, "eth%d");

    dev = netdev_priv(netdev);
    dev->udev = udev;
    dev->net = netdev;
    dev->intf = intf;
    dev->confirmed_pairing = false;

    /* Set up endpoints */
    hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
    if (hintf == NULL) {
        retval = -ENODEV;
        dev_err(&intf->dev, "Unable to find alternate settings interface\n");
        goto err_endpoints;
    }

    for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
        endp = &hintf->endpoint[i].desc;
        if (usb_endpoint_is_bulk_in(endp))
            dev->bulk_in = endp->bEndpointAddress;
        else if (usb_endpoint_is_bulk_out(endp))
            dev->bulk_out = endp->bEndpointAddress;
    }
    if (!(dev->bulk_in && dev->bulk_out)) {
        retval = -ENODEV;
        dev_err(&intf->dev, "Unable to find endpoints\n");
        goto err_endpoints;
    }

    dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
    if (dev->ctrl_buf == NULL) {
        retval = -ENOMEM;
        goto err_alloc_ctrl_buf;
    }

    retval = ipheth_get_macaddr(dev);
    if (retval)
        goto err_get_macaddr;

    INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);

    retval = ipheth_alloc_urbs(dev);
    if (retval) {
        dev_err(&intf->dev, "error allocating urbs: %d\n", retval);
        goto err_alloc_urbs;
    }

    usb_set_intfdata(intf, dev);

    SET_NETDEV_DEV(netdev, &intf->dev);
    netdev->ethtool_ops = &ops;

    retval = register_netdev(netdev);
    if (retval) {
        dev_err(&intf->dev, "error registering netdev: %d\n", retval);
        retval = -EIO;
        goto err_register_netdev;
    }
    /* carrier down and transmit queues stopped until packet from device */
    netif_carrier_off(netdev);
    netif_tx_stop_all_queues(netdev);
    dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
    return 0;

err_register_netdev:
    ipheth_free_urbs(dev);
err_alloc_urbs:
err_get_macaddr:
err_alloc_ctrl_buf:
    kfree(dev->ctrl_buf);
err_endpoints:
    free_netdev(netdev);
    return retval;
}
int hdd_mon_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
   v_U16_t rt_hdr_len;
   struct ieee80211_hdr *hdr;
   hdd_adapter_t *pPgBkAdapter, *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
   struct ieee80211_radiotap_header *rtap_hdr =
        (struct ieee80211_radiotap_header *)skb->data;

   /* Supplicant sends the EAPOL packet on monitor interface */
   pPgBkAdapter = pAdapter->sessionCtx.monitor.pAdapterForTx;
   if(pPgBkAdapter == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
           "%s: No Adapter to piggy back. Dropping the pkt on monitor inf",
                 __func__);
      goto fail;
   }

   /* check if total skb length is greater than the radiotap header length or not */
   if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
      goto fail; /* too short to be possibly valid */

   /* check if the radiotap header version is correct or not */
   if (unlikely(rtap_hdr->it_version))
      goto fail; /* only version 0 is supported */

   /* Strip off the radiotap header */
   rt_hdr_len = ieee80211_get_radiotap_len(skb->data);

   /* check if skb length is greater than the total radiotap header length or not */
   if (unlikely(skb->len < rt_hdr_len))
      goto fail;

   /* Update the trans_start for this netdev */
   dev->trans_start = jiffies;

   /*
    * fix up the pointers accounting for the radiotap
    * header still being in there.
    */
   skb_set_mac_header(skb, rt_hdr_len);
   skb_set_network_header(skb, rt_hdr_len);
   skb_set_transport_header(skb, rt_hdr_len);

   /* Pull rtap header out of the skb */
   skb_pull(skb, rt_hdr_len);

   /* Supplicant adds: radiotap Hdr + radiotap data + 80211 Header. So after
    * the radiotap header the 802.11 header starts */
   hdr = (struct ieee80211_hdr *)skb->data;

   /* Send data frames through the normal Data path. In this path we will
    * convert the rcvd 802.11 packet to an 802.3 packet */
   if ( (hdr->frame_control & HDD_FRAME_TYPE_MASK) == HDD_FRAME_TYPE_DATA)
   {
      v_U8_t da[6];
      v_U8_t sa[6];

      memcpy (da, hdr->addr1, VOS_MAC_ADDR_SIZE);
      memcpy (sa, hdr->addr2, VOS_MAC_ADDR_SIZE);

      /* Pull 802.11 MAC header */
      skb_pull(skb, HDD_80211_HEADER_LEN);

      if ( HDD_FRAME_SUBTYPE_QOSDATA ==
          (hdr->frame_control & HDD_FRAME_SUBTYPE_MASK))
      {
         skb_pull(skb, HDD_80211_HEADER_QOS_CTL);
      }

      /* Pull LLC header */
      skb_pull(skb, HDD_LLC_HDR_LEN);

      /* Create space for Ethernet header */
      skb_push(skb, HDD_MAC_HDR_SIZE*2);
      memcpy(&skb->data[0], da, HDD_MAC_HDR_SIZE);
      memcpy(&skb->data[HDD_DEST_ADDR_OFFSET], sa, HDD_MAC_HDR_SIZE);

      /* Only EAPOL Data packets are allowed through monitor interface */
      if (vos_be16_to_cpu(
         (*(unsigned short*)&skb->data[HDD_ETHERTYPE_802_1_X_FRAME_OFFSET]) )
                                                     != HDD_ETHERTYPE_802_1_X)
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
            "%s: Not an EAPOL packet. Drop this frame", __func__);
         /* If not EAPOL frames, drop them. */
         kfree_skb(skb);
         return NETDEV_TX_OK;
      }

      skb->protocol = htons(HDD_ETHERTYPE_802_1_X);

      hdd_hostapd_select_queue(pPgBkAdapter->dev, skb);
      return hdd_softap_hard_start_xmit( skb, pPgBkAdapter->dev );
   }
   else
   {
      VOS_STATUS status;
      WLANTL_ACEnumType ac = 0;
      skb_list_node_t *pktNode = NULL;
      v_SIZE_t pktListSize = 0;

      spin_lock(&pAdapter->wmm_tx_queue[ac].lock);

      /* If we have already reached the max queue size, disable the TX queue */
      if ( pAdapter->wmm_tx_queue[ac].count == pAdapter->wmm_tx_queue[ac].max_size)
      {
         /* We want to process one packet at a time, so lets disable all TX queues
          * and re-enable the queues once we get TX feedback for this packet */
         netif_tx_stop_all_queues(pAdapter->dev);
         pAdapter->isTxSuspended[ac] = VOS_TRUE;
         spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);
         return NETDEV_TX_BUSY;
      }
      spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);

      /* Use the skb->cb field to hold the list node information */
      pktNode = (skb_list_node_t *)&skb->cb;

      /* Stick the OS packet inside this node. */
      pktNode->skb = skb;

      INIT_LIST_HEAD(&pktNode->anchor);

      /* Insert the OS packet into the appropriate AC queue */
      spin_lock(&pAdapter->wmm_tx_queue[ac].lock);
      status = hdd_list_insert_back_size( &pAdapter->wmm_tx_queue[ac],
                                          &pktNode->anchor, &pktListSize );
      spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);

      if ( !VOS_IS_STATUS_SUCCESS( status ) )
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                    "%s: Insert Tx queue failed. Pkt dropped", __FUNCTION__);
         kfree_skb(skb);
         return NETDEV_TX_OK;
      }

      if ( pktListSize == 1 )
      {
         /* In this context we cannot acquire any mutex etc. And to transmit
          * this packet we need to call an SME API. So to take care of this we
          * will schedule a workqueue */
         schedule_work(&pPgBkAdapter->monTxWorkQueue);
      }
      return NETDEV_TX_OK;
   }

fail:
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
              "%s: Packet received at the monitor interface is not proper,"
              " dropping the packet", __func__);
   kfree_skb(skb);
   return NETDEV_TX_OK;
}
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
{
    struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
    struct rtw_adapter *padapter = dvobj->if1;
    struct net_device *pnetdev = padapter->pnetdev;
    struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
    struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
    int ret = 0;
    unsigned long start_time = jiffies;

    DBG_8723A("==> %s (%s:%d)\n", __func__, current->comm, current->pid);

    if ((!padapter->bup) || (padapter->bDriverStopped) ||
        (padapter->bSurpriseRemoved)) {
        DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
                  padapter->bup, padapter->bDriverStopped,
                  padapter->bSurpriseRemoved);
        goto exit;
    }

    pwrpriv->bInSuspend = true;
    rtw_cancel_all_timer23a(padapter);
    LeaveAllPowerSaveMode23a(padapter);

    down(&pwrpriv->lock);
    /* padapter->net_closed = true; */
    /* s1. */
    if (pnetdev) {
        netif_carrier_off(pnetdev);
        netif_tx_stop_all_queues(pnetdev);
    }
    /* s2. */
    rtw_disassoc_cmd23a(padapter, 0, false);

    if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
        check_fwstate(pmlmepriv, _FW_LINKED)) {
        DBG_8723A("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
                  __func__, __LINE__,
                  pmlmepriv->cur_network.network.Ssid.ssid,
                  pmlmepriv->cur_network.network.MacAddress,
                  pmlmepriv->cur_network.network.Ssid.ssid_len,
                  pmlmepriv->assoc_ssid.ssid_len);

        rtw_set_roaming(padapter, 1);
    }
    /* s2-2. indicate disconnect to os */
    rtw_indicate_disconnect23a(padapter);
    /* s2-3. */
    rtw_free_assoc_resources23a(padapter, 1);
    /* s2-4. */
    rtw_free_network_queue23a(padapter);

    rtw_dev_unload(padapter);
    up(&pwrpriv->lock);

    if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
        rtw_cfg80211_indicate_scan_done(
            wdev_to_priv(padapter->rtw_wdev), true);

    if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
        rtw_indicate_disconnect23a(padapter);

exit:
    DBG_8723A("<=== %s return %d.............. in %dms\n", __func__,
              ret, jiffies_to_msecs(jiffies - start_time));

    return ret;
}
int _netdev_open(struct net_device *pnetdev)
{
    uint status;
    struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
    struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;

    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
    DBG_88E("+88eu_drv - drv_open, bup =%d\n", padapter->bup);

    if (pwrctrlpriv->ps_flag) {
        padapter->net_closed = false;
        goto netdev_open_normal_process;
    }

    if (!padapter->bup) {
        padapter->bDriverStopped = false;
        padapter->bSurpriseRemoved = false;

        status = rtw_hal_init(padapter);
        if (status == _FAIL) {
            RT_TRACE(_module_os_intfs_c_, _drv_err_,
                     ("rtl88eu_hal_init(): Can't init h/w!\n"));
            goto netdev_open_error;
        }

        pr_info("MAC Address = %pM\n", pnetdev->dev_addr);

        status = rtw_start_drv_threads(padapter);
        if (status == _FAIL) {
            pr_info("Initialize driver software resource Failed!\n");
            goto netdev_open_error;
        }

        if (init_hw_mlme_ext(padapter) == _FAIL) {
            pr_info("can't init mlme_ext_priv\n");
            goto netdev_open_error;
        }

        if (padapter->intf_start)
            padapter->intf_start(padapter);

        rtw_proc_init_one(pnetdev);

        rtw_led_control(padapter, LED_CTL_NO_LINK);

        padapter->bup = true;
    }
    padapter->net_closed = false;

    _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);

    padapter->pwrctrlpriv.bips_processing = false;
    rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);

    if (!rtw_netif_queue_stopped(pnetdev))
        netif_tx_start_all_queues(pnetdev);
    else
        netif_tx_wake_all_queues(pnetdev);

netdev_open_normal_process:
    RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - dev_open\n"));
    DBG_88E("-88eu_drv - drv_open, bup =%d\n", padapter->bup);
    return 0;

netdev_open_error:
    padapter->bup = false;
    netif_carrier_off(pnetdev);
    netif_tx_stop_all_queues(pnetdev);
    RT_TRACE(_module_os_intfs_c_, _drv_err_,
             ("-88eu_drv - dev_open, fail!\n"));
    DBG_88E("-88eu_drv - drv_open fail, bup =%d\n", padapter->bup);
    return -1;
}
/**============================================================================
  @brief hdd_tx_fetch_packet_cbk() - Callback function invoked by TL
  to fetch a packet for transmission.

  @param vosContext   : [in] pointer to VOS context
  @param staId        : [in] Station for which TL is requesting a pkt
  @param ac           : [in] access category requested by TL
  @param pVosPacket   : [out] pointer to VOS packet packet pointer
  @param pPktMetaInfo : [out] pointer to meta info for the pkt

  @return             : VOS_STATUS_E_EMPTY if no packets to transmit
                      : VOS_STATUS_E_FAILURE if any errors encountered
                      : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_tx_fetch_packet_cbk( v_VOID_t *vosContext,
                                    v_U8_t *pStaId,
                                    WLANTL_ACEnumType ac,
                                    vos_pkt_t **ppVosPacket,
                                    WLANTL_MetaInfoType *pPktMetaInfo )
{
   VOS_STATUS status = VOS_STATUS_E_FAILURE;
   hdd_adapter_t *pAdapter = NULL;
   hdd_context_t *pHddCtx = NULL;
   hdd_list_node_t *anchor = NULL;
   skb_list_node_t *pktNode = NULL;
   struct sk_buff *skb = NULL;
   vos_pkt_t *pVosPacket = NULL;
   v_MACADDR_t* pDestMacAddress = NULL;
   v_TIME_t timestamp;
   WLANTL_ACEnumType newAc;
   v_SIZE_t size = 0;
   tANI_U8 acAdmitted, i;

   //Sanity check on inputs
   if ( ( NULL == vosContext ) ||
        ( NULL == pStaId ) ||
        ( NULL == ppVosPacket ) ||
        ( NULL == pPktMetaInfo ) )
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                 "%s: Null Params being passed", __FUNCTION__);
      return VOS_STATUS_E_FAILURE;
   }

   //Get the HDD context.
   pHddCtx = (hdd_context_t *)vos_get_context( VOS_MODULE_ID_HDD, vosContext );
   if(pHddCtx == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                 "%s: HDD adapter context is Null", __FUNCTION__);
      return VOS_STATUS_E_FAILURE;
   }

   pAdapter = pHddCtx->sta_to_adapter[*pStaId];
   if( NULL == pAdapter )
   {
      VOS_ASSERT(0);
      return VOS_STATUS_E_FAILURE;
   }

   ++pAdapter->hdd_stats.hddTxRxStats.txFetched;

   *ppVosPacket = NULL;

   //Make sure the AC being asked for is sane
   if( ac >= WLANTL_MAX_AC || ac < 0)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                 "%s: Invalid AC %d passed by TL", __FUNCTION__, ac);
      return VOS_STATUS_E_FAILURE;
   }

   ++pAdapter->hdd_stats.hddTxRxStats.txFetchedAC[ac];

#ifdef HDD_WMM_DEBUG
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
              "%s: AC %d passed by TL", __FUNCTION__, ac);
#endif // HDD_WMM_DEBUG

   // We find an AC with packets
   // or we determine we have no more packets to send
   // HDD is not allowed to change AC.

   // has this AC been admitted? or
   // To allow EAPOL packets when not authenticated
   if (unlikely((0==pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessAllowed) &&
                (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.uIsAuthenticated))
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchEmpty;
#ifdef HDD_WMM_DEBUG
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                 "%s: no packets pending", __FUNCTION__);
#endif // HDD_WMM_DEBUG
      return VOS_STATUS_E_FAILURE;
   }

   // do we have any packets pending in this AC?
   hdd_list_size( &pAdapter->wmm_tx_queue[ac], &size );
   if( size > 0 )
   {
      // yes, so process it
#ifdef HDD_WMM_DEBUG
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                 "%s: AC %d has packets pending", __FUNCTION__, ac);
#endif // HDD_WMM_DEBUG
   }
   else
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchEmpty;
#ifdef HDD_WMM_DEBUG
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                 "%s: no packets pending", __FUNCTION__);
#endif // HDD_WMM_DEBUG
      return VOS_STATUS_E_FAILURE;
   }

   //Get the VOS packet. I don't want to dequeue and enqueue again if we are
   //out of VOS resources. This simplifies the locking and unlocking of the
   //Tx queue.
   status = vos_pkt_wrap_data_packet( &pVosPacket,
                                      VOS_PKT_TYPE_TX_802_3_DATA,
                                      NULL, //OS Pkt is not being passed
                                      hdd_tx_low_resource_cbk,
                                      pAdapter );

   if (status == VOS_STATUS_E_ALREADY || status == VOS_STATUS_E_RESOURCES)
   {
      //Remember VOS is in a low resource situation
      pAdapter->isVosOutOfResource = VOS_TRUE;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchLowResources;
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: VOSS in Low Resource scenario", __FUNCTION__);
      //TL will now think we have no more packets in this AC
      return VOS_STATUS_E_FAILURE;
   }

   //Remove the packet from the queue
   spin_lock_bh(&pAdapter->wmm_tx_queue[ac].lock);
   status = hdd_list_remove_front( &pAdapter->wmm_tx_queue[ac], &anchor );
   spin_unlock_bh(&pAdapter->wmm_tx_queue[ac].lock);

   if(VOS_STATUS_SUCCESS == status)
   {
      //If success then we got a valid packet from some AC
      pktNode = list_entry(anchor, skb_list_node_t, anchor);
      skb = pktNode->skb;
   }
   else
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: Error in de-queuing skb from Tx queue status = %d",
                 __FUNCTION__, status );
      vos_pkt_return_packet(pVosPacket);
      return VOS_STATUS_E_FAILURE;
   }

   //Attach skb to VOS packet.
   status = vos_pkt_set_os_packet( pVosPacket, skb );
   if (status != VOS_STATUS_SUCCESS)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: Error attaching skb", __FUNCTION__);
      vos_pkt_return_packet(pVosPacket);
      ++pAdapter->stats.tx_dropped;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      kfree_skb(skb);
      return VOS_STATUS_E_FAILURE;
   }

   //Just being paranoid. To be removed later
   if(pVosPacket == NULL)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: VOS packet returned by VOSS is NULL", __FUNCTION__);
      ++pAdapter->stats.tx_dropped;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError;
      kfree_skb(skb);
      return VOS_STATUS_E_FAILURE;
   }

   //Return VOS packet to TL;
   *ppVosPacket = pVosPacket;

   //Fill out the meta information needed by TL
   //FIXME This timestamp is really the time stamp of wrap_data_packet
   vos_pkt_get_timestamp( pVosPacket, &timestamp );
   pPktMetaInfo->usTimeStamp = (v_U16_t)timestamp;

   if(pAdapter->sessionCtx.station.conn_info.uIsAuthenticated == VOS_TRUE)
      pPktMetaInfo->ucIsEapol = 0;
   else
      pPktMetaInfo->ucIsEapol = hdd_IsEAPOLPacket( pVosPacket ) ? 1 : 0;

#ifdef FEATURE_WLAN_WAPI
   // Override usIsEapol value when its zero for WAPI case
   pPktMetaInfo->ucIsWai = hdd_IsWAIPacket( pVosPacket ) ? 1 : 0;
#endif /* FEATURE_WLAN_WAPI */

   if ((HDD_WMM_USER_MODE_NO_QOS == pHddCtx->cfg_ini->WmmMode) ||
       (!pAdapter->hddWmmStatus.wmmQap))
   {
      // either we don't want QoS or the AP doesn't support QoS
      pPktMetaInfo->ucUP = 0;
      pPktMetaInfo->ucTID = 0;
   }
   else
   {
      /* 1. Check if ACM is set for this AC
       * 2. If set, check if this AC had already been admitted
       * 3. If not already admitted, downgrade the UP to next best UP
       */
      if(!pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessRequired ||
         pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcTspecValid)
      {
        pPktMetaInfo->ucUP = pktNode->userPriority;
        pPktMetaInfo->ucTID = pPktMetaInfo->ucUP;
      }
      else
      {
        //Downgrade the UP
        acAdmitted = pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcTspecValid;
        newAc = WLANTL_AC_BK;
        for (i=ac-1; i>0; i--)
        {
           if (pAdapter->hddWmmStatus.wmmAcStatus[i].wmmAcAccessRequired == 0)
           {
               newAc = i;
               break;
           }
        }
        pPktMetaInfo->ucUP = hddWmmAcToHighestUp[newAc];
        pPktMetaInfo->ucTID = pPktMetaInfo->ucUP;
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
                   "Downgrading UP %d to UP %d ",
                   pktNode->userPriority, pPktMetaInfo->ucUP);
      }
   }

   pPktMetaInfo->ucType = 0;          //FIXME Don't know what this is
   pPktMetaInfo->ucDisableFrmXtl = 0; //802.3 frame so we need to xlate
   if ( 1 < size )
   {
      pPktMetaInfo->bMorePackets = 1; //HDD has more packets to send
   }
   else
   {
      pPktMetaInfo->bMorePackets = 0;
   }

   //Extract the destination address from ethernet frame
   pDestMacAddress = (v_MACADDR_t*)skb->data;
   pPktMetaInfo->ucBcast = vos_is_macaddr_broadcast( pDestMacAddress ) ? 1 : 0;
   pPktMetaInfo->ucMcast = vos_is_macaddr_group( pDestMacAddress ) ? 1 : 0;

   // if we are in a backpressure situation see if we can turn the hose back on
   if ( (pAdapter->isTxSuspended[ac]) &&
        (size <= HDD_TX_QUEUE_LOW_WATER_MARK) )
   {
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDePressured;
      ++pAdapter->hdd_stats.hddTxRxStats.txFetchDePressuredAC[ac];
      VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
                 "%s: TX queue[%d] re-enabled", __FUNCTION__, ac);
      pAdapter->isTxSuspended[ac] = VOS_FALSE;
      netif_tx_wake_queue(netdev_get_tx_queue(pAdapter->dev,
                                              skb_get_queue_mapping(skb) ));
   }

   // We're giving the packet to TL so consider it transmitted from
   // a statistics perspective. We account for it here instead of
   // when the packet is returned for two reasons. First, TL will
   // manipulate the skb to the point where the len field is not
   // accurate, leading to inaccurate byte counts if we account for
   // it later. Second, TL does not provide any feedback as to
   // whether or not the packet was successfully sent over the air,
   // so the packet counts will be the same regardless of where we
   // account for them
   pAdapter->stats.tx_bytes += skb->len;
   ++pAdapter->stats.tx_packets;
   ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeued;
   ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeuedAC[ac];

   if(pHddCtx->cfg_ini->thermalMitigationEnable)
   {
      if(mutex_lock_interruptible(&pHddCtx->tmInfo.tmOperationLock))
      {
         VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                    "%s: Tm Lock fail", __FUNCTION__);
         return VOS_STATUS_E_FAILURE;
      }
      if(WLAN_HDD_TM_LEVEL_1 < pHddCtx->tmInfo.currentTmLevel)
      {
         if(0 == pHddCtx->tmInfo.txFrameCount)
         {
            /* Just recovered from sleep timeout */
            pHddCtx->tmInfo.lastOpenTs = timestamp;
         }

         if(((timestamp - pHddCtx->tmInfo.lastOpenTs) >
             (pHddCtx->tmInfo.tmAction.txOperationDuration / 10)) &&
            (pHddCtx->tmInfo.txFrameCount >=
             pHddCtx->tmInfo.tmAction.txBlockFrameCountThreshold))
         {
            spin_lock(&pAdapter->wmm_tx_queue[ac].lock);
            /* During TX open duration, TX frame count is larger than threshold
             * Block TX during Sleep time */
            netif_tx_stop_all_queues(pAdapter->dev);
            spin_unlock(&pAdapter->wmm_tx_queue[ac].lock);
            pHddCtx->tmInfo.lastblockTs = timestamp;
            if(VOS_TIMER_STATE_STOPPED ==
               vos_timer_getCurrentState(&pHddCtx->tmInfo.txSleepTimer))
            {
               vos_timer_start(&pHddCtx->tmInfo.txSleepTimer,
                               pHddCtx->tmInfo.tmAction.txSleepDuration);
            }
         }
         else if(((timestamp - pHddCtx->tmInfo.lastOpenTs) >
                  (pHddCtx->tmInfo.tmAction.txOperationDuration / 10)) &&
                 (pHddCtx->tmInfo.txFrameCount <
                  pHddCtx->tmInfo.tmAction.txBlockFrameCountThreshold))
         {
            /* During TX open duration, TX frame count is less than threshold
             * Reset count and timestamp to prepare next cycle */
            pHddCtx->tmInfo.lastOpenTs = timestamp;
            pHddCtx->tmInfo.txFrameCount = 0;
         }
         else
         {
            /* Do Nothing */
         }
         pHddCtx->tmInfo.txFrameCount++;
      }
      mutex_unlock(&pHddCtx->tmInfo.tmOperationLock);
   }

#ifdef HDD_WMM_DEBUG
   VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
              "%s: Valid VOS PKT returned to TL", __FUNCTION__);
#endif // HDD_WMM_DEBUG

   return status;
}
/*
 * ---------------------------------------------------------------------------
 *  unregister_unifi_sdio
 *
 *      Call from SDIO driver when it detects that UniFi has been removed.
 *
 *  Arguments:
 *      bus_id          Number of the card that was ejected.
 *
 *  Returns:
 *      None.
 * ---------------------------------------------------------------------------
 */
static void unregister_unifi_sdio(int bus_id)
{
    unifi_priv_t *priv;
    int interfaceTag = 0;
    u8 reason = CONFIG_IND_EXIT;

    if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
        unifi_error(NULL, "unregister_unifi_sdio: invalid device %d\n",
                    bus_id);
        return;
    }

    priv = Unifi_instances[bus_id];
    if (priv == NULL) {
        unifi_error(priv, "unregister_unifi_sdio: device %d is not registered\n",
                    bus_id);
        return;
    }

    /* Stop the network traffic before freeing the core. */
    for (interfaceTag = 0; interfaceTag < priv->totalInterfaceCount;
         interfaceTag++) {
        netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
        if (interfacePriv->netdev_registered) {
            netif_carrier_off(priv->netdev[interfaceTag]);
            netif_tx_stop_all_queues(priv->netdev[interfaceTag]);
        }
    }

#ifdef CSR_NATIVE_LINUX
    /*
     * If the unifi thread was started, signal it to stop.  This
     * should cause any userspace processes with open unifi device to
     * close them.
     */
    uf_stop_thread(priv, &priv->bh_thread);

    /* Unregister the interrupt handler */
    if (csr_sdio_linux_remove_irq(priv->sdio)) {
        unifi_notice(priv,
                     "csr_sdio_linux_remove_irq failed to talk to card.\n");
    }

    /* Ensure no MLME functions are waiting on the mlme_event semaphore. */
    uf_abort_mlme(priv);
#endif /* CSR_NATIVE_LINUX */

    ul_log_config_ind(priv, &reason, sizeof(u8));

    /* Deregister the UDI hook from the core. */
    unifi_remove_udi_hook(priv->card, logging_handler);

    uf_put_instance(bus_id);

    /*
     * Wait until the device is cleaned up. i.e., when all userspace
     * processes have closed any open unifi devices.
     */
    wait_event(Unifi_cleanup_wq, In_use[bus_id] == UNIFI_DEV_CLEANUP);
    unifi_trace(NULL, UDBG5, "Received clean up event\n");

    /* Now we can free the private context and the char device nodes */
    cleanup_unifi_sdio(priv);
} /* unregister_unifi_sdio() */
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
    struct iwm_priv *iwm = ndev_to_iwm(netdev);
    struct wireless_dev *wdev = iwm_to_wdev(iwm);
    struct iwm_tx_info *tx_info;
    struct iwm_tx_queue *txq;
    struct iwm_sta_info *sta_info;
    u8 *dst_addr, sta_id;
    u16 queue;
    int ret;

    if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
        IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
                   "not associated\n");
        netif_tx_stop_all_queues(netdev);
        goto drop;
    }

    queue = skb_get_queue_mapping(skb);
    BUG_ON(queue >= IWM_TX_DATA_QUEUES);

    txq = &iwm->txq[queue];

    if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
        (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
        IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
        netif_stop_subqueue(netdev, queue);
        return NETDEV_TX_BUSY;
    }

    ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
                                   iwm->bssid, 0);
    if (ret) {
        IWM_ERR(iwm, "build wifi header failed\n");
        goto drop;
    }

    dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

    for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
        sta_info = &iwm->sta_table[sta_id];
        if (sta_info->valid &&
            !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
            break;
    }

    if (sta_id == IWM_STA_TABLE_NUM) {
        IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
                dst_addr);
        goto drop;
    }

    tx_info = skb_to_tx_info(skb);
    tx_info->sta = sta_id;
    tx_info->color = sta_info->color;
    if (sta_info->qos)
        tx_info->tid = skb->priority;
    else
        tx_info->tid = IWM_UMAC_MGMT_TID;

    spin_lock_bh(&iwm->txq[queue].lock);
    skb_queue_tail(&iwm->txq[queue].queue, skb);
    spin_unlock_bh(&iwm->txq[queue].lock);

    queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

    netdev->stats.tx_packets++;
    netdev->stats.tx_bytes += skb->len;
    return NETDEV_TX_OK;

drop:
    netdev->stats.tx_dropped++;
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}
static int cpmac_poll(struct napi_struct *napi, int budget)
{
    struct sk_buff *skb;
    struct cpmac_desc *desc, *restart;
    struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
    int received = 0, processed = 0;

    spin_lock(&priv->rx_lock);
    if (unlikely(!priv->rx_head)) {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                   priv->dev->name);
        spin_unlock(&priv->rx_lock);
        netif_rx_complete(priv->dev, napi);
        return 0;
    }

    desc = priv->rx_head;
    restart = NULL;
    while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
        processed++;

        if ((desc->dataflags & CPMAC_EOQ) != 0) {
            /* The last update to eoq->hw_next didn't happen
             * soon enough, and the receiver stopped here.
             * Remember this descriptor so we can restart
             * the receiver after freeing some space.
             */
            if (unlikely(restart)) {
                if (netif_msg_rx_err(priv))
                    printk(KERN_ERR "%s: poll found a"
                           " duplicate EOQ: %p and %p\n",
                           priv->dev->name, restart, desc);
                goto fatal_error;
            }

            restart = desc->next;
        }

        skb = cpmac_rx_one(priv, desc);
        if (likely(skb)) {
            netif_receive_skb(skb);
            received++;
        }
        desc = desc->next;
    }

    if (desc != priv->rx_head) {
        /* We freed some buffers, but not the whole ring,
         * add what we did free to the rx list */
        desc->prev->hw_next = (u32)0;
        priv->rx_head->prev->hw_next = priv->rx_head->mapping;
    }

    /* Optimization: If we did not actually process an EOQ (perhaps because
     * of quota limits), check to see if the tail of the queue has EOQ set.
     * We should immediately restart in that case so that the receiver can
     * restart and run in parallel with more packet processing.
     * This lets us handle slightly larger bursts before running
     * out of ring space (assuming dev->weight < ring_size) */
    if (!restart &&
        (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
            == CPMAC_EOQ &&
        (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
        /* reset EOQ so the poll loop (above) doesn't try to
         * restart this when it eventually gets to this descriptor.
         */
        priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
        restart = priv->rx_head;
    }

    if (restart) {
        priv->dev->stats.rx_errors++;
        priv->dev->stats.rx_fifo_errors++;
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: rx dma ring overrun\n",
                   priv->dev->name);

        if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
            if (netif_msg_drv(priv))
                printk(KERN_ERR "%s: cpmac_poll is trying to "
                       "restart rx from a descriptor that's "
                       "not free: %p\n",
                       priv->dev->name, restart);
            goto fatal_error;
        }

        cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
    }

    priv->rx_head = desc;
    spin_unlock(&priv->rx_lock);
    if (unlikely(netif_msg_rx_status(priv)))
        printk(KERN_DEBUG "%s: poll processed %d packets\n",
               priv->dev->name, received);

    if (processed == 0) {
        /* we ran out of packets to read,
         * revert to interrupt-driven mode */
        netif_rx_complete(priv->dev, napi);
        cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
        return 0;
    }

    return 1;

fatal_error:
    /* Something went horribly wrong.
     * Reset hardware to try to recover rather than wedging.
     */
    if (netif_msg_drv(priv)) {
        printk(KERN_ERR "%s: cpmac_poll is confused. "
               "Resetting hardware\n", priv->dev->name);
        cpmac_dump_all_desc(priv->dev);
        printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
               priv->dev->name,
               cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
               cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
    }

    spin_unlock(&priv->rx_lock);
    netif_rx_complete(priv->dev, napi);
    netif_tx_stop_all_queues(priv->dev);
    napi_disable(&priv->napi);

    atomic_inc(&priv->reset_pending);
    cpmac_hw_stop(priv->dev);
    if (!schedule_work(&priv->reset_work))
        atomic_dec(&priv->reset_pending);
    return 0;
}