/*---------------------------------------------------------------- * p80211knetdev_hard_start_xmit * * Linux netdevice method for transmitting a frame. * * Arguments: * skb Linux sk_buff containing the frame. * netdev Linux netdevice. * * Side effects: * If the lower layers report that buffers are full. netdev->tbusy * will be set to prevent higher layers from sending more traffic. * * Note: If this function returns non-zero, higher layers retain * ownership of the skb. * * Returns: * zero on success, non-zero on failure. ----------------------------------------------------------------*/ static int p80211knetdev_hard_start_xmit( struct sk_buff *skb, netdevice_t *netdev) { int result = 0; int txresult = -1; wlandevice_t *wlandev = netdev->ml_priv; p80211_hdr_t p80211_hdr; p80211_metawep_t p80211_wep; DBFENTER; if (skb == NULL) { return 0; } if (wlandev->state != WLAN_DEVICE_OPEN) { result = 1; goto failed; } memset(&p80211_hdr, 0, sizeof(p80211_hdr_t)); memset(&p80211_wep, 0, sizeof(p80211_metawep_t)); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,38) ) if ( test_and_set_bit(0, (void*)&(netdev->tbusy)) != 0 ) { /* We've been called w/ tbusy set, has the tx */ /* path stalled? */ WLAN_LOG_DEBUG(1, "called when tbusy set\n"); result = 1; goto failed; } #else if ( netif_queue_stopped(netdev) ) { WLAN_LOG_DEBUG(1, "called when queue stopped.\n"); result = 1; goto failed; } netif_stop_queue(netdev); /* No timeout handling here, 2.3.38+ kernels call the * timeout function directly. * TODO: Add timeout handling. */ #endif /* Check to see that a valid mode is set */ switch( wlandev->macmode ) { case WLAN_MACMODE_IBSS_STA: case WLAN_MACMODE_ESS_STA: case WLAN_MACMODE_ESS_AP: break; default: /* Mode isn't set yet, just drop the frame * and return success . * TODO: we need a saner way to handle this */ if(skb->protocol != ETH_P_80211_RAW) { p80211netdev_start_queue(wlandev); WLAN_LOG_NOTICE( "Tx attempt prior to association, frame dropped.\n"); wlandev->linux_stats.tx_dropped++; result = 0; goto failed; } break; } /* Check for raw transmits */ if(skb->protocol == ETH_P_80211_RAW) { if (!capable(CAP_NET_ADMIN)) { result = 1; goto failed; } /* move the header over */ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t)); skb_pull(skb, sizeof(p80211_hdr_t)); } else { if ( skb_ether_to_p80211(wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0 ) { /* convert failed */ WLAN_LOG_DEBUG(1, "ether_to_80211(%d) failed.\n", wlandev->ethconv); result = 1; goto failed; } } if ( wlandev->txframe == NULL ) { result = 1; goto failed; } netdev->trans_start = jiffies; wlandev->linux_stats.tx_packets++; /* count only the packet payload */ wlandev->linux_stats.tx_bytes += skb->len; txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep); if ( txresult == 0) { /* success and more buf */ /* avail, re: hw_txdata */ p80211netdev_wake_queue(wlandev); result = 0; } else if ( txresult == 1 ) { /* success, no more avail */ WLAN_LOG_DEBUG(3, "txframe success, no more bufs\n"); /* netdev->tbusy = 1; don't set here, irqhdlr */ /* may have already cleared it */ result = 0; } else if ( txresult == 2 ) { /* alloc failure, drop frame */ WLAN_LOG_DEBUG(3, "txframe returned alloc_fail\n"); result = 1; } else { /* buffer full or queue busy, drop frame. 
*/ WLAN_LOG_DEBUG(3, "txframe returned full or busy\n"); result = 1; } failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) kfree(p80211_wep.data); /* we always free the skb here, never in a lower level. */ if (!result) dev_kfree_skb(skb); DBFEXIT; return result; }
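/*
 * A minimal sketch (not from the wlan-ng source above): the pre-2.3.38 branch
 * gates transmission on the legacy netdev->tbusy bit, while on later kernels
 * the same gate is expressed with the netif queue helpers, roughly as below.
 * Assumes only the standard <linux/netdevice.h> API.
 */
static int sketch_tx_gate(struct net_device *netdev)
{
	if (netif_queue_stopped(netdev))
		return 1;		/* modern equivalent of finding tbusy already set */

	netif_stop_queue(netdev);	/* modern equivalent of test_and_set_bit(0, &netdev->tbusy) */
	return 0;			/* caller may proceed with the transmit */
}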
static int netdev_open(struct net_device *pnetdev) { uint status; _adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev); struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - dev_open\n")); DBG_8192C("+871x_drv - drv_open, bup=%d\n", padapter->bup); if(pwrctrlpriv->ps_flag == _TRUE){ padapter->net_closed = _FALSE; goto netdev_open_normal_process; } if(padapter->bup == _FALSE) { padapter->bDriverStopped = _FALSE; padapter->bSurpriseRemoved = _FALSE; padapter->bCardDisableWOHSM = _FALSE; status = rtw_hal_init(padapter); if (status ==_FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("rtl871x_hal_init(): Can't init h/w!\n")); goto netdev_open_error; } DBG_8192C("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr)); status=rtw_start_drv_threads(padapter); if(status ==_FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize driver software resource Failed!\n")); goto netdev_open_error; } if (init_hw_mlme_ext(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("can't init mlme_ext_priv\n")); goto netdev_open_error; } #ifdef CONFIG_DRVEXT_MODULE init_drvext(padapter); #endif if(padapter->intf_start) { padapter->intf_start(padapter); } #ifdef CONFIG_PROC_DEBUG #ifndef RTK_DMP_PLATFORM rtw_proc_init_one(pnetdev); #endif #endif rtw_led_control(padapter, LED_CTL_NO_LINK); padapter->bup = _TRUE; } padapter->net_closed = _FALSE; _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000); if(( pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE ) ||(padapter->pwrctrlpriv.bHWPwrPindetect)) { padapter->pwrctrlpriv.bips_processing = _FALSE; rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); } //netif_carrier_on(pnetdev);//call this func when rtw_joinbss_event_callback return success if(!netif_queue_stopped(pnetdev)) netif_start_queue(pnetdev); else netif_wake_queue(pnetdev); #ifdef CONFIG_BR_EXT #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) rcu_read_lock(); #endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) //if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) { //struct net_bridge *br = pnetdev->br_port->br;//->dev->dev_addr; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) if (pnetdev->br_port) #else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) if (rcu_dereference(padapter->pnetdev->rx_handler_data)) #endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) { struct net_device *br_netdev; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) br_netdev = dev_get_by_name(CONFIG_BR_EXT_BRNAME); #else // (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) struct net *devnet = NULL; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) devnet = pnetdev->nd_net; #else // (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) devnet = dev_net(pnetdev); #endif // (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) br_netdev = dev_get_by_name(devnet, CONFIG_BR_EXT_BRNAME); #endif // (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) if (br_netdev) { memcpy(padapter->br_mac, br_netdev->dev_addr, ETH_ALEN); dev_put(br_netdev); } else printk("%s()-%d: dev_get_by_name(%s) failed!", __FUNCTION__, __LINE__, CONFIG_BR_EXT_BRNAME); } padapter->ethBrExtInfo.addPPPoETag = 1; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) rcu_read_unlock(); #endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) #endif // CONFIG_BR_EXT netdev_open_normal_process: RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - dev_open\n")); DBG_8192C("-871x_drv - drv_open, bup=%d\n", padapter->bup); return 0; netdev_open_error: padapter->bup = _FALSE; 
netif_carrier_off(pnetdev); netif_stop_queue(pnetdev); RT_TRACE(_module_os_intfs_c_,_drv_err_,("-871x_drv - dev_open, fail!\n")); DBG_8192C("-871x_drv - drv_open fail, bup=%d\n", padapter->bup); return (-1); }
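/*
 * A minimal sketch of the open/error shape used by netdev_open() above.
 * sketch_hw_init() and sketch_start_threads() are stand-ins for the
 * driver-specific rtw_hal_init()/rtw_start_drv_threads() calls; they are not
 * part of the original code.
 */
static int sketch_hw_init(struct net_device *ndev) { return 0; }
static int sketch_start_threads(struct net_device *ndev) { return 0; }

static int sketch_ndo_open(struct net_device *ndev)
{
	if (sketch_hw_init(ndev) != 0)
		goto err;
	if (sketch_start_threads(ndev) != 0)
		goto err;

	/* Carrier is raised later, when association succeeds; only the TX
	 * queue is started here. */
	netif_start_queue(ndev);
	return 0;

err:
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	return -EIO;
}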
/*---------------------------------------------------------------- * p80211knetdev_hard_start_xmit * * Linux netdevice method for transmitting a frame. * * Arguments: * skb Linux sk_buff containing the frame. * netdev Linux netdevice. * * Side effects: * If the lower layers report that buffers are full. netdev->tbusy * will be set to prevent higher layers from sending more traffic. * * Note: If this function returns non-zero, higher layers retain * ownership of the skb. * * Returns: * zero on success, non-zero on failure. ----------------------------------------------------------------*/ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, netdevice_t *netdev) { int result = 0; int txresult = -1; wlandevice_t *wlandev = netdev->ml_priv; p80211_hdr_t p80211_hdr; p80211_metawep_t p80211_wep; if (skb == NULL) return NETDEV_TX_OK; if (wlandev->state != WLAN_DEVICE_OPEN) { result = 1; goto failed; } memset(&p80211_hdr, 0, sizeof(p80211_hdr_t)); memset(&p80211_wep, 0, sizeof(p80211_metawep_t)); if (netif_queue_stopped(netdev)) { pr_debug("called when queue stopped.\n"); result = 1; goto failed; } netif_stop_queue(netdev); /* Check to see that a valid mode is set */ switch (wlandev->macmode) { case WLAN_MACMODE_IBSS_STA: case WLAN_MACMODE_ESS_STA: case WLAN_MACMODE_ESS_AP: break; default: /* Mode isn't set yet, just drop the frame * and return success . * TODO: we need a saner way to handle this */ if (skb->protocol != ETH_P_80211_RAW) { netif_start_queue(wlandev->netdev); printk(KERN_NOTICE "Tx attempt prior to association, frame dropped.\n"); wlandev->linux_stats.tx_dropped++; result = 0; goto failed; } break; } /* Check for raw transmits */ if (skb->protocol == ETH_P_80211_RAW) { if (!capable(CAP_NET_ADMIN)) { result = 1; goto failed; } /* move the header over */ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t)); skb_pull(skb, sizeof(p80211_hdr_t)); } else { if (skb_ether_to_p80211 (wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0) { /* convert failed */ pr_debug("ether_to_80211(%d) failed.\n", wlandev->ethconv); result = 1; goto failed; } } if (wlandev->txframe == NULL) { result = 1; goto failed; } netdev->trans_start = jiffies; wlandev->linux_stats.tx_packets++; /* count only the packet payload */ wlandev->linux_stats.tx_bytes += skb->len; txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep); if (txresult == 0) { /* success and more buf */ /* avail, re: hw_txdata */ netif_wake_queue(wlandev->netdev); result = NETDEV_TX_OK; } else if (txresult == 1) { /* success, no more avail */ pr_debug("txframe success, no more bufs\n"); /* netdev->tbusy = 1; don't set here, irqhdlr */ /* may have already cleared it */ result = NETDEV_TX_OK; } else if (txresult == 2) { /* alloc failure, drop frame */ pr_debug("txframe returned alloc_fail\n"); result = NETDEV_TX_BUSY; } else { /* buffer full or queue busy, drop frame. */ pr_debug("txframe returned full or busy\n"); result = NETDEV_TX_BUSY; } failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) kzfree(p80211_wep.data); /* we always free the skb here, never in a lower level. */ if (!result) dev_kfree_skb(skb); return result; }
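/*
 * A condensed sketch of the ownership rule stated in the header comment
 * above: on NETDEV_TX_OK the driver has consumed the skb and must eventually
 * free it; on NETDEV_TX_BUSY the skb is handed back to the stack untouched
 * and will be retransmitted. sketch_queue_frame() is a hypothetical stand-in
 * for the device hand-off (e.g. wlandev->txframe()).
 */
static int sketch_queue_frame(struct net_device *netdev, struct sk_buff *skb)
{
	return netif_queue_stopped(netdev) ? -EBUSY : 0;	/* stand-in only */
}

static netdev_tx_t sketch_xmit_contract(struct sk_buff *skb,
					struct net_device *netdev)
{
	if (sketch_queue_frame(netdev, skb) != 0) {
		/* Frame NOT consumed: do not free the skb here; the stack
		 * still owns it and will retry the transmit. */
		return NETDEV_TX_BUSY;
	}
	/* Frame consumed: the driver now owns the skb and must free it (a
	 * real driver usually does this from its TX-complete handler). */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}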
/* * In Ad-Hoc mode, the IBSS is created if not found in scan list. * In both Ad-Hoc and infra mode, an deauthentication is performed * first. */ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct cfg80211_ssid *req_ssid) { int ret; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_bssdescriptor *bss_desc = NULL; priv->scan_block = false; if (bss) { /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL); if (!bss_desc) { dev_err(priv->adapter->dev, " failed to alloc bss_desc\n"); return -ENOMEM; } ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) goto done; } if (priv->bss_mode == NL80211_IFTYPE_STATION) { /* Infra mode */ ret = mwifiex_deauthenticate(priv, NULL); if (ret) goto done; ret = mwifiex_check_network_compatibility(priv, bss_desc); if (ret) goto done; dev_dbg(adapter->dev, "info: SSID found in scan list ... " "associating...\n"); if (!netif_queue_stopped(priv->netdev)) mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); /* Clear any past association response stored for * application retrieval */ priv->assoc_rsp_size = 0; ret = mwifiex_associate(priv, bss_desc); /* If auth type is auto and association fails using open mode, * try to connect using shared mode */ if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && priv->sec_info.is_authtype_auto && priv->sec_info.wep_enabled) { priv->sec_info.authentication_mode = NL80211_AUTHTYPE_SHARED_KEY; ret = mwifiex_associate(priv, bss_desc); } if (bss) cfg80211_put_bss(bss); } else { /* Adhoc mode */ /* If the requested SSID matches current SSID, return */ if (bss_desc && bss_desc->ssid.ssid_len && (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. ssid, &bss_desc->ssid))) { kfree(bss_desc); return 0; } /* Exit Adhoc mode first */ dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n"); ret = mwifiex_deauthenticate(priv, NULL); if (ret) goto done; priv->adhoc_is_link_sensed = false; ret = mwifiex_check_network_compatibility(priv, bss_desc); if (!netif_queue_stopped(priv->netdev)) mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); if (!ret) { dev_dbg(adapter->dev, "info: network found in scan" " list. Joining...\n"); ret = mwifiex_adhoc_join(priv, bss_desc); if (bss) cfg80211_put_bss(bss); } else { dev_dbg(adapter->dev, "info: Network not found in " "the list, creating adhoc with ssid = %s\n", req_ssid->ssid); ret = mwifiex_adhoc_start(priv, req_ssid); } } done: kfree(bss_desc); return ret; }
int qdisc_restart(struct net_device *dev) { struct Qdisc *q = dev->qdisc; struct sk_buff *skb; /* Dequeue packet */ if ((skb = q->dequeue(q)) != NULL) { unsigned nolock = (dev->features & NETIF_F_LLTX); /* * When the driver has LLTX set it does its own locking * in start_xmit. No need to add additional overhead by * locking again. These checks are worth it because * even uncongested locks can be quite expensive. * The driver can do trylock like here too, in case * of lock congestion it should return -1 and the packet * will be requeued. */ if (!nolock) { if (!spin_trylock(&dev->xmit_lock)) { collision: /* So, someone grabbed the driver. */ /* It may be transient configuration error, when hard_start_xmit() recurses. We detect it by checking xmit owner and drop the packet when deadloop is detected. */ if (dev->xmit_lock_owner == smp_processor_id()) { kfree_skb(skb); if (net_ratelimit()) printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); return -1; } __get_cpu_var(netdev_rx_stat).cpu_collision++; goto requeue; } /* Remember that the driver is grabbed by us. */ dev->xmit_lock_owner = smp_processor_id(); } { /* And release queue */ spin_unlock(&dev->queue_lock); if (!netif_queue_stopped(dev)) { int ret; if (netdev_nit) dev_queue_xmit_nit(skb, dev); ret = dev->hard_start_xmit(skb, dev); if (ret == NETDEV_TX_OK) { if (!nolock) { dev->xmit_lock_owner = -1; spin_unlock(&dev->xmit_lock); } spin_lock(&dev->queue_lock); return -1; } if (ret == NETDEV_TX_LOCKED && nolock) { spin_lock(&dev->queue_lock); goto collision; } } /* NETDEV_TX_BUSY - we need to requeue */ /* Release the driver */ if (!nolock) { dev->xmit_lock_owner = -1; spin_unlock(&dev->xmit_lock); } spin_lock(&dev->queue_lock); q = dev->qdisc; } /* Device kicked us out :( This is possible in three cases: 0. driver is locked 1. fastroute is enabled 2. device cannot determine busy state before start of transmission (f.e. dialout) 3. device is buggy (ppp) */ requeue: q->ops->requeue(skb, q); netif_schedule(dev); return 1; } return q->q.qlen; }
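/*
 * A condensed sketch of the deadloop detection described in the comment
 * above, using the same xmit_lock/xmit_lock_owner fields as this era of
 * struct net_device. Not a drop-in replacement for qdisc_restart(), only an
 * illustration of the locking rule.
 */
static int sketch_try_grab_driver(struct net_device *dev, struct sk_buff *skb)
{
	if (spin_trylock(&dev->xmit_lock)) {
		dev->xmit_lock_owner = smp_processor_id();
		return 1;			/* caller may call hard_start_xmit() */
	}
	if (dev->xmit_lock_owner == smp_processor_id()) {
		/* hard_start_xmit() recursed back into the qdisc on this CPU:
		 * drop the packet rather than spin on a lock we already hold. */
		kfree_skb(skb);
		return -1;
	}
	return 0;				/* genuine contention: requeue the packet */
}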
void vRunCommand(void *hDeviceContext) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PWLAN_IE_SSID pItemSSID; PWLAN_IE_SSID pItemSSIDCurr; CMD_STATUS Status; unsigned int ii; BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; struct sk_buff *skb; BYTE byData; if (pDevice->dwDiagRefCount != 0) return; if (pDevice->bCmdRunning != TRUE) return; spin_lock_irq(&pDevice->lock); switch ( pDevice->eCommandState ) { case WLAN_CMD_SCAN_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == TRUE) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID; if (pMgmt->uScanChannel == 0 ) { pMgmt->uScanChannel = pDevice->byMinChannel; } if (pMgmt->uScanChannel > pDevice->byMaxChannel) { pMgmt->eScanState = WMAC_NO_SCANNING; if (pDevice->byBBType != pDevice->byScanBBType) { pDevice->byBBType = pDevice->byScanBBType; CARDvSetBSSMode(pDevice); } if (pDevice->bUpdateBBVGA) { BBvSetShortSlotTime(pDevice); BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); BBvUpdatePreEDThreshold(pDevice, FALSE); } vAdHocBeaconRestart(pDevice); CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel); if (pMgmt->bCurrBSSIDFilterOn) { MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode |= RCR_BSSID; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); pDevice->bStopDataPkt = FALSE; s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel); s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->uScanChannel == pDevice->byMinChannel) { pMgmt->abyScanBSSID[0] = 0xFF; pMgmt->abyScanBSSID[1] = 0xFF; pMgmt->abyScanBSSID[2] = 0xFF; pMgmt->abyScanBSSID[3] = 0xFF; pMgmt->abyScanBSSID[4] = 0xFF; pMgmt->abyScanBSSID[5] = 0xFF; pItemSSID->byElementID = WLAN_EID_SSID; pMgmt->eScanState = WMAC_IS_SCANNING; pDevice->byScanBBType = pDevice->byBBType; pDevice->bStopDataPkt = TRUE; MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode &= ~RCR_BSSID; } vAdHocBeaconStop(pDevice); if ((pDevice->byBBType != BB_TYPE_11A) && (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) { pDevice->byBBType = BB_TYPE_11A; CARDvSetBSSMode(pDevice); } else if ((pDevice->byBBType == BB_TYPE_11A) && (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) { pDevice->byBBType = BB_TYPE_11G; CARDvSetBSSMode(pDevice); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... 
channel: [%d]\n", pMgmt->uScanChannel); CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel); if (pDevice->bUpdateBBVGA) { BBvSetShortSlotTime(pDevice); BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]); BBvUpdatePreEDThreshold(pDevice, TRUE); } pMgmt->uScanChannel++; while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) && pMgmt->uScanChannel <= pDevice->byMaxChannel ){ pMgmt->uScanChannel++; } if (pMgmt->uScanChannel > pDevice->byMaxChannel) { pDevice->eCommandState = WLAN_CMD_SCAN_END; } if ((pMgmt->b11hEnable == FALSE) || (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) { s_vProbeChannel(pDevice); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, 100); return; } else { spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME); return; } } break; case WLAN_CMD_SCAN_END: if (pDevice->byBBType != pDevice->byScanBBType) { pDevice->byBBType = pDevice->byScanBBType; CARDvSetBSSMode(pDevice); } if (pDevice->bUpdateBBVGA) { BBvSetShortSlotTime(pDevice); BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); BBvUpdatePreEDThreshold(pDevice, FALSE); } vAdHocBeaconRestart(pDevice); CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel); if (pMgmt->bCurrBSSIDFilterOn) { MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode |= RCR_BSSID; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); pMgmt->eScanState = WMAC_NO_SCANNING; pDevice->bStopDataPkt = FALSE; #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT if(pMgmt->eScanType == WMAC_SCAN_PASSIVE) { union iwreq_data wrqu; PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n"); memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); } #endif s_bCommandComplete(pDevice); break; case WLAN_CMD_DISASSOCIATE_START : pDevice->byReAssocCount = 0; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState != WMAC_STATE_ASSOC)) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT pDevice->bwextstep0 = FALSE; pDevice->bwextstep1 = FALSE; pDevice->bwextstep2 = FALSE; pDevice->bwextstep3 = FALSE; pDevice->bWPASuppWextEnabled = FALSE; #endif pDevice->fWPA_Authened = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n"); vMgrDisassocBeginSta((void *) pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status); pDevice->bLinkPass = FALSE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; pItemSSID->len = 0; memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->sNodeDBTable[0].bActive = FALSE; } netif_stop_queue(pDevice->dev); if (pDevice->bNeedRadioOFF == TRUE) CARDbRadioPowerOff(pDevice); s_bCommandComplete(pDevice); break; case WLAN_CMD_SSID_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == TRUE) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID); if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n"); DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID); } if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) || ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) { if (pItemSSID->len == pItemSSIDCurr->len) { if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } } netif_stop_queue(pDevice->dev); pDevice->bLinkPass = FALSE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); } pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; PSvDisablePowerSaving((void *) pDevice); BSSvClearNodeDBTable(pDevice, 0); vMgrJoinBSSBegin((void *) pDevice, &Status); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) { if (pMgmt->eCurrState >= WMAC_STATE_AUTH) { vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status); } vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT; vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n"); return; } } else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { if (pMgmt->eCurrState == WMAC_STATE_JOINTED) { if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = TRUE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); pMgmt->sNodeDBTable[0].bActive = TRUE; pMgmt->sNodeDBTable[0].uInActiveCount = 0; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n"); vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n"); } BSSvAddMulticastNode(pDevice); } s_bClearBSSID_SCAN(pDevice); } else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) { if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA || pMgmt->eConfigMode == WMAC_CONFIG_AUTO) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n"); vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n"); } BSSvAddMulticastNode(pDevice); s_bClearBSSID_SCAN(pDevice); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n"); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif } } s_bCommandComplete(pDevice); break; case WLAN_AUTHENTICATE_WAIT : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n"); if (pMgmt->eCurrState == WMAC_STATE_AUTH) { pDevice->byLinkWaitCount = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n"); vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n"); pDevice->byLinkWaitCount = 0; pDevice->eCommandState = 
WLAN_ASSOCIATE_WAIT; vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); return; } } else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) { printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n"); } else if(pDevice->byLinkWaitCount <= 4){ pDevice->byLinkWaitCount ++; printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; s_bCommandComplete(pDevice); break; case WLAN_ASSOCIATE_WAIT : if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n"); if (pDevice->ePSMode != WMAC_POWER_CAM) { PSvEnablePowerSaving((void *) pDevice, pMgmt->wListenInterval); } pDevice->byLinkWaitCount = 0; pDevice->byReAssocCount = 0; pDevice->bLinkPass = TRUE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); s_bClearBSSID_SCAN(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } if(pDevice->IsTxDataTrigger != FALSE) { del_timer(&pDevice->sTimerTxData); init_timer(&pDevice->sTimerTxData); pDevice->sTimerTxData.data = (unsigned long) pDevice; pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData; pDevice->sTimerTxData.expires = RUN_AT(10*HZ); pDevice->fTxDataInSleep = FALSE; pDevice->nTxDataTimeCout = 0; } else { } pDevice->IsTxDataTrigger = TRUE; add_timer(&pDevice->sTimerTxData); } else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) { printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n"); } else if(pDevice->byLinkWaitCount <= 4){ pDevice->byLinkWaitCount ++; printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; s_bCommandComplete(pDevice); break; case WLAN_CMD_AP_MODE_START : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n"); if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { del_timer(&pMgmt->sTimerSecondCallback); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; pDevice->bLinkPass = FALSE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); if (pDevice->bEnableHostWEP == TRUE) BSSvClearNodeDBTable(pDevice, 1); else BSSvClearNodeDBTable(pDevice, 0); pDevice->uAssocCount = 0; pMgmt->eCurrState = WMAC_STATE_IDLE; pDevice->bFixRate = FALSE; vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vMgrCreateOwnIBSS fail!\n"); } MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST); pDevice->byRxMode &= ~RCR_UNICAST; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode ); BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = TRUE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); add_timer(&pMgmt->sTimerSecondCallback); } s_bCommandComplete(pDevice); break; case WLAN_CMD_TX_PSPACKET_START : if (pMgmt->sNodeDBTable[0].bRxPSPoll) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) { pMgmt->abyPSTxMap[0] &= ~byMask[0]; pDevice->bMoreData = FALSE; } else { pDevice->bMoreData = TRUE; } if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n"); } 
pMgmt->sNodeDBTable[0].wEnQueueCnt--; } } for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive && pMgmt->sNodeDBTable[ii].bRxPSPoll) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n", ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt); while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; pDevice->bMoreData = FALSE; } else { pDevice->bMoreData = TRUE; } if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n"); } pMgmt->sNodeDBTable[ii].wEnQueueCnt--; if (pMgmt->sNodeDBTable[ii].bPSEnable) break; } if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii); }
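/*
 * A small sketch of the abyPSTxMap bookkeeping used above: association ID
 * (AID) n lives in byte (n >> 3), bit (n & 7) of the partial virtual bitmap,
 * which is exactly what indexing the byMask[] table computes. Hypothetical
 * helper names; only the arithmetic is taken from the code above.
 */
static void sketch_ps_map_set(unsigned char *ps_map, unsigned int aid)
{
	ps_map[aid >> 3] |= 1 << (aid & 7);	/* frames buffered for this STA */
}

static void sketch_ps_map_clear(unsigned char *ps_map, unsigned int aid)
{
	ps_map[aid >> 3] &= ~(1 << (aid & 7));	/* PS queue drained */
}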
static int netdev_open(struct net_device *pnetdev) { uint status; _adapter *padapter = (_adapter *)netdev_priv(pnetdev); HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - dev_open\n")); printk("+8192cu_drv - drv_open, bup=%d\n", padapter->bup); if(padapter->bup == _FALSE) { padapter->bDriverStopped = _FALSE; padapter->bSurpriseRemoved = _FALSE; padapter->bCardDisableWOHSM = _FALSE; status = rtw_hal_init(padapter); if (status ==_FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("rtl871x_hal_init(): Can't init h/w!\n")); goto netdev_open_error; } if ( rtw_initmac == NULL ) // Use the mac address stored in the Efuse { _rtw_memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); } else { // Use the user specifiy mac address. // Commented by Albert 2010/07/19 // The "myid" function will get the wifi mac address from eeprompriv structure instead of netdev structure. // So, we have to overwrite the mac_addr stored in the eeprompriv structure. // In this case, the real mac address won't be used anymore. // So that, the eeprompriv.mac_addr should store the mac which users specify. _rtw_memcpy( padapter->eeprompriv.mac_addr, pnetdev->dev_addr, ETH_ALEN ); } printk("MAC Address = %x-%x-%x-%x-%x-%x\n", pnetdev->dev_addr[0], pnetdev->dev_addr[1], pnetdev->dev_addr[2], pnetdev->dev_addr[3], pnetdev->dev_addr[4], pnetdev->dev_addr[5]); status=rtw_start_drv_threads(padapter); if(status ==_FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize driver software resource Failed!\n")); goto netdev_open_error; } if (init_mlme_ext_priv(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("can't init mlme_ext_priv\n")); goto netdev_open_error; } #ifdef CONFIG_DRVEXT_MODULE init_drvext(padapter); #endif #ifdef CONFIG_USB_HCI if(pHalData->hal_ops.inirp_init == NULL) { RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize dvobjpriv.inirp_init error!!!\n")); goto netdev_open_error; } else { pHalData->hal_ops.inirp_init(padapter); } #endif #ifdef CONFIG_PROC_DEBUG rtw_proc_init_one(pnetdev); #endif padapter->bup = _TRUE; padapter->ledpriv.LedControlHandler(padapter, LED_CTL_POWER_ON); } padapter->net_closed = _FALSE; if( pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE ) { padapter->pwrctrlpriv.bips_processing = _FALSE; _set_timer(&padapter->pwrctrlpriv.pwr_state_check_timer, padapter->pwrctrlpriv.pwr_state_check_inverval); } //netif_carrier_on(pnetdev);//call this func when rtw_joinbss_event_callback return success if(!netif_queue_stopped(pnetdev)) netif_start_queue(pnetdev); else netif_wake_queue(pnetdev); RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - dev_open\n")); printk("-871x_drv - drv_open, bup=%d\n", padapter->bup); return 0; netdev_open_error: padapter->bup = _FALSE; netif_carrier_off(pnetdev); netif_stop_queue(pnetdev); RT_TRACE(_module_os_intfs_c_,_drv_err_,("-871x_drv - dev_open, fail!\n")); printk("-871x_drv - drv_open fail, bup=%d\n", padapter->bup); return (-1); }
/* The interrupt handler. * This is called from the CPM handler, not the MPC core interrupt. */ static void scc_enet_interrupt(void *dev_id, struct pt_regs *regs) { struct net_device *dev = dev_id; volatile struct scc_enet_private *cep; volatile cbd_t *bdp; ushort int_events; int must_restart; cep = (struct scc_enet_private *)dev->priv; /* Get the interrupt events that caused us to be here. */ int_events = cep->sccp->scc_scce; cep->sccp->scc_scce = int_events; must_restart = 0; /* Handle receive event in its own function. */ if (int_events & SCCE_ENET_RXF) scc_enet_rx(dev_id); /* Check for a transmit error. The manual is a little unclear * about this, so the debug code until I get it figured out. It * appears that if TXE is set, then TXB is not set. However, * if carrier sense is lost during frame transmission, the TXE * bit is set, "and continues the buffer transmission normally." * I don't know if "normally" implies TXB is set when the buffer * descriptor is closed.....trial and error :-). */ /* Transmit OK, or non-fatal error. Update the buffer descriptors. */ if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) { spin_lock(&cep->lock); bdp = cep->dirty_tx; while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { if ((bdp==cep->cur_tx) && (cep->tx_full == 0)) break; if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ cep->stats.tx_heartbeat_errors++; if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ cep->stats.tx_window_errors++; if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ cep->stats.tx_aborted_errors++; if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ cep->stats.tx_fifo_errors++; if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ cep->stats.tx_carrier_errors++; /* No heartbeat or Lost carrier are not really bad errors. * The others require a restart transmit command. */ if (bdp->cbd_sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { must_restart = 1; cep->stats.tx_errors++; } cep->stats.tx_packets++; /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (bdp->cbd_sc & BD_ENET_TX_DEF) cep->stats.collisions++; /* Free the sk buffer associated with this last transmit. */ dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]); cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; /* Update pointer to next buffer descriptor to be transmitted. */ if (bdp->cbd_sc & BD_ENET_TX_WRAP) bdp = cep->tx_bd_base; else bdp++; /* I don't know if we can be held off from processing these * interrupts for more than one frame time. I really hope * not. In such a case, we would now want to check the * currently available BD (cur_tx) and determine if any * buffers between the dirty_tx and cur_tx have also been * sent. We would want to process anything in between that * does not have BD_ENET_TX_READY set. */ /* Since we have freed up a buffer, the ring is no longer * full. */ if (cep->tx_full) { cep->tx_full = 0; if (netif_queue_stopped(dev)) netif_wake_queue(dev); } cep->dirty_tx = (cbd_t *)bdp; } if (must_restart) { volatile cpm8xx_t *cp; /* Some transmit errors cause the transmitter to shut * down. We now issue a restart transmit. Since the * errors close the BD and update the pointers, the restart * _should_ pick up without having to reset any of our * pointers either. */ cp = cpmp; cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG; while (cp->cp_cpcr & CPM_CR_FLG); } spin_unlock(&cep->lock); } /* Check for receive busy, i.e. packets coming but no place to * put them. 
This "can't happen" because the receive interrupt * is tossing previous frames. */ if (int_events & SCCE_ENET_BSY) { cep->stats.rx_dropped++; printk("CPM ENET: BSY can't happen.\n"); } return; }
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct netdev_desc *txdesc; unsigned entry; u32 ioaddr; u64 tfc_vlan_tag = 0; if (np->link_status == 0) { /* Link Down */ dev_kfree_skb(skb); return NETDEV_TX_OK; } ioaddr = dev->base_addr; entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; #if 0 if (skb->ip_summed == CHECKSUM_PARTIAL) { txdesc->status |= cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | IPChecksumEnable); } #endif if (np->vlan) { tfc_vlan_tag = VLANTagInsert | ((u64)np->vlan << 32) | ((u64)skb->priority << 45); } txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode * Work around: Always use 1 descriptor in 10Mbps mode */ if (entry % np->tx_coalesce == 0 || np->speed == 10) txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | TxDMAIndicate | (1 << FragCountShift)); else txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | (1 << FragCountShift)); /* TxDMAPollNow */ writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); /* Schedule ISR */ writel(10000, ioaddr + CountDown); np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 && np->speed != 10) { /* do nothing */ } else if (!netif_queue_stopped(dev)) { netif_stop_queue (dev); } /* The first TFDListPtr */ if (readl (dev->base_addr + TFDListPtr0) == 0) { writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), dev->base_addr + TFDListPtr0); writel (0, dev->base_addr + TFDListPtr1); } return NETDEV_TX_OK; }
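/*
 * A sketch of the ring-occupancy test used above: with producer/consumer
 * indices kept modulo the ring size, (cur - old + size) % size gives the
 * number of descriptors in flight, and the queue is stopped once that count
 * reaches the queue limit. Hypothetical helper, generic parameters.
 */
static void sketch_maybe_stop_queue(struct net_device *dev,
				    unsigned int cur_tx, unsigned int old_tx,
				    unsigned int ring_size,
				    unsigned int queue_limit)
{
	unsigned int in_flight = (cur_tx - old_tx + ring_size) % ring_size;

	if (in_flight >= queue_limit - 1 && !netif_queue_stopped(dev))
		netif_stop_queue(dev);	/* woken again by the TX-complete path */
}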
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; int awake; int ret = 0; if (netif_queue_stopped(dev)) { pr_err("[%s]fatal: rmnet_xmit called when " "netif_queue is stopped", dev->name); return 0; } spin_lock_irqsave(&p->lock, flags); awake = msm_bam_dmux_ul_power_vote(); if (!awake) { /* send SKB once wakeup is complete */ netif_stop_queue(dev); p->waiting_for_ul_skb = skb; spin_unlock_irqrestore(&p->lock, flags); ret = 0; goto exit; } spin_unlock_irqrestore(&p->lock, flags); ret = _rmnet_xmit(skb, dev); if (ret == -EPERM) { ret = NETDEV_TX_BUSY; goto exit; } /* * detected SSR a bit early. shut some things down now, and leave * the rest to the main ssr handling code when that happens later */ if (ret == -EFAULT) { netif_carrier_off(dev); dev_kfree_skb_any(skb); ret = 0; goto exit; } if (ret == -EAGAIN) { /* * This should not happen * EAGAIN means we attempted to overflow the high watermark * Clearly the queue is not stopped like it should be, so * stop it and return BUSY to the TCP/IP framework. It will * retry this packet when the queue is restarted, which happens * in the write_done callback when the low watermark is hit. */ netif_stop_queue(dev); ret = NETDEV_TX_BUSY; goto exit; } spin_lock_irqsave(&p->tx_queue_lock, flags); if (msm_bam_dmux_is_ch_full(p->ch_id)) { netif_stop_queue(dev); DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); exit: msm_bam_dmux_ul_power_unvote(); return ret; }
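/*
 * A hypothetical sketch of the counterpart to the "send SKB once wakeup is
 * complete" branch above, presumably run from the driver's wakeup
 * notification (not shown here): once the link is awake, transmit the parked
 * skb and restart the queue. Error handling is omitted for brevity.
 */
static void sketch_ul_wakeup_complete(struct rmnet_private *p,
				      struct net_device *dev)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	skb = p->waiting_for_ul_skb;
	p->waiting_for_ul_skb = NULL;
	spin_unlock_irqrestore(&p->lock, flags);

	if (skb)
		_rmnet_xmit(skb, dev);	/* same low-level xmit used above */
	netif_wake_queue(dev);
}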
VOID vCommandTimer ( IN HANDLE hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; PWLAN_IE_SSID pItemSSID; PWLAN_IE_SSID pItemSSIDCurr; CMD_STATUS Status; UINT ii; BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; struct sk_buff *skb; if (pDevice->dwDiagRefCount != 0) return; if (pDevice->bCmdRunning != TRUE) return; spin_lock_irq(&pDevice->lock); switch ( pDevice->eCommandState ) { case WLAN_CMD_SCAN_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == TRUE) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { s_bCommandComplete(pDevice); CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_AP); spin_unlock_irq(&pDevice->lock); return; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SCAN_START\n"); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID; if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){ spin_unlock_irq(&pDevice->lock); vCommandTimerWait((HANDLE)pDevice, 10); return; }; if (pMgmt->uScanChannel == 0 ) { pMgmt->uScanChannel = pDevice->byMinChannel; } if (pMgmt->uScanChannel > pDevice->byMaxChannel) { pMgmt->eScanState = WMAC_NO_SCANNING; CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC); } else { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE); } vAdHocBeaconRestart(pDevice); s_bCommandComplete(pDevice); } else { if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel); s_bCommandComplete(pDevice); return; } if (pMgmt->uScanChannel == pDevice->byMinChannel) { pMgmt->abyScanBSSID[0] = 0xFF; pMgmt->abyScanBSSID[1] = 0xFF; pMgmt->abyScanBSSID[2] = 0xFF; pMgmt->abyScanBSSID[3] = 0xFF; pMgmt->abyScanBSSID[4] = 0xFF; pMgmt->abyScanBSSID[5] = 0xFF; pItemSSID->byElementID = WLAN_EID_SSID; pMgmt->eScanState = WMAC_IS_SCANNING; } vAdHocBeaconStop(pDevice); if (CARDbSetChannel(pMgmt->pAdapter, pMgmt->uScanChannel) == TRUE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SCAN Channel: %d\n", pMgmt->uScanChannel); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel); } CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_UNKNOWN); pMgmt->uScanChannel++; if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) && pMgmt->uScanChannel <= pDevice->byMaxChannel ){ pMgmt->uScanChannel=pDevice->byMaxChannel+1; pMgmt->eCommandState = WLAN_CMD_SCAN_END; } if ((pMgmt->b11hEnable == FALSE) || (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) { s_vProbeChannel(pDevice); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((HANDLE)pDevice, WCMD_ACTIVE_SCAN_TIME); return; } else { spin_unlock_irq(&pDevice->lock); vCommandTimerWait((HANDLE)pDevice, WCMD_PASSIVE_SCAN_TIME); return; } } break; case WLAN_CMD_SCAN_END: CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC); } else { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE); } pMgmt->eScanState = WMAC_NO_SCANNING; vAdHocBeaconRestart(pDevice); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT if(pMgmt->eScanType == 
WMAC_SCAN_PASSIVE) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); } #endif s_bCommandComplete(pDevice); break; case WLAN_CMD_DISASSOCIATE_START : pDevice->byReAssocCount = 0; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState != WMAC_STATE_ASSOC)) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n"); vMgrDisassocBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status); pDevice->bLinkPass = FALSE; pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; pItemSSID->len = 0; memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->sNodeDBTable[0].bActive = FALSE; } netif_stop_queue(pDevice->dev); pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT; if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){ vCommandTimerWait((HANDLE)pDevice, 10); spin_unlock_irq(&pDevice->lock); return; }; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" CARDbRadioPowerOff\n"); s_bCommandComplete(pDevice); break; case WLAN_DISASSOCIATE_WAIT : if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){ vCommandTimerWait((HANDLE)pDevice, 10); spin_unlock_irq(&pDevice->lock); return; }; s_bCommandComplete(pDevice); break; case WLAN_CMD_SSID_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == TRUE) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID); if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID); } if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) || ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) { if (pItemSSID->len == pItemSSIDCurr->len) { if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } } netif_stop_queue(pDevice->dev); pDevice->bLinkPass = FALSE; } pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; PSvDisablePowerSaving((HANDLE)pDevice); BSSvClearNodeDBTable(pDevice, 0); vMgrJoinBSSBegin((HANDLE)pDevice, &Status); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) { if (pMgmt->eCurrState>= WMAC_STATE_AUTH) { vMgrDeAuthenBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status); } vMgrAuthenBeginSta((HANDLE)pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT; vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n"); return; } } else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { if (pMgmt->eCurrState == WMAC_STATE_JOINTED) { if (netif_queue_stopped(pDevice->dev)){ 
netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = TRUE; pMgmt->sNodeDBTable[0].bActive = TRUE; pMgmt->sNodeDBTable[0].uInActiveCount = 0; bClearBSSID_SCAN(pDevice); } else { vMgrCreateOwnIBSS((HANDLE)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " WLAN_CMD_IBSS_CREATE fail ! \n"); }; BSSvAddMulticastNode(pDevice); } } else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) { if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA || pMgmt->eConfigMode == WMAC_CONFIG_AUTO) { vMgrCreateOwnIBSS((HANDLE)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_IBSS_CREATE fail ! \n"); }; BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = TRUE; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n"); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; printk("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif } } s_bCommandComplete(pDevice); break; case WLAN_AUTHENTICATE_WAIT : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n"); if (pMgmt->eCurrState == WMAC_STATE_AUTH) { pDevice->byLinkWaitCount = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n"); vMgrAssocBeginSta((HANDLE)pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n"); pDevice->eCommandState = WLAN_ASSOCIATE_WAIT; vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); return; } } else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) { printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n"); } else if(pDevice->byLinkWaitCount <= 4){ pDevice->byLinkWaitCount ++; printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; #if 0 #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; printk("wireless_send_event--->SIOCGIWAP(disassociated:AUTHENTICATE_WAIT_timeout)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif #endif s_bCommandComplete(pDevice); break; case WLAN_ASSOCIATE_WAIT : if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n"); if (pDevice->ePSMode != WMAC_POWER_CAM) { PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval); } if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) { KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset); } pDevice->bLinkPass = TRUE; pDevice->byLinkWaitCount = 0; pDevice->byReAssocCount = 0; bClearBSSID_SCAN(pDevice); if (pDevice->byFOETuning) { BBvSetFOE(pDevice->PortOffset); PSbSendNullPacket(pDevice); } if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } #ifdef TxInSleep if(pDevice->IsTxDataTrigger != FALSE) { del_timer(&pDevice->sTimerTxData); init_timer(&pDevice->sTimerTxData); pDevice->sTimerTxData.data = (ULONG)pDevice; pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData; pDevice->sTimerTxData.expires = RUN_AT(10*HZ); pDevice->fTxDataInSleep = FALSE; pDevice->nTxDataTimeCout = 0; } else { } pDevice->IsTxDataTrigger 
= TRUE; add_timer(&pDevice->sTimerTxData); #endif } else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) { printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n"); } else if(pDevice->byLinkWaitCount <= 4){ pDevice->byLinkWaitCount ++; printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; #if 0 #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; printk("wireless_send_event--->SIOCGIWAP(disassociated:ASSOCIATE_WAIT_timeout)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif #endif s_bCommandComplete(pDevice); break; case WLAN_CMD_AP_MODE_START : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n"); if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { del_timer(&pMgmt->sTimerSecondCallback); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; pDevice->bLinkPass = FALSE; if (pDevice->bEnableHostWEP == TRUE) BSSvClearNodeDBTable(pDevice, 1); else BSSvClearNodeDBTable(pDevice, 0); pDevice->uAssocCount = 0; pMgmt->eCurrState = WMAC_STATE_IDLE; pDevice->bFixRate = FALSE; vMgrCreateOwnIBSS((HANDLE)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " vMgrCreateOwnIBSS fail ! \n"); }; MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_UNICAST); pDevice->byRxMode &= ~RCR_UNICAST; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode ); BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = TRUE; add_timer(&pMgmt->sTimerSecondCallback); } s_bCommandComplete(pDevice); break; case WLAN_CMD_TX_PSPACKET_START : if (pMgmt->sNodeDBTable[0].bRxPSPoll) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) { pMgmt->abyPSTxMap[0] &= ~byMask[0]; pDevice->bMoreData = FALSE; } else { pDevice->bMoreData = TRUE; } if (!device_dma0_xmit(pDevice, skb, 0)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n"); } pMgmt->sNodeDBTable[0].wEnQueueCnt--; } }; for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive && pMgmt->sNodeDBTable[ii].bRxPSPoll) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n", ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt); while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; pDevice->bMoreData = FALSE; } else { pDevice->bMoreData = TRUE; } if (!device_dma0_xmit(pDevice, skb, ii)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n"); } pMgmt->sNodeDBTable[ii].wEnQueueCnt--; if (pMgmt->sNodeDBTable[ii].bPSEnable) break; } if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii); }
int qdisc_restart(struct net_device *dev) { struct Qdisc *q = dev->qdisc; struct sk_buff *skb; /* BRCM: bail out if there is no qdisc attached */ if (!q) { if (net_ratelimit()) printk(KERN_DEBUG "HELP ME! qdisc_restart called, but no Qdisc!\n"); return 0; } /* Dequeue packet */ if ((skb = q->dequeue(q)) != NULL) { if (spin_trylock(&dev->xmit_lock)) { /* Remember that the driver is grabbed by us. */ dev->xmit_lock_owner = smp_processor_id(); /* And release queue */ spin_unlock(&dev->queue_lock); if (!netif_queue_stopped(dev)) { if (netdev_nit #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) && !(skb->imq_flags & IMQ_F_ENQUEUE) #endif ) dev_queue_xmit_nit(skb, dev); if (dev->hard_start_xmit(skb, dev) == 0) { dev->xmit_lock_owner = -1; spin_unlock(&dev->xmit_lock); spin_lock(&dev->queue_lock); return -1; } } /* Release the driver */ dev->xmit_lock_owner = -1; spin_unlock(&dev->xmit_lock); spin_lock(&dev->queue_lock); q = dev->qdisc; } else { /* So, someone grabbed the driver. */ /* It may be transient configuration error, when hard_start_xmit() recurses. We detect it by checking xmit owner and drop the packet when deadloop is detected. */ if (dev->xmit_lock_owner == smp_processor_id()) { kfree_skb(skb); if (net_ratelimit()) printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); return -1; } netdev_rx_stat[smp_processor_id()].cpu_collision++; } /* Device kicked us out :( This is possible in four cases: 0. driver is locked 1. fastroute is enabled 2. device cannot determine busy state before start of transmission (f.e. dialout) 3. device is buggy (ppp) */ q->ops->requeue(skb, q); netif_schedule(dev); return 1; } return q->q.qlen; }
/** * * This function intends to handle the activation of an interface * i.e. when it is brought Up/Active from a Down state. * */ static int netdev_open(struct net_device *pnetdev) { struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev); mutex_lock(&padapter->mutex_start); if (padapter->bup == false) { padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bup = true; if (rtl871x_hal_init(padapter) != _SUCCESS) goto netdev_open_error; if (r8712_initmac == NULL) /* Use the mac address stored in the Efuse */ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); else { /* We have to inform f/w to use user-supplied MAC * address. */ msleep(200); r8712_setMacAddr_cmd(padapter, (u8 *)pnetdev->dev_addr); /* * The "myid" function will get the wifi mac address * from eeprompriv structure instead of netdev * structure. So, we have to overwrite the mac_addr * stored in the eeprompriv structure. In this case, * the real mac address won't be used anymore. So that, * the eeprompriv.mac_addr should store the mac which * users specify. */ memcpy(padapter->eeprompriv.mac_addr, pnetdev->dev_addr, ETH_ALEN); } if (start_drv_threads(padapter) != _SUCCESS) goto netdev_open_error; if (padapter->dvobjpriv.inirp_init == NULL) goto netdev_open_error; else padapter->dvobjpriv.inirp_init(padapter); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } if (!netif_queue_stopped(pnetdev)) netif_start_queue(pnetdev); else netif_wake_queue(pnetdev); if (video_mode) enable_video_mode(padapter, cbw40_enable); /* start driver mlme relation timer */ start_drv_timers(padapter); padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK); mutex_unlock(&padapter->mutex_start); return 0; netdev_open_error: padapter->bup = false; netif_carrier_off(pnetdev); netif_stop_queue(pnetdev); mutex_unlock(&padapter->mutex_start); return -1; }
/* * Proc info file read handler. * * This function is called when the 'info' file is opened for reading. * It prints the following driver related information - * - Driver name * - Driver version * - Driver extended version * - Interface name * - BSS mode * - Media state (connected or disconnected) * - MAC address * - Total number of Tx bytes * - Total number of Rx bytes * - Total number of Tx packets * - Total number of Rx packets * - Total number of dropped Tx packets * - Total number of dropped Rx packets * - Total number of corrupted Tx packets * - Total number of corrupted Rx packets * - Carrier status (on or off) * - Tx queue status (started or stopped) * * For STA mode drivers, it also prints the following extra - * - ESSID * - BSSID * - Channel * - Region code * - Multicast count * - Multicast addresses */ static ssize_t mwifiex_info_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct mwifiex_private *priv = (struct mwifiex_private *) file->private_data; struct net_device *netdev = priv->netdev; struct netdev_hw_addr *ha; unsigned long page = get_zeroed_page(GFP_KERNEL); char *p = (char *) page, fmt[64]; struct mwifiex_bss_info info; ssize_t ret; int i = 0; if (!p) return -ENOMEM; memset(&info, 0, sizeof(info)); ret = mwifiex_get_bss_info(priv, &info); if (ret) goto free_and_exit; mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1); if (!priv->version_str[0]) mwifiex_get_ver_ext(priv); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); p += sprintf(p, "driver_version = %s", fmt); p += sprintf(p, "\nverext = %s", priv->version_str); p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); p += sprintf(p, "media_state=\"%s\"\n", (!priv->media_connected ? "Disconnected" : "Connected")); p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { p += sprintf(p, "multicast_count=\"%d\"\n", netdev_mc_count(netdev)); p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); p += sprintf(p, "region_code = \"%02x\"\n", info.region_code); netdev_for_each_mc_addr(ha, netdev) p += sprintf(p, "multicast_address[%d]=\"%pM\"\n", i++, ha->addr); } p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes); p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes); p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets); p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets); p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped); p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped); p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors); p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors); p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev)) ? "on" : "off")); p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev)) ? "stopped" : "started")); ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page, (unsigned long) p - page); free_and_exit: free_page(page); return ret; }
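/*
 * A hedged sketch of how a read-only handler like mwifiex_info_read() is
 * generally exposed through debugfs.  mwifiex wraps this in its own macros;
 * the open helper, fops name and file name below are illustrative assumptions,
 * not the driver's actual symbols.
 */
static int sketch_info_open(struct inode *inode, struct file *file)
{
	/* hand the per-interface private data to the read handler */
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations sketch_info_fops = {
	.owner = THIS_MODULE,
	.open = sketch_info_open,
	.read = mwifiex_info_read,
	.llseek = default_llseek,
};

/* e.g. debugfs_create_file("info", 0444, parent_dir, priv, &sketch_info_fops); */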
/**
 * stmmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
			       __func__);
		}
		return NETDEV_TX_BUSY;
	}

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_info("stmmac xmit:\n"
			"\tskb addr %p - len: %d - nopaged_len: %d\n"
			"\tn_frags: %d - ip_summed: %d - %s gso\n",
			skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
			!skb_is_gso(skb) ? "isn't" : "is");
#endif

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	desc = priv->dma_tx + entry;
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
			 "\t\tn_frags: %d, ip_summed: %d\n",
			 skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;

	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
		desc = priv->dma_tx + entry;
	} else {
		unsigned int nopaged_len = skb_headlen(skb);
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion);
	}

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = frag->size;

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = dma_map_page(priv->device, frag->page,
					  frag->page_offset,
					  len, DMA_TO_DEVICE);
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
		wmb();
		priv->hw->desc->set_tx_owner(desc);
	}

	/* Interrupt on completion only for the last segment */
	priv->hw->desc->close_tx_desc(desc);

#ifdef CONFIG_STMMAC_TIMER
	/* Clean IC while using timer */
	if (likely(priv->tm->enable))
		priv->hw->desc->clear_tx_ic(desc);
#endif

	wmb();	/* To avoid a race condition */

	priv->hw->desc->set_tx_owner(first);

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
			"first=%p, nfrags=%d\n",
			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
			entry, first, nfrags);
		display_ring(priv->dma_tx, txsize);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	return NETDEV_TX_OK;
}
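/*
 * stmmac_tx_avail() is used above but not shown in this excerpt.  A plausible
 * implementation, assuming cur_tx/dirty_tx are free-running indices into a
 * ring of dma_tx_size descriptors (one slot is kept unused so a full ring can
 * be told apart from an empty one):
 */
static inline u32 stmmac_tx_avail_sketch(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}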
void xenvif_notify_tx_completion(struct xenvif *vif) { if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) netif_wake_queue(vif->dev); }
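/*
 * The wake above only makes sense together with a matching stop on the
 * transmit path.  A hedged sketch of that counterpart (simplified, not the
 * exact netback code): stop the queue when the shared ring cannot take the
 * frame, so xenvif_notify_tx_completion() can restart it later.
 */
static netdev_tx_t sketch_xenvif_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!xenvif_rx_schedulable(vif)) {
		/* no room right now; the completion path will wake us */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* otherwise queue the skb onto the shared ring ... */
	return NETDEV_TX_OK;
}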
static void bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct bnad *bnad = netdev_priv(netdev); int i, j, bi; unsigned long flags; struct rtnl_link_stats64 *net_stats64; u64 *stats64; u64 bmap; mutex_lock(&bnad->conf_mutex); if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { mutex_unlock(&bnad->conf_mutex); return; } /* * Used bna_lock to sync reads from bna_stats, which is written * under the same lock */ spin_lock_irqsave(&bnad->bna_lock, flags); bi = 0; memset(buf, 0, stats->n_stats * sizeof(u64)); net_stats64 = (struct rtnl_link_stats64 *)buf; bnad_netdev_qstats_fill(bnad, net_stats64); bnad_netdev_hwstats_fill(bnad, net_stats64); bi = sizeof(*net_stats64) / sizeof(u64); /* Get netif_queue_stopped from stack */ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); /* Fill driver stats into ethtool buffers */ stats64 = (u64 *)&bnad->stats.drv_stats; for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) buf[bi++] = stats64[i]; /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ stats64 = (u64 *) bnad->stats.bna_stats->hw_stats; for (i = 0; i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64); i++) buf[bi++] = stats64[i]; /* Fill txf stats into ethtool buffers */ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] | ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32); for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) { if (bmap & 1) { stats64 = (u64 *)&bnad->stats.bna_stats-> hw_stats->txf_stats[i]; for (j = 0; j < sizeof(struct bfi_ll_stats_txf) / sizeof(u64); j++) buf[bi++] = stats64[j]; } bmap >>= 1; } /* Fill rxf stats into ethtool buffers */ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] | ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32); for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) { if (bmap & 1) { stats64 = (u64 *)&bnad->stats.bna_stats-> hw_stats->rxf_stats[i]; for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) / sizeof(u64); j++) buf[bi++] = stats64[j]; } bmap >>= 1; } /* Fill per Q stats into ethtool buffers */ bi = bnad_per_q_stats_fill(bnad, buf, bi); spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); }
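/*
 * ethtool stats handlers come in pairs: the length reported by get_sset_count
 * must match what get_ethtool_stats() fills in, which is why the function
 * above re-validates bnad_get_stats_count_locked() under conf_mutex.  A hedged
 * sketch of that counterpart (illustrative, not the exact bnad code):
 */
static int sketch_bnad_get_sset_count(struct net_device *netdev, int sset)
{
	struct bnad *bnad = netdev_priv(netdev);
	int count;

	switch (sset) {
	case ETH_SS_STATS:
		mutex_lock(&bnad->conf_mutex);
		count = bnad_get_stats_count_locked(netdev);
		mutex_unlock(&bnad->conf_mutex);
		return count;
	default:
		return -EOPNOTSUPP;
	}
}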
/* * This function handles the command response of ad-hoc start and * ad-hoc join. * * The function generates a device-connected event to notify * the applications, in case of successful ad-hoc start/join, and * saves the beacon buffer. */ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { int ret = 0; struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; struct mwifiex_bssdescriptor *bss_desc; adhoc_result = &resp->params.adhoc_result; bss_desc = priv->attempted_bss_desc; /* Join result code 0 --> SUCCESS */ if (le16_to_cpu(resp->result)) { dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); if (priv->media_connected) mwifiex_reset_connect_state(priv); memset(&priv->curr_bss_params.bss_descriptor, 0x00, sizeof(struct mwifiex_bssdescriptor)); ret = -1; goto done; } /* Send a Media Connected event, according to the Spec */ priv->media_connected = true; if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) { dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n", bss_desc->ssid.ssid); /* Update the created network descriptor with the new BSSID */ memcpy(bss_desc->mac_address, adhoc_result->bssid, ETH_ALEN); priv->adhoc_state = ADHOC_STARTED; } else { /* * Now the join cmd should be successful. * If BSSID has changed use SSID to compare instead of BSSID */ dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n", bss_desc->ssid.ssid); /* * Make a copy of current BSSID descriptor, only needed for * join since the current descriptor is already being used * for adhoc start */ memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc, sizeof(struct mwifiex_bssdescriptor)); priv->adhoc_state = ADHOC_JOINED; } dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n", priv->adhoc_channel); dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n", priv->curr_bss_params.bss_descriptor.mac_address); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); if (netif_queue_stopped(priv->netdev)) netif_wake_queue(priv->netdev); mwifiex_save_curr_bcn(priv); done: /* Need to indicate IOCTL complete */ if (adapter->curr_cmd->wait_q_enabled) { if (ret) adapter->cmd_wait_q.status = -1; else adapter->cmd_wait_q.status = 0; } return ret; }
static int netdev_close(struct net_device *pnetdev) { _adapter *padapter = (_adapter *)netdev_priv(pnetdev); RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - drv_close\n")); if(padapter->pwrctrlpriv.bInternalAutoSuspend == _TRUE) { rfpwrstate_check(padapter); } padapter->net_closed = _TRUE; /* if(!padapter->hw_init_completed) { printk("(1)871x_drv - drv_close, bup=%d, hw_init_completed=%d\n", padapter->bup, padapter->hw_init_completed); padapter->bDriverStopped = _TRUE; rtw_dev_unload(padapter); } else*/ { printk("(2)8192cu_drv - drv_close, bup=%d, hw_init_completed=%d" #ifdef CONFIG_PLATFORM_ANDROID " bdisassoc_by_assoc=%d" #endif "\n" , padapter->bup , padapter->hw_init_completed #ifdef CONFIG_PLATFORM_ANDROID , padapter->bdisassoc_by_assoc #endif ); //s1. if(pnetdev) { if (!netif_queue_stopped(pnetdev)) netif_stop_queue(pnetdev); } #ifndef CONFIG_PLATFORM_ANDROID //if(!padapter->bdisassoc_by_assoc){ //#endif //s2. //s2-1. issue rtw_disassoc_cmd to fw rtw_disassoc_cmd(padapter); //s2-2. indicate disconnect to os rtw_indicate_disconnect(padapter); //s2-3. rtw_free_assoc_resources(padapter); //s2-4. rtw_free_network_queue(padapter,_TRUE); //#ifdef CONFIG_PLATFORM_ANDROID //} //padapter->bdisassoc_by_assoc=0;//FON #endif } // Close LED padapter->ledpriv.LedControlHandler(padapter, LED_CTL_POWER_OFF); RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - drv_close\n")); printk("-871x_drv - drv_close, bup=%d\n", padapter->bup); return 0; }
/* * Association firmware command response handler * * The response buffer for the association command has the following * memory layout. * * For cases where an association response was not received (indicated * by the CapInfo and AId field): * * .------------------------------------------------------------. * | Header(4 * sizeof(t_u16)): Standard command response hdr | * .------------------------------------------------------------. * | cap_info/Error Return(t_u16): | * | 0xFFFF(-1): Internal error | * | 0xFFFE(-2): Authentication unhandled message | * | 0xFFFD(-3): Authentication refused | * | 0xFFFC(-4): Timeout waiting for AP response | * .------------------------------------------------------------. * | status_code(t_u16): | * | If cap_info is -1: | * | An internal firmware failure prevented the | * | command from being processed. The status_code | * | will be set to 1. | * | | * | If cap_info is -2: | * | An authentication frame was received but was | * | not handled by the firmware. IEEE Status | * | code for the failure is returned. | * | | * | If cap_info is -3: | * | An authentication frame was received and the | * | status_code is the IEEE Status reported in the | * | response. | * | | * | If cap_info is -4: | * | (1) Association response timeout | * | (2) Authentication response timeout | * .------------------------------------------------------------. * | a_id(t_u16): 0xFFFF | * .------------------------------------------------------------. * * * For cases where an association response was received, the IEEE * standard association response frame is returned: * * .------------------------------------------------------------. * | Header(4 * sizeof(t_u16)): Standard command response hdr | * .------------------------------------------------------------. * | cap_info(t_u16): IEEE Capability | * .------------------------------------------------------------. * | status_code(t_u16): IEEE Status Code | * .------------------------------------------------------------. * | a_id(t_u16): IEEE Association ID | * .------------------------------------------------------------. * | IEEE IEs(variable): Any received IEs comprising the | * | remaining portion of a received | * | association response frame. | * .------------------------------------------------------------. * * For simplistic handling, the status_code field can be used to determine * an association success (0) or failure (non-zero). 
*/ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct mwifiex_adapter *adapter = priv->adapter; int ret = 0; struct ieee_types_assoc_rsp *assoc_rsp; struct mwifiex_bssdescriptor *bss_desc; u8 enable_data = true; assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN, sizeof(priv->assoc_rsp_buf)); memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); if (le16_to_cpu(assoc_rsp->status_code)) { priv->adapter->dbg.num_cmd_assoc_failure++; dev_err(priv->adapter->dev, "ASSOC_RESP: association failed, " "status code = %d, error = 0x%x, a_id = 0x%x\n", le16_to_cpu(assoc_rsp->status_code), le16_to_cpu(assoc_rsp->cap_info_bitmap), le16_to_cpu(assoc_rsp->a_id)); ret = -1; goto done; } /* Send a Media Connected event, according to the Spec */ priv->media_connected = true; priv->adapter->ps_state = PS_STATE_AWAKE; priv->adapter->pps_uapsd_mode = false; priv->adapter->tx_lock_flag = false; /* Set the attempted BSSID Index to current */ bss_desc = priv->attempted_bss_desc; dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n", bss_desc->ssid.ssid); /* Make a copy of current BSSID descriptor */ memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc, sizeof(struct mwifiex_bssdescriptor)); /* Update curr_bss_params */ priv->curr_bss_params.bss_descriptor.channel = bss_desc->phy_param_set.ds_param_set.current_chan; priv->curr_bss_params.band = (u8) bss_desc->bss_band; /* * Adjust the timestamps in the scan table to be relative to the newly * associated AP's TSF */ mwifiex_update_tsf_timestamps(priv, bss_desc); if (bss_desc->wmm_ie.vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) priv->curr_bss_params.wmm_enabled = true; else priv->curr_bss_params.wmm_enabled = false; if ((priv->wmm_required || bss_desc->bcn_ht_cap) && priv->curr_bss_params.wmm_enabled) priv->wmm_enabled = true; else priv->wmm_enabled = false; priv->curr_bss_params.wmm_uapsd_enabled = false; if (priv->wmm_enabled) priv->curr_bss_params.wmm_uapsd_enabled = ((bss_desc->wmm_ie.qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 
1 : 0); dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n", priv->curr_pkt_filter); if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) priv->wpa_is_gtk_set = false; if (priv->wmm_enabled) { /* Don't re-enable carrier until we get the WMM_GET_STATUS event */ enable_data = false; } else { /* Since WMM is not enabled, setup the queues with the defaults */ mwifiex_wmm_setup_queue_priorities(priv, NULL); mwifiex_wmm_setup_ac_downgrade(priv); } if (enable_data) dev_dbg(priv->adapter->dev, "info: post association, re-enabling data flow\n"); /* Reset SNR/NF/RSSI values */ priv->data_rssi_last = 0; priv->data_nf_last = 0; priv->data_rssi_avg = 0; priv->data_nf_avg = 0; priv->bcn_rssi_last = 0; priv->bcn_nf_last = 0; priv->bcn_rssi_avg = 0; priv->bcn_nf_avg = 0; priv->rxpd_rate = 0; priv->rxpd_htinfo = 0; mwifiex_save_curr_bcn(priv); priv->adapter->dbg.num_cmd_assoc_success++; dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n"); /* Add the ra_list here for infra mode as there will be only 1 ra always */ mwifiex_ralist_add(priv, priv->curr_bss_params.bss_descriptor.mac_address); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); if (netif_queue_stopped(priv->netdev)) netif_wake_queue(priv->netdev); if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) priv->scan_block = true; done: /* Need to indicate IOCTL complete */ if (adapter->curr_cmd->wait_q_enabled) { if (ret) adapter->cmd_wait_q.status = -1; else adapter->cmd_wait_q.status = 0; } return ret; }
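/*
 * Both mwifiex handlers above turn the carrier on and wake the TX queue once
 * an association or ad-hoc start/join succeeds.  The conventional inverse,
 * run from the disconnect/deauthentication path, is sketched below
 * (illustrative only, not the exact mwifiex_reset_connect_state() body):
 */
static void sketch_mwifiex_indicate_disconnected(struct mwifiex_private *priv)
{
	priv->media_connected = false;

	/* stop feeding the firmware and drop the link state seen by the stack */
	if (!netif_queue_stopped(priv->netdev))
		netif_stop_queue(priv->netdev);
	if (netif_carrier_ok(priv->netdev))
		netif_carrier_off(priv->netdev);
}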
/**
 * wwan_xmit() - Transmits an skb.  Requests the IPA RM resources needed for
 * the transfer.  If IPA RM is not yet ready, the skb is held back and
 * transmitted as soon as the IPA RM resources are granted.
 *
 * @skb: skb to be transmitted
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
 * -EFAULT: Error while transmitting the skb
 */
static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
		       dev->name);
		return 0;
	}
	ret = ipa_rm_inactivity_timer_request_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		return -EFAULT;
	}
	ret = wwan_send_packet(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	/*
	 * We detected SSR a bit early.  Shut some things down now, and leave
	 * the rest to the main SSR handling code when that happens later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}
	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet when the queue is restarted, which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	spin_lock_irqsave(&wwan_ptr->lock, flags);
	if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
		netif_stop_queue(dev);
		pr_debug("%s: High WM hit, stopping queue=%p\n",
			 __func__, skb);
	}
	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
	return ret;
exit:
	ipa_rm_inactivity_timer_release_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	return ret;
}
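/*
 * The -EAGAIN comment above relies on a "write done" completion callback that
 * restarts the queue once the channel drains.  A hedged sketch of that side;
 * the callback name is a placeholder, and the real code may test a separate
 * low watermark rather than simply "no longer full".
 */
static void sketch_wwan_write_done(struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&wwan_ptr->lock, flags);
	if (netif_queue_stopped(dev) &&
	    !a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
		pr_debug("%s: Low WM hit, waking queue\n", __func__);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
}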
/** * @brief Proc read function for info * * @param page Pointer to buffer * @param start Read data starting position * @param offset Offset * @param count Counter * @param eof End of file flag * @param data Data to output * * @return Number of output data */ static int woal_info_proc_read(char *page, char **start, off_t offset, int count, int *eof, void *data) { char *p = page; struct net_device *netdev = data; char fmt[MLAN_MAX_VER_STR_LEN]; moal_private *priv = (moal_private *) netdev_priv(netdev); #ifdef STA_SUPPORT int i = 0; moal_handle *handle = priv->phandle; mlan_bss_info info; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) struct dev_mc_list *mcptr = netdev->mc_list; int mc_count = netdev->mc_count; #else struct netdev_hw_addr *mcptr = NULL; int mc_count = netdev_mc_count(netdev); #endif /* < 2.6.35 */ #else #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29) int i = 0; #endif /* > 2.6.29 */ #endif #ifdef UAP_SUPPORT mlan_ds_uap_stats ustats; #endif ENTER(); if (offset) { *eof = 1; goto exit; } memset(fmt, 0, sizeof(fmt)); #ifdef UAP_SUPPORT if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) { p += sprintf(p, "driver_name = " "\"uap\"\n"); woal_uap_get_version(priv, fmt, sizeof(fmt) - 1); if (MLAN_STATUS_SUCCESS != woal_uap_get_stats(priv, MOAL_PROC_WAIT, &ustats)) { *eof = 1; goto exit; } } #endif /* UAP_SUPPORT */ #ifdef STA_SUPPORT if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) { woal_get_version(handle, fmt, sizeof(fmt) - 1); memset(&info, 0, sizeof(info)); if (MLAN_STATUS_SUCCESS != woal_get_bss_info(priv, MOAL_PROC_WAIT, &info)) { *eof = 1; goto exit; } p += sprintf(p, "driver_name = " "\"wlan\"\n"); } #endif p += sprintf(p, "driver_version = %s", fmt); p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); #if defined(WIFI_DIRECT_SUPPORT) if (priv->bss_type == MLAN_BSS_TYPE_WIFIDIRECT) { if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) p += sprintf(p, "bss_mode = \"WIFIDIRECT-Client\"\n"); else p += sprintf(p, "bss_mode = \"WIFIDIRECT-GO\"\n"); } #endif #ifdef STA_SUPPORT if (priv->bss_type == MLAN_BSS_TYPE_STA) p += sprintf(p, "bss_mode =\"%s\"\n", szModes[info.bss_mode]); #endif p += sprintf(p, "media_state=\"%s\"\n", ((priv->media_connected == MFALSE) ?
"Disconnected" : "Connected")); p += sprintf(p, "mac_address=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); #ifdef STA_SUPPORT if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) { p += sprintf(p, "multicast_count=\"%d\"\n", mc_count); p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); p += sprintf(p, "bssid=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", info.bssid[0], info.bssid[1], info.bssid[2], info.bssid[3], info.bssid[4], info.bssid[5]); p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); p += sprintf(p, "region_code = \"%02x\"\n", (t_u8) info.region_code); /* * Put out the multicast list */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) for (i = 0; i < netdev->mc_count; i++) { p += sprintf(p, "multicast_address[%d]=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", i, mcptr->dmi_addr[0], mcptr->dmi_addr[1], mcptr->dmi_addr[2], mcptr->dmi_addr[3], mcptr->dmi_addr[4], mcptr->dmi_addr[5]); mcptr = mcptr->next; } #else netdev_for_each_mc_addr(mcptr, netdev) p += sprintf(p, "multicast_address[%d]=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", i++, mcptr->addr[0], mcptr->addr[1], mcptr->addr[2], mcptr->addr[3], mcptr->addr[4], mcptr->addr[5]); #endif /* < 2.6.35 */ } #endif p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes); p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes); p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets); p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets); p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped); p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped); p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors); p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors); p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev)) ? "on" : "off")); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29) for (i = 0; i < netdev->num_tx_queues; i++) { p += sprintf(p, "tx queue %d: %s\n", i, ((netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) ? "stopped" : "started")); } #else p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev)) ?
"stopped" : "started")); #endif #ifdef UAP_SUPPORT if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) { p += sprintf(p, "tkip_mic_failures = %u\n", ustats.tkip_mic_failures); p += sprintf(p, "ccmp_decrypt_errors = %u\n", ustats.ccmp_decrypt_errors); p += sprintf(p, "wep_undecryptable_count = %u\n", ustats.wep_undecryptable_count); p += sprintf(p, "wep_icv_error_count = %u\n", ustats.wep_icv_error_count); p += sprintf(p, "decrypt_failure_count = %u\n", ustats.decrypt_failure_count); p += sprintf(p, "mcast_tx_count = %u\n", ustats.mcast_tx_count); p += sprintf(p, "failed_count = %u\n", ustats.failed_count); p += sprintf(p, "retry_count = %u\n", ustats.retry_count); p += sprintf(p, "multiple_retry_count = %u\n", ustats.multi_retry_count); p += sprintf(p, "frame_duplicate_count = %u\n", ustats.frame_dup_count); p += sprintf(p, "rts_success_count = %u\n", ustats.rts_success_count); p += sprintf(p, "rts_failure_count = %u\n", ustats.rts_failure_count); p += sprintf(p, "ack_failure_count = %u\n", ustats.ack_failure_count); p += sprintf(p, "rx_fragment_count = %u\n", ustats.rx_fragment_count); p += sprintf(p, "mcast_rx_frame_count = %u\n", ustats.mcast_rx_frame_count); p += sprintf(p, "fcs_error_count = %u\n", ustats.fcs_error_count); p += sprintf(p, "tx_frame_count = %u\n", ustats.tx_frame_count); p += sprintf(p, "rsna_tkip_cm_invoked = %u\n", ustats.rsna_tkip_cm_invoked); p += sprintf(p, "rsna_4way_hshk_failures = %u\n", ustats.rsna_4way_hshk_failures); } #endif /* UAP_SUPPORT */ exit: LEAVE(); return (p - page); }
static void fec_enet_tx(struct net_device *ndev) { struct fec_enet_private *fep; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; fep = netdev_priv(ndev); spin_lock(&fep->hw_lock); bdp = fep->dirty_tx; while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { if (bdp == fep->cur_tx && fep->tx_full == 0) break; dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; skb = fep->tx_skbuff[fep->skb_dirty]; /* Check for errors. */ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { ndev->stats.tx_errors++; if (status & BD_ENET_TX_HB) /* No heartbeat */ ndev->stats.tx_heartbeat_errors++; if (status & BD_ENET_TX_LC) /* Late collision */ ndev->stats.tx_window_errors++; if (status & BD_ENET_TX_RL) /* Retrans limit */ ndev->stats.tx_aborted_errors++; if (status & BD_ENET_TX_UN) /* Underrun */ ndev->stats.tx_fifo_errors++; if (status & BD_ENET_TX_CSL) /* Carrier lost */ ndev->stats.tx_carrier_errors++; } else { ndev->stats.tx_packets++; } if (status & BD_ENET_TX_READY) printk("HEY! Enet xmit interrupt and TX_READY.\n"); /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (status & BD_ENET_TX_DEF) ndev->stats.collisions++; /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); fep->tx_skbuff[fep->skb_dirty] = NULL; fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; /* Update pointer to next buffer descriptor to be transmitted */ if (status & BD_ENET_TX_WRAP) bdp = fep->tx_bd_base; else bdp++; /* Since we have freed up a buffer, the ring is no longer full */ if (fep->tx_full) { fep->tx_full = 0; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } } fep->dirty_tx = bdp; spin_unlock(&fep->hw_lock); }
static void bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct bnad *bnad = netdev_priv(netdev); int i, j, bi = 0; unsigned long flags; struct rtnl_link_stats64 net_stats64; u64 *stats64; u32 bmap; mutex_lock(&bnad->conf_mutex); if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { mutex_unlock(&bnad->conf_mutex); return; } /* * Used bna_lock to sync reads from bna_stats, which is written * under the same lock */ spin_lock_irqsave(&bnad->bna_lock, flags); memset(&net_stats64, 0, sizeof(net_stats64)); bnad_netdev_qstats_fill(bnad, &net_stats64); bnad_netdev_hwstats_fill(bnad, &net_stats64); buf[bi++] = net_stats64.rx_packets; buf[bi++] = net_stats64.tx_packets; buf[bi++] = net_stats64.rx_bytes; buf[bi++] = net_stats64.tx_bytes; buf[bi++] = net_stats64.rx_errors; buf[bi++] = net_stats64.tx_errors; buf[bi++] = net_stats64.rx_dropped; buf[bi++] = net_stats64.tx_dropped; buf[bi++] = net_stats64.multicast; buf[bi++] = net_stats64.collisions; buf[bi++] = net_stats64.rx_length_errors; buf[bi++] = net_stats64.rx_crc_errors; buf[bi++] = net_stats64.rx_frame_errors; buf[bi++] = net_stats64.tx_fifo_errors; /* Get netif_queue_stopped from stack */ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); /* Fill driver stats into ethtool buffers */ stats64 = (u64 *)&bnad->stats.drv_stats; for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) buf[bi++] = stats64[i]; /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; for (i = 0; i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64); i++) buf[bi++] = stats64[i]; /* Fill txf stats into ethtool buffers */ bmap = bna_tx_rid_mask(&bnad->bna); for (i = 0; bmap; i++) { if (bmap & 1) { stats64 = (u64 *)&bnad->stats.bna_stats-> hw_stats.txf_stats[i]; for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / sizeof(u64); j++) buf[bi++] = stats64[j]; } bmap >>= 1; } /* Fill rxf stats into ethtool buffers */ bmap = bna_rx_rid_mask(&bnad->bna); for (i = 0; bmap; i++) { if (bmap & 1) { stats64 = (u64 *)&bnad->stats.bna_stats-> hw_stats.rxf_stats[i]; for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / sizeof(u64); j++) buf[bi++] = stats64[j]; } bmap >>= 1; } /* Fill per Q stats into ethtool buffers */ bi = bnad_per_q_stats_fill(bnad, buf, bi); spin_unlock_irqrestore(&bnad->bna_lock, flags); mutex_unlock(&bnad->conf_mutex); }
static void ri_tasklet(unsigned long dev) { struct net_device *_dev = (struct net_device *)dev; struct ifb_private *dp = netdev_priv(_dev); struct net_device_stats *stats = &_dev->stats; struct netdev_queue *txq; struct sk_buff *skb; txq = netdev_get_tx_queue(_dev, 0); dp->st_task_enter++; if ((skb = skb_peek(&dp->tq)) == NULL) { dp->st_txq_refl_try++; if (__netif_tx_trylock(txq)) { dp->st_rxq_enter++; while ((skb = skb_dequeue(&dp->rq)) != NULL) { skb_queue_tail(&dp->tq, skb); dp->st_rx2tx_tran++; } __netif_tx_unlock(txq); } else { /* reschedule */ dp->st_rxq_notenter++; goto resched; } } while ((skb = skb_dequeue(&dp->tq)) != NULL) { u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; skb->tc_verd = SET_TC_NCLS(skb->tc_verd); stats->tx_packets++; stats->tx_bytes +=skb->len; rcu_read_lock(); skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); stats->tx_dropped++; break; } rcu_read_unlock(); skb->skb_iif = _dev->ifindex; if (from & AT_EGRESS) { dp->st_rx_frm_egr++; dev_queue_xmit(skb); } else if (from & AT_INGRESS) { dp->st_rx_frm_ing++; skb_pull(skb, skb->dev->hard_header_len); netif_rx(skb); } else BUG(); } if (__netif_tx_trylock(txq)) { dp->st_rxq_check++; if ((skb = skb_peek(&dp->rq)) == NULL) { dp->tasklet_pending = 0; if (netif_queue_stopped(_dev)) netif_wake_queue(_dev); } else { dp->st_rxq_rsch++; __netif_tx_unlock(txq); goto resched; } __netif_tx_unlock(txq); } else { resched: dp->tasklet_pending = 1; tasklet_schedule(&dp->ifb_tasklet); } }
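/*
 * ri_tasklet() above only drains dp->rq/dp->tq.  A hedged sketch of the
 * enqueue side that feeds it, modelled loosely on ifb's xmit path but
 * simplified (tc_verd checks and drop accounting omitted):
 */
static netdev_tx_t sketch_ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* throttle the producer once the backlog gets long */
	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
		netif_stop_queue(dev);

	skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}
	return NETDEV_TX_OK;
}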
void vCommandTimer ( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; PWLAN_IE_SSID pItemSSID; PWLAN_IE_SSID pItemSSIDCurr; CMD_STATUS Status; unsigned int ii; unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; struct sk_buff *skb; if (pDevice->dwDiagRefCount != 0) return; if (pDevice->bCmdRunning != true) return; spin_lock_irq(&pDevice->lock); switch ( pDevice->eCommandState ) { case WLAN_CMD_SCAN_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == true) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { s_bCommandComplete(pDevice); CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_AP); spin_unlock_irq(&pDevice->lock); return; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SCAN_START\n"); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID; // wait all Data TD complete if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){ spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *)pDevice, 10); return; } if (pMgmt->uScanChannel == 0 ) { pMgmt->uScanChannel = pDevice->byMinChannel; // Set Baseband to be more sensitive. } if (pMgmt->uScanChannel > pDevice->byMaxChannel) { pMgmt->eScanState = WMAC_NO_SCANNING; // Set Baseband's sensitivity back. // Set channel back set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC); } else { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE); } vAdHocBeaconRestart(pDevice); s_bCommandComplete(pDevice); } else { //2008-8-4 <add> by chester if (!is_channel_valid(pMgmt->uScanChannel)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel); s_bCommandComplete(pDevice); return; } ; if (pMgmt->uScanChannel == pDevice->byMinChannel) { //pMgmt->eScanType = WMAC_SCAN_ACTIVE; pMgmt->abyScanBSSID[0] = 0xFF; pMgmt->abyScanBSSID[1] = 0xFF; pMgmt->abyScanBSSID[2] = 0xFF; pMgmt->abyScanBSSID[3] = 0xFF; pMgmt->abyScanBSSID[4] = 0xFF; pMgmt->abyScanBSSID[5] = 0xFF; pItemSSID->byElementID = WLAN_EID_SSID; // clear bssid list // BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass); pMgmt->eScanState = WMAC_IS_SCANNING; } vAdHocBeaconStop(pDevice); if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel) == true) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SCAN Channel: %d\n", pMgmt->uScanChannel); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel); } CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_UNKNOWN); ; ; pMgmt->uScanChannel++; //2008-8-4 <modify> by chester if (!is_channel_valid(pMgmt->uScanChannel) && pMgmt->uScanChannel <= pDevice->byMaxChannel ){ pMgmt->uScanChannel=pDevice->byMaxChannel+1; pMgmt->eCommandState = WLAN_CMD_SCAN_END; } if ((pMgmt->b11hEnable == false) || (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) { s_vProbeChannel(pDevice); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *)pDevice, WCMD_ACTIVE_SCAN_TIME); return; } else { spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *)pDevice, WCMD_PASSIVE_SCAN_TIME); return; } } break; case WLAN_CMD_SCAN_END: // Set Baseband's sensitivity back. 
// Set channel back set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC); } else { CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_INFRASTRUCTURE); } pMgmt->eScanState = WMAC_NO_SCANNING; vAdHocBeaconRestart(pDevice); //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT if(pMgmt->eScanType == WMAC_SCAN_PASSIVE) {//send scan event to wpa_Supplicant union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); } #endif s_bCommandComplete(pDevice); break; case WLAN_CMD_DISASSOCIATE_START : pDevice->byReAssocCount = 0; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState != WMAC_STATE_ASSOC)) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n"); // reason = 8 : disassoc because sta has left vMgrDisassocBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status); pDevice->bLinkPass = false; // unlock command busy pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; pItemSSID->len = 0; memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->sNodeDBTable[0].bActive = false; // pDevice->bBeaconBufReady = false; } netif_stop_queue(pDevice->dev); pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT; // wait all Control TD complete if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){ vCommandTimerWait((void *)pDevice, 10); spin_unlock_irq(&pDevice->lock); return; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" CARDbRadioPowerOff\n"); //2008-09-02 <mark> by chester // CARDbRadioPowerOff(pDevice); s_bCommandComplete(pDevice); break; case WLAN_DISASSOCIATE_WAIT : // wait all Control TD complete if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){ vCommandTimerWait((void *)pDevice, 10); spin_unlock_irq(&pDevice->lock); return; } //2008-09-02 <mark> by chester // CARDbRadioPowerOff(pDevice); s_bCommandComplete(pDevice); break; case WLAN_CMD_SSID_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == true) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } ; ; //memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID, //((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID); if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID); } if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) || ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) { if (pItemSSID->len == pItemSSIDCurr->len) { if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } } netif_stop_queue(pDevice->dev); pDevice->bLinkPass = false; } // set initial 
state pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; PSvDisablePowerSaving((void *)pDevice); BSSvClearNodeDBTable(pDevice, 0); vMgrJoinBSSBegin((void *)pDevice, &Status); // if Infra mode if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) { // Call mgr to begin the deauthentication // reason = (3) because sta has left ESS if (pMgmt->eCurrState>= WMAC_STATE_AUTH) { vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status); } // Call mgr to begin the authentication vMgrAuthenBeginSta((void *)pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT; vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n"); return; } } // if Adhoc mode else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { if (pMgmt->eCurrState == WMAC_STATE_JOINTED) { if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = true; pMgmt->sNodeDBTable[0].bActive = true; pMgmt->sNodeDBTable[0].uInActiveCount = 0; bClearBSSID_SCAN(pDevice); } else { // start own IBSS vMgrCreateOwnIBSS((void *)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " WLAN_CMD_IBSS_CREATE fail ! \n"); } BSSvAddMulticastNode(pDevice); } } // if SSID not found else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) { if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA || pMgmt->eConfigMode == WMAC_CONFIG_AUTO) { // start own IBSS vMgrCreateOwnIBSS((void *)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_IBSS_CREATE fail ! \n"); } BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = true; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n"); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT // if(pDevice->bWPASuppWextEnabled == true) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; ; wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif } } s_bCommandComplete(pDevice); break; case WLAN_AUTHENTICATE_WAIT : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n"); if (pMgmt->eCurrState == WMAC_STATE_AUTH) { // Call mgr to begin the association pDevice->byLinkWaitCount = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n"); vMgrAssocBeginSta((void *)pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n"); pDevice->eCommandState = WLAN_ASSOCIATE_WAIT; vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); return; } } else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) { ; } else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if authenticated_frame delay! 
pDevice->byLinkWaitCount ++; ; spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; #if 0 #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT // if(pDevice->bWPASuppWextEnabled == true) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; ; wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif #endif s_bCommandComplete(pDevice); break; case WLAN_ASSOCIATE_WAIT : if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n"); if (pDevice->ePSMode != WMAC_POWER_CAM) { PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) { KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset); } pDevice->bLinkPass = true; pDevice->byLinkWaitCount = 0; pDevice->byReAssocCount = 0; bClearBSSID_SCAN(pDevice); if (pDevice->byFOETuning) { BBvSetFOE(pDevice->PortOffset); PSbSendNullPacket(pDevice); } if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } #ifdef TxInSleep if(pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time // printk("Re-initial TxDataTimer****\n"); del_timer(&pDevice->sTimerTxData); init_timer(&pDevice->sTimerTxData); pDevice->sTimerTxData.data = (unsigned long) pDevice; pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData; pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback pDevice->fTxDataInSleep = false; pDevice->nTxDataTimeCout = 0; } else { ; } pDevice->IsTxDataTrigger = true; add_timer(&pDevice->sTimerTxData); #endif } else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) { ; } else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if associated_frame delay! pDevice->byLinkWaitCount ++; ; spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; #if 0 #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT // if(pDevice->bWPASuppWextEnabled == true) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; ; wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif #endif s_bCommandComplete(pDevice); break; case WLAN_CMD_AP_MODE_START : DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n"); if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { del_timer(&pMgmt->sTimerSecondCallback); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; pDevice->bLinkPass = false; if (pDevice->bEnableHostWEP == true) BSSvClearNodeDBTable(pDevice, 1); else BSSvClearNodeDBTable(pDevice, 0); pDevice->uAssocCount = 0; pMgmt->eCurrState = WMAC_STATE_IDLE; pDevice->bFixRate = false; vMgrCreateOwnIBSS((void *)pDevice, &Status); if (Status != CMD_STATUS_SUCCESS){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " vMgrCreateOwnIBSS fail ! 
\n"); } // alway turn off unicast bit MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_UNICAST); pDevice->byRxMode &= ~RCR_UNICAST; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode ); BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pDevice->bLinkPass = true; add_timer(&pMgmt->sTimerSecondCallback); } s_bCommandComplete(pDevice); break; case WLAN_CMD_TX_PSPACKET_START : // DTIM Multicast tx if (pMgmt->sNodeDBTable[0].bRxPSPoll) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) { pMgmt->abyPSTxMap[0] &= ~byMask[0]; pDevice->bMoreData = false; } else { pDevice->bMoreData = true; } if (!device_dma0_xmit(pDevice, skb, 0)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n"); } pMgmt->sNodeDBTable[0].wEnQueueCnt--; } } // PS nodes tx for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive && pMgmt->sNodeDBTable[ii].bRxPSPoll) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n", ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt); while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { // clear tx map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; pDevice->bMoreData = false; } else { pDevice->bMoreData = true; } if (!device_dma0_xmit(pDevice, skb, ii)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n"); } pMgmt->sNodeDBTable[ii].wEnQueueCnt--; // check if sta ps enable, wait next pspoll // if sta ps disable, send all pending buffers. if (pMgmt->sNodeDBTable[ii].bPSEnable) break; } if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { // clear tx map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii); }
//extern int rfpwrstate_check(_adapter *padapter); static int netdev_close(struct net_device *pnetdev) { _adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev); RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - drv_close\n")); if(padapter->pwrctrlpriv.bInternalAutoSuspend == _TRUE) { //rfpwrstate_check(padapter); if(padapter->pwrctrlpriv.rf_pwrstate == rf_off) padapter->pwrctrlpriv.ps_flag = _TRUE; } padapter->net_closed = _TRUE; /* if(!padapter->hw_init_completed) { DBG_8192C("(1)871x_drv - drv_close, bup=%d, hw_init_completed=%d\n", padapter->bup, padapter->hw_init_completed); padapter->bDriverStopped = _TRUE; rtw_dev_unload(padapter); } else*/ if(padapter->pwrctrlpriv.rf_pwrstate == rf_on){ DBG_8192C("(2)871x_drv - drv_close, bup=%d, hw_init_completed=%d\n", padapter->bup, padapter->hw_init_completed); //s1. if(pnetdev) { if (!netif_queue_stopped(pnetdev)) netif_stop_queue(pnetdev); } #ifndef CONFIG_ANDROID //s2. //s2-1. issue rtw_disassoc_cmd to fw rtw_disassoc_cmd(padapter); //s2-2. indicate disconnect to os rtw_indicate_disconnect(padapter); //s2-3. rtw_free_assoc_resources(padapter, 1); //s2-4. rtw_free_network_queue(padapter,_TRUE); #endif // Close LED rtw_led_control(padapter, LED_CTL_POWER_OFF); } #ifdef CONFIG_BR_EXT //if (OPMODE & (WIFI_STATION_STATE | WIFI_ADHOC_STATE)) { //void nat25_db_cleanup(_adapter *priv); nat25_db_cleanup(padapter); } #endif // CONFIG_BR_EXT RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - drv_close\n")); DBG_8192C("-871x_drv - drv_close, bup=%d\n", padapter->bup); return 0; }
/** * stmmac_tx: * @priv: private driver structure * Description: it reclaims resources after transmission completes. */ static void stmmac_tx(struct stmmac_priv *priv) { unsigned int txsize = priv->dma_tx_size; while (priv->dirty_tx != priv->cur_tx) { int last; unsigned int entry = priv->dirty_tx % txsize; struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p = priv->dma_tx + entry; /* Check if the descriptor is owned by the DMA. */ if (priv->hw->desc->get_tx_owner(p)) break; /* Verify tx error by looking at the last segment */ last = priv->hw->desc->get_tx_ls(p); if (likely(last)) { int tx_error = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, priv->ioaddr); if (likely(tx_error == 0)) { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } else priv->dev->stats.tx_errors++; } TX_DBG("%s: curr %d, dirty %d\n", __func__, priv->cur_tx, priv->dirty_tx); if (likely(p->des2)) dma_unmap_single(priv->device, p->des2, priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE); if (unlikely(p->des3)) p->des3 = 0; if (likely(skb != NULL)) { /* * If there's room in the queue (limit it to size) * we add this skb back into the pool, * if it's the right size. */ if ((skb_queue_len(&priv->rx_recycle) < priv->dma_rx_size) && skb_recycle_check(skb, priv->dma_buf_sz)) __skb_queue_head(&priv->rx_recycle, skb); else dev_kfree_skb(skb); priv->tx_skbuff[entry] = NULL; } priv->hw->desc->release_tx_desc(p); entry = (++priv->dirty_tx) % txsize; } if (unlikely(netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { TX_DBG("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); } netif_tx_unlock(priv->dev); } }
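/*
 * stmmac_tx() above deliberately re-checks the queue state under
 * netif_tx_lock() before waking, to close the race with a concurrent
 * stmmac_xmit().  The mirror image of that pattern on the transmit side is
 * sketched below (a sketch of the general stop-and-recheck idiom, not the
 * exact stmmac code):
 */
static void sketch_stmmac_maybe_stop_tx(struct stmmac_priv *priv,
					struct net_device *dev)
{
	if (unlikely(stmmac_tx_avail(priv) <= MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		/*
		 * The completion path may have freed descriptors between the
		 * check and the stop; undo the stop in that case so the queue
		 * cannot be left stopped while room is available.
		 */
		if (stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))
			netif_wake_queue(dev);
	}
}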
fec_enet_tx(struct net_device *dev) #endif { struct fec_enet_private *fep; volatile cbd_t *bdp; struct sk_buff *skb; fep = dev->priv; /* lock while transmitting */ spin_lock(&fep->lock); bdp = fep->dirty_tx; while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) { if (bdp == fep->cur_tx && fep->tx_full == 0) break; skb = fep->tx_skbuff[fep->skb_dirty]; /* Check for errors. */ if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { fep->stats.tx_errors++; if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ fep->stats.tx_heartbeat_errors++; if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ fep->stats.tx_window_errors++; if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ fep->stats.tx_aborted_errors++; if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ fep->stats.tx_fifo_errors++; if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ fep->stats.tx_carrier_errors++; } else { #ifdef CONFIG_FEC_PACKETHOOK /* Packet hook ... */ if (fep->ph_txhandler && ((struct ethhdr *)skb->data)->h_proto == fep->ph_proto) { fep->ph_txhandler((__u8*)skb->data, skb->len, regval, fep->ph_priv); } #endif fep->stats.tx_packets++; } #ifndef final_version if (bdp->cbd_sc & BD_ENET_TX_READY) printk("HEY! Enet xmit interrupt and TX_READY.\n"); #endif /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (bdp->cbd_sc & BD_ENET_TX_DEF) fep->stats.collisions++; /* Free the sk buffer associated with this last transmit. */ #if 0 printk("TXI: %x %x %x\n", bdp, skb, fep->skb_dirty); #endif dev_kfree_skb_irq (skb/*, FREE_WRITE*/); fep->tx_skbuff[fep->skb_dirty] = NULL; fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; /* Update pointer to next buffer descriptor to be transmitted. */ if (bdp->cbd_sc & BD_ENET_TX_WRAP) bdp = fep->tx_bd_base; else bdp++; /* Since we have freed up a buffer, the ring is no longer * full. */ if (fep->tx_full) { fep->tx_full = 0; if (netif_queue_stopped(dev)) netif_wake_queue(dev); } #ifdef CONFIG_FEC_PACKETHOOK /* Re-read register. Not exactly guaranteed to be correct, but... */ if (fep->ph_regaddr) regval = *fep->ph_regaddr; #endif } fep->dirty_tx = (cbd_t *)bdp; spin_unlock(&fep->lock); }
static irqreturn_t ariadne_interrupt(int irq, void *data) { struct net_device *dev = (struct net_device *)data; volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; struct ariadne_private *priv; int csr0, boguscnt; int handled = 0; lance->RAP = CSR0; /* PCnet-ISA Controller Status */ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ return IRQ_NONE; /* generated by the board */ priv = netdev_priv(dev); boguscnt = 10; while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) { /* Acknowledge all of the current interrupt sources ASAP */ lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT); #ifdef DEBUG if (ariadne_debug > 5) { netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [", csr0, lance->RDP); if (csr0 & INTR) pr_cont(" INTR"); if (csr0 & INEA) pr_cont(" INEA"); if (csr0 & RXON) pr_cont(" RXON"); if (csr0 & TXON) pr_cont(" TXON"); if (csr0 & TDMD) pr_cont(" TDMD"); if (csr0 & STOP) pr_cont(" STOP"); if (csr0 & STRT) pr_cont(" STRT"); if (csr0 & INIT) pr_cont(" INIT"); if (csr0 & ERR) pr_cont(" ERR"); if (csr0 & BABL) pr_cont(" BABL"); if (csr0 & CERR) pr_cont(" CERR"); if (csr0 & MISS) pr_cont(" MISS"); if (csr0 & MERR) pr_cont(" MERR"); if (csr0 & RINT) pr_cont(" RINT"); if (csr0 & TINT) pr_cont(" TINT"); if (csr0 & IDON) pr_cont(" IDON"); pr_cont(" ]\n"); } #endif if (csr0 & RINT) { /* Rx interrupt */ handled = 1; ariadne_rx(dev); } if (csr0 & TINT) { /* Tx-done interrupt */ int dirty_tx = priv->dirty_tx; handled = 1; while (dirty_tx < priv->cur_tx) { int entry = dirty_tx % TX_RING_SIZE; int status = lowb(priv->tx_ring[entry]->TMD1); if (status & TF_OWN) break; /* It still hasn't been Txed */ priv->tx_ring[entry]->TMD1 &= 0xff00; if (status & TF_ERR) { /* There was an major error, log it */ int err_status = priv->tx_ring[entry]->TMD3; dev->stats.tx_errors++; if (err_status & EF_RTRY) dev->stats.tx_aborted_errors++; if (err_status & EF_LCAR) dev->stats.tx_carrier_errors++; if (err_status & EF_LCOL) dev->stats.tx_window_errors++; if (err_status & EF_UFLO) { /* Ackk! On FIFO errors the Tx unit is turned off! */ dev->stats.tx_fifo_errors++; /* Remove this verbosity later! */ netdev_err(dev, "Tx FIFO error! Status %04x\n", csr0); /* Restart the chip */ lance->RDP = STRT; } } else { if (status & (TF_MORE | TF_ONE)) dev->stats.collisions++; dev->stats.tx_packets++; } dirty_tx++; } #ifndef final_version if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", dirty_tx, priv->cur_tx, priv->tx_full); dirty_tx += TX_RING_SIZE; } #endif if (priv->tx_full && netif_queue_stopped(dev) && dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { /* The ring is no longer full */ priv->tx_full = 0; netif_wake_queue(dev); } priv->dirty_tx = dirty_tx; } /* Log misc errors */ if (csr0 & BABL) { handled = 1; dev->stats.tx_errors++; /* Tx babble */ } if (csr0 & MISS) { handled = 1; dev->stats.rx_errors++; /* Missed a Rx frame */ } if (csr0 & MERR) { handled = 1; netdev_err(dev, "Bus master arbitration failure, status %04x\n", csr0); /* Restart the chip */ lance->RDP = STRT; } } /* Clear any other interrupt, and set interrupt enable */ lance->RAP = CSR0; /* PCnet-ISA Controller Status */ lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON; if (ariadne_debug > 4) netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n", lance->RAP, lance->RDP); return IRQ_RETVAL(handled); }