Example #1
int private_ioctl(PSDevice pDevice, struct ifreq *rq) {

	PSCmdRequest        pReq = (PSCmdRequest)rq;
    PSMgmtObject        pMgmt = &(pDevice->sMgmtObj);
	int 		        result = 0;
    PWLAN_IE_SSID       pItemSSID;
    SCmdBSSJoin         sJoinCmd;
    SCmdZoneTypeSet sZoneTypeCmd;
    SCmdScan            sScanCmd;
    SCmdStartAP         sStartAPCmd;
    SCmdSetWEP          sWEPCmd;
    SCmdValue           sValue;
    SBSSIDList          sList;
    SNodeList           sNodeList;
    PSBSSIDList         pList;
    PSNodeList          pNodeList;
    UINT                cbListCount;
    PKnownBSS           pBSS;
    PKnownNodeDB        pNode;
    UINT                ii, jj;
    SCmdLinkStatus      sLinkStatus;
    BYTE                abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
    BYTE                abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    DWORD               dwKeyIndex= 0;
    BYTE                abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
    LONG                ldBm;

    pReq->wResult = 0;

    switch(pReq->wCmdCode) {

    case WLAN_CMD_BSS_SCAN:

        if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
			result = -EFAULT;
			break;
		};

        pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
        if (pItemSSID->len != 0) {
            memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
            memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
        }
        spin_lock_irq(&pDevice->lock);
        if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
            BSSvClearBSSList((HANDLE)pDevice, FALSE);
        else
            BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin \n");

        if (pItemSSID->len != 0)
            bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
        else
            bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
        spin_unlock_irq(&pDevice->lock);
        break;

    case WLAN_CMD_ZONETYPE_SET:
        /* mike: not supported yet, so the read/write code below is unreachable */
        result = -EOPNOTSUPP;
        break;

        if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
            result = -EFAULT;
            break;
        }

        if (sZoneTypeCmd.bWrite == TRUE) {
            /* write zone type */
            if (sZoneTypeCmd.ZoneType == ZoneType_USA) {
                printk("set_ZoneType:USA\n");
            } else if (sZoneTypeCmd.ZoneType == ZoneType_Japan) {
                printk("set_ZoneType:Japan\n");
            } else if (sZoneTypeCmd.ZoneType == ZoneType_Europe) {
                printk("set_ZoneType:Europe\n");
            }
        } else {
            /* read zone type */
            BYTE zonetype = 0;

            if (zonetype == 0x00) {        /* USA */
                sZoneTypeCmd.ZoneType = ZoneType_USA;
            } else if (zonetype == 0x01) { /* Japan */
                sZoneTypeCmd.ZoneType = ZoneType_Japan;
            } else if (zonetype == 0x02) { /* Europe */
                sZoneTypeCmd.ZoneType = ZoneType_Europe;
            } else {                       /* unknown zone type */
                printk("Error:ZoneType[%x] Unknown ???\n", zonetype);
                result = -EFAULT;
                break;
            }
            if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
                result = -EFAULT;
                break;
            }
        }

        break;

    case WLAN_CMD_BSS_JOIN:

        if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
			result = -EFAULT;
			break;
		};

        pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
        memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
		memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
	    if (sJoinCmd.wBSSType == ADHOC) {
	        pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
	        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to adhoc mode\n");
	    }
	    else {
	        pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
	        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
	    }
	    if (sJoinCmd.bPSEnable == TRUE) {
            pDevice->ePSMode = WMAC_POWER_FAST;
//            pDevice->ePSMode = WMAC_POWER_MAX;
            pMgmt->wListenInterval = 2;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving On\n");
        }
        else {
            pDevice->ePSMode = WMAC_POWER_CAM;
            pMgmt->wListenInterval = 1;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off \n");
        }

        if (sJoinCmd.bShareKeyAuth == TRUE){
            pMgmt->bShareKeyAlgorithm = TRUE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
        }
        else {
            pMgmt->bShareKeyAlgorithm = FALSE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
        }
	    pDevice->uChannel = sJoinCmd.uChannel;
        netif_stop_queue(pDevice->dev);
        spin_lock_irq(&pDevice->lock);
        pMgmt->eCurrState = WMAC_STATE_IDLE;
        bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
        bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
        spin_unlock_irq(&pDevice->lock);
        break;

    case WLAN_CMD_SET_WEP:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WEP Key. \n");
        memset(&sWEPCmd, 0 ,sizeof(SCmdSetWEP));
        if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
			result = -EFAULT;
			break;
		};
	    if (sWEPCmd.bEnableWep != TRUE) {
	        int uu;

            pDevice->bEncryptionEnable = FALSE;
            pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
            spin_lock_irq(&pDevice->lock);
            for (uu = 0; uu < MAX_KEY_TABLE; uu++)
                MACvDisableKeyEntry(pDevice, uu);
            spin_unlock_irq(&pDevice->lock);
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable. \n");
            break;
        }

        for (ii = 0; ii < WLAN_WEP_NKEYS; ii ++) {
            if (sWEPCmd.bWepKeyAvailable[ii]) {
                if (ii == sWEPCmd.byKeyIndex)
//2006-1207-01<Modify>by Einsn Liu
//		    dwKeyIndex|= (1 << 31);
                    dwKeyIndex=ii|(1 << 31);
                else
                    dwKeyIndex = ii;
                spin_lock_irq(&pDevice->lock);
                KeybSetDefaultKey(  pDevice,
                                    &(pDevice->sKey),
                                    dwKeyIndex,
                                    sWEPCmd.auWepKeyLength[ii],
                                    NULL,
                                    (PBYTE)&sWEPCmd.abyWepKey[ii][0],
                                    KEY_CTL_WEP
                                  );
               spin_unlock_irq(&pDevice->lock);

            }
        }
        pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
        pDevice->bTransmitKey = TRUE;
        pDevice->bEncryptionEnable = TRUE;
        pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;

        break;

    case WLAN_CMD_GET_LINK:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status. \n");

        memset(sLinkStatus.abySSID, 0 , WLAN_SSID_MAXLEN + 1);

        if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
            sLinkStatus.wBSSType = ADHOC;
        else
            sLinkStatus.wBSSType = INFRA;

        if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
            sLinkStatus.byState = ADHOC_JOINTED;
        else
            sLinkStatus.byState = ADHOC_STARTED;

        sLinkStatus.uChannel = pMgmt->uCurrChannel;
        if (pDevice->bLinkPass == TRUE) {
            sLinkStatus.bLink = TRUE;
 		    pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
		    memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
		    memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
		    sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Link Success ! \n");
        }
        else {
            sLinkStatus.bLink = FALSE;
        }
        if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
			result = -EFAULT;
			break;
		};

        break;

    case WLAN_CMD_GET_LISTLEN:
		cbListCount = 0;
		pBSS = &(pMgmt->sBSSList[0]);
        for (ii = 0; ii < MAX_BSS_NUM; ii++) {
            pBSS = &(pMgmt->sBSSList[ii]);
            if (!pBSS->bActive)
                continue;
            cbListCount++;
        };
        sList.uItem = cbListCount;
        if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
			result = -EFAULT;
			break;
		};
        pReq->wResult = 0;
        break;

    case WLAN_CMD_GET_LIST:
        if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
			result = -EFAULT;
			break;
		};
        pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
        if (pList == NULL) {
            result = -ENOMEM;
            break;
        }
		pList->uItem = sList.uItem;
		pBSS = &(pMgmt->sBSSList[0]);
        for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
            pBSS = &(pMgmt->sBSSList[jj]);
            if (pBSS->bActive) {
    		    pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
    		    pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
    		    pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
    		    RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
    		    pList->sBSSIDList[ii].uRSSI = (UINT)ldBm;
//    		    pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI;
    		    memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
    		    pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
    		    memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
    		    memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
                if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
    		        pList->sBSSIDList[ii].byNetType = INFRA;
                }
                else {
    		        pList->sBSSIDList[ii].byNetType = ADHOC;
    		    }
    		    if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
    		        pList->sBSSIDList[ii].bWEPOn = TRUE;
                }
                else {
    		        pList->sBSSIDList[ii].bWEPOn = FALSE;
    		    }
    		    ii ++;
    		    if (ii >= pList->uItem)
    		        break;
            }
        }

        if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
            result = -EFAULT;
            kfree(pList);   /* don't leak the list when the copy fails */
            break;
        }
        kfree(pList);
        pReq->wResult = 0;
        break;

    case WLAN_CMD_GET_MIB:
        if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
			result = -EFAULT;
			break;
		};
        break;

    case WLAN_CMD_GET_STAT:
        if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
			result = -EFAULT;
			break;
		};
        break;
    case WLAN_CMD_STOP_MAC:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_STOP_MAC\n");
        // Todo xxxxxx
        netif_stop_queue(pDevice->dev);
        spin_lock_irq(&pDevice->lock);
        if (pDevice->bRadioOff == FALSE) {
            CARDbRadioPowerOff(pDevice);
        }
        pDevice->bLinkPass = FALSE;
        ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
        memset(pMgmt->abyCurrBSSID, 0, 6);
        pMgmt->eCurrState = WMAC_STATE_IDLE;
//        del_timer(&pDevice->sTimerCommand);
//        del_timer(&pMgmt->sTimerSecondCallback);
        pDevice->bCmdRunning = FALSE;
        spin_unlock_irq(&pDevice->lock);

        break;

    case WLAN_CMD_START_MAC:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
        // Todo xxxxxxx
        if (pDevice->bRadioOff == TRUE)
            CARDbRadioPowerOn(pDevice);
        break;

    case WLAN_CMD_SET_HOSTAPD:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD\n");

        if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
			result = -EFAULT;
			break;
		};
		if (sValue.dwValue == 1) {
            if (hostap_set_hostapd(pDevice, 1, 1) == 0){
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
            }
            else {
			    result = -EFAULT;
			    break;
			}
        }
        else {
            hostap_set_hostapd(pDevice, 0, 1);
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
        }

        break;

    case WLAN_CMD_SET_HOSTAPD_STA:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD_STA\n");

        break;
    case WLAN_CMD_SET_802_1X:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_802_1X\n");
        if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
			result = -EFAULT;
			break;
		};

		if (sValue.dwValue == 1) {
            pDevice->bEnable8021x = TRUE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
        }
        else {
            pDevice->bEnable8021x = FALSE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
        }

        break;


    case WLAN_CMD_SET_HOST_WEP:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOST_WEP\n");
        if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
			result = -EFAULT;
			break;
		};

		if (sValue.dwValue == 1) {
            pDevice->bEnableHostWEP = TRUE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
        }
        else {
            pDevice->bEnableHostWEP = FALSE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
        }

        break;

    case WLAN_CMD_SET_WPA:
         DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WPA\n");

        if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
			result = -EFAULT;
			break;
		};
		if (sValue.dwValue == 1) {
                     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
		   memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, U_ETHER_ADDR_LEN);
		   pDevice->bWPADEVUp = TRUE;
        }
        else {
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
	   pDevice->bWPADEVUp = FALSE;
        }

        break;

    case WLAN_CMD_AP_START:

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
        if (pDevice->bRadioOff == TRUE) {
            CARDbRadioPowerOn(pDevice);
            add_timer(&pMgmt->sTimerSecondCallback);
        }
        if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
			result = -EFAULT;
			break;
		};

	    if (sStartAPCmd.wBSSType == AP) {
	        pMgmt->eConfigMode = WMAC_CONFIG_AP;
	        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to AP mode\n");
	    }
	    else {
	        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct BSS type not set to AP mode\n");
			result = -EFAULT;
			break;
	    }


	    if (sStartAPCmd.wBBPType == PHY80211g) {
            pMgmt->byAPBBType = PHY_TYPE_11G;
        }
        else if (sStartAPCmd.wBBPType == PHY80211a) {
                 pMgmt->byAPBBType = PHY_TYPE_11A;
        }
        else {
            pMgmt->byAPBBType = PHY_TYPE_11B;
        }

        pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
        memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
		memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);

	    if ((sStartAPCmd.uChannel > 0)&&(sStartAPCmd.uChannel <= 14))
	        pDevice->uChannel = sStartAPCmd.uChannel;

	    if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
            pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
        else
            pMgmt->wIBSSBeaconPeriod = 100;

        if (sStartAPCmd.bShareKeyAuth == TRUE){
            pMgmt->bShareKeyAlgorithm = TRUE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
        }
        else {
            pMgmt->bShareKeyAlgorithm = FALSE;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
        }
        memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);

        if (sStartAPCmd.byBasicRate & BIT3) {
            pMgmt->abyIBSSSuppRates[2] |= BIT7;
            pMgmt->abyIBSSSuppRates[3] |= BIT7;
            pMgmt->abyIBSSSuppRates[4] |= BIT7;
            pMgmt->abyIBSSSuppRates[5] |= BIT7;
        } else if (sStartAPCmd.byBasicRate & BIT2) {
            pMgmt->abyIBSSSuppRates[2] |= BIT7;
            pMgmt->abyIBSSSuppRates[3] |= BIT7;
            pMgmt->abyIBSSSuppRates[4] |= BIT7;
        } else if (sStartAPCmd.byBasicRate & BIT1) {
            pMgmt->abyIBSSSuppRates[2] |= BIT7;
            pMgmt->abyIBSSSuppRates[3] |= BIT7;
        } else if (sStartAPCmd.byBasicRate & BIT0) {
            /* 1M basic only; the duplicated BIT1 test here was presumably meant to be BIT0 */
            pMgmt->abyIBSSSuppRates[2] |= BIT7;
        } else {
            /* default: 1M and 2M are basic rates */
            pMgmt->abyIBSSSuppRates[2] |= BIT7;
            pMgmt->abyIBSSSuppRates[3] |= BIT7;
        }

        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Support Rate= %x %x %x %x\n",
                pMgmt->abyIBSSSuppRates[2],
                pMgmt->abyIBSSSuppRates[3],
                pMgmt->abyIBSSSuppRates[4],
                pMgmt->abyIBSSSuppRates[5]
                );

        netif_stop_queue(pDevice->dev);
        spin_lock_irq(&pDevice->lock);
        bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
        spin_unlock_irq(&pDevice->lock);
        break;

    case WLAN_CMD_GET_NODE_CNT:

		cbListCount = 0;
		pNode = &(pMgmt->sNodeDBTable[0]);
        for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
            pNode = &(pMgmt->sNodeDBTable[ii]);
            if (!pNode->bActive)
                continue;
            cbListCount++;
        };

        sNodeList.uItem = cbListCount;
        if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
			result = -EFAULT;
			break;
		};
        pReq->wResult = 0;
        break;

    case WLAN_CMD_GET_NODE_LIST:

        if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
			result = -EFAULT;
			break;
		};
        pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
        if (pNodeList == NULL) {
            result = -ENOMEM;
            break;
        }
		pNodeList->uItem = sNodeList.uItem;
		pNode = &(pMgmt->sNodeDBTable[0]);
        for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
            pNode = &(pMgmt->sNodeDBTable[ii]);
            if (pNode->bActive) {
    		    pNodeList->sNodeList[jj].wAID = pNode->wAID;
    		    memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
    		    pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
    		    pNodeList->sNodeList[jj].wInActiveCount = (WORD)pNode->uInActiveCount;
    		    pNodeList->sNodeList[jj].wEnQueueCnt = (WORD)pNode->wEnQueueCnt;
    		    pNodeList->sNodeList[jj].wFlags = (WORD)pNode->dwFlags;
    		    pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
    		    pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
    		    pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
    		    memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
                            pNodeList->sNodeList[jj].abyWepKey[0],
                            pNodeList->sNodeList[jj].abyWepKey[1],
                            pNodeList->sNodeList[jj].abyWepKey[2],
                            pNodeList->sNodeList[jj].abyWepKey[3],
                            pNodeList->sNodeList[jj].abyWepKey[4]
                           );
    		    pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
    		    pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
    		    pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
    		    pNodeList->sNodeList[jj].wFailureRatio = (WORD)pNode->uFailureRatio;
    		    jj ++;
    		    if (jj >= pNodeList->uItem)
    		        break;
    		}
		};
        if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
            result = -EFAULT;
            kfree(pNodeList);   /* don't leak the node list when the copy fails */
            break;
        }
        kfree(pNodeList);
        pReq->wResult = 0;
        break;

#ifdef WPA_SM_Transtatus
    case 0xFF:
        memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
        wpa_Result.proto = 0;
        wpa_Result.key_mgmt = 0;
        wpa_Result.eap_type = 0;
        wpa_Result.authenticated = FALSE;
        pDevice->fWPA_Authened = FALSE;
        if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
            result = -EFAULT;
            break;
        }

        /* some APs may report a good authentication this way (DavidWang) */
        if (wpa_Result.key_mgmt == 0x20)
            pMgmt->Cisco_cckm = 1;
        else
            pMgmt->Cisco_cckm = 0;

        if (wpa_Result.authenticated == TRUE) {
#ifdef SndEvt_ToAPI
            {
                union iwreq_data wrqu;

                pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;

                memset(&wrqu, 0, sizeof(wrqu));
                wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
                wrqu.data.length = pItemSSID->len;
                wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
            }
#endif
            pDevice->fWPA_Authened = TRUE;
        }

        //printk("get private wpa_supplicant announce WPA SM\n");
	//printk("wpa-->ifname=%s\n",wpa_Result.ifname);
	//printk("wpa-->proto=%d\n",wpa_Result.proto);
	//printk("wpa-->key-mgmt=%d\n",wpa_Result.key_mgmt);
	//printk("wpa-->eap_type=%d\n",wpa_Result.eap_type);
	//printk("wpa-->authenticated is %s\n",(wpa_Result.authenticated==TRUE)?"TRUE":"FALSE");

	pReq->wResult = 0;
        break;
#endif

    default:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
    }

    return result;
}
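The handler above treats the struct ifreq it receives as a PSCmdRequest, so user space is expected to pass a request block that carries wCmdCode, a data pointer, and a wResult field. A minimal sketch of such a caller follows; the request-structure layout and the ioctl command number are assumptions, since neither is shown in the snippet.

/*
 * Hypothetical user-space caller for the private ioctl above, kept as a
 * sketch: the request layout mirrors how the handler uses pReq (name,
 * data pointer, wResult, wCmdCode), but the real definitions and the
 * actual ioctl command number live in the driver headers, so both are
 * assumptions here (SIOCDEVPRIVATE is only a stand-in).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

#define WLAN_CMD_BSS_SCAN 0x0001           /* assumed value */

struct s_cmd_request {                     /* assumed SCmdRequest layout */
	char           name[16];           /* interface name, as in struct ifreq */
	void          *data;               /* payload, e.g. an SCmdScan block */
	unsigned short wResult;
	unsigned short wCmdCode;
};

int main(void)
{
	struct s_cmd_request req;
	unsigned char scan_cmd[64];        /* stands in for SCmdScan */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	memset(scan_cmd, 0, sizeof(scan_cmd));
	strncpy(req.name, "wlan0", sizeof(req.name) - 1);
	req.wCmdCode = WLAN_CMD_BSS_SCAN;  /* dispatched by the switch above */
	req.data = scan_cmd;               /* copied in with copy_from_user() */

	if (ioctl(fd, SIOCDEVPRIVATE, &req) < 0)
		perror("private ioctl");
	close(fd);
	return 0;
}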
Example #2
/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->change_mtu = eth_change_mtu;
	net->hard_start_xmit = eth_start_xmit;
	net->open = eth_open;
	net->stop = eth_stop;
	/* watchdog_timeo, tx_timeout ... */
	/* set_multicast_list */
	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_stop_queue(net);
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		DECLARE_MAC_BUF(tmp);

		INFO(dev, "MAC %s\n", print_mac(tmp, net->dev_addr));
		INFO(dev, "HOST MAC %s\n", print_mac(tmp, dev->host_mac));

		the_dev = dev;
	}

	return status;
}
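Per the kernel-doc above, a gadget driver calls gether_setup() once to create its single usb%d network link. The sketch below shows how a driver's bind/unbind path might use it; everything except gether_setup() and its matching gether_cleanup() teardown is a placeholder.

/*
 * Sketch of a gadget driver using the link created by gether_setup().
 * my_bind()/my_unbind()/my_hostaddr are placeholders; gether_cleanup()
 * is assumed to be the matching teardown in the same u_ether framework.
 */
static u8 my_hostaddr[ETH_ALEN];

static int __init my_bind(struct usb_composite_dev *cdev)
{
	int status;

	/* Create the net_device and record the host-side MAC address. */
	status = gether_setup(cdev->gadget, my_hostaddr);
	if (status < 0)
		return status;

	/* ... add the USB configurations that carry network traffic ... */

	return 0;
}

static int my_unbind(struct usb_composite_dev *cdev)
{
	gether_cleanup();	/* tears down the link created in my_bind() */
	return 0;
}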
static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	int i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = netdev_priv(dev);

	netif_stop_queue(dev);

	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

	if (len != skb->len)
		memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);

#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
	p->xmit_buffs[0]->size = TBD_LAST | len;
	for (i = 0; i < 16; i++) {
		p->scb->cbl_offset = make16(p->xmit_cmds[0]);
		p->scb->cmd = CUC_START;
		p->xmit_cmds[0]->cmd_status = 0;
			elmc_attn586();
		dev->trans_start = jiffies;
		if (!i) {
			dev_kfree_skb(skb);
		}
		WAIT_4_SCB_CMD();
		if ((p->scb->status & CU_ACTIVE)) {	/* test it, because CU sometimes doesn't start immediately */
			break;
		}
		if (p->xmit_cmds[0]->cmd_status) {
			break;
		}
		if (i == 15) {
			printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
		}
	}
#else
	next_nop = (p->nop_point + 1) & 0x1;
	p->xmit_buffs[0]->size = TBD_LAST | len;

	p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;

	p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
	dev->trans_start = jiffies;
	p->nop_point = next_nop;
	dev_kfree_skb(skb);
#endif
#else
	p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
	if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
		next_nop = 0;
	}
	p->xmit_cmds[p->xmit_count]->cmd_status = 0;
	p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->nop_cmds[next_nop]->cmd_status = 0;
		p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
	dev->trans_start = jiffies;
	p->xmit_count = next_nop;
	if (p->xmit_count != p->xmit_last)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
#endif
	return 0;
}
Example #4
static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}
/* The start_xmit function is called when there is a packet to transmit. */
static int ccmni_v2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = NETDEV_TX_OK;
	int result = 0;
	int read_out, avai_in, avai_out, q_length, q_idx;
#if CCMNI_DBG_INFO
	dbg_info_ccmni_t	*dbg_info;
#endif

	unsigned char *ccmni_ptr;
	ccmni_v2_instance_t		*ccmni = netdev_priv(dev);
	ccmni_v2_ctl_block_t	*ctl_b = (ccmni_v2_ctl_block_t *)(ccmni->owner);
	int						md_id = ctl_b->m_md_id;
	ccci_msg_t				msg;
   
	spin_lock_bh(&ccmni->spinlock);

	if (ctl_b->ccci_is_ready==0) 
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d transfer data fail when modem not ready \n", ccmni->channel);
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}

	read_out = ccmni->shared_mem->tx_control.read_out;
	avai_in  = ccmni->shared_mem->tx_control.avai_in;
	avai_out = ccmni->shared_mem->tx_control.avai_out;
	q_length = ccmni->shared_mem->tx_control.q_length;

	if ((read_out < 0) || (avai_out < 0) || (avai_in < 0) || (q_length < 0))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto _ccmni_start_xmit_busy;
	}
	
	if ((read_out >= q_length) || (avai_out >= q_length) || (avai_in >= q_length))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto _ccmni_start_xmit_busy;
	}

	//Choose Q index
	q_idx = avai_out;
	ccmni_ptr = ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr;

	/* Check whether too much data is waiting to be read out, or whether
	 * the queue has not been initialized yet (ccmni_ptr is NULL in that
	 * case; haow.wang).
	 */
	if ((q_idx == avai_in) || (ccmni_ptr == NULL) )
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d TX busy and stop queue: q_idx=%d, skb->len=%d \n", \
			ccmni->channel, q_idx, skb->len);
        CCCI_DBG_MSG(md_id, "net", "       TX read_out = %d  avai_out = %d avai_in = %d\n", \
			ccmni->shared_mem->tx_control.read_out, ccmni->shared_mem->tx_control.avai_out, ccmni->shared_mem->tx_control.avai_in); 
        CCCI_DBG_MSG(md_id, "net", "       RX read_out = %d  avai_out = %d avai_in = %d\n", \
			ccmni->shared_mem->rx_control.read_out, ccmni->shared_mem->rx_control.avai_out, ccmni->shared_mem->rx_control.avai_in);
				
		netif_stop_queue(ccmni->dev);

		//Set CCMNI ready to ZERO, and wait for the ACK from modem side.
		ccmni->ready = 0;
        ret          = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}

	ccmni_ptr = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr));

    CCCI_CCMNI_MSG(md_id, "CCMNI%d_start_xmit: skb_len=%d, ccmni_ready=%d \n", \
		ccmni->channel, skb->len, ccmni->ready);
    
	if (skb->len > CCMNI_MTU)
	{
		//Sanity check; this should not happen!
		//Digest and return OK.
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d packet size exceed 1500 bytes: size=%d \n", \
			ccmni->channel, skb->len);
		dev->stats.tx_dropped++;
		goto _ccmni_start_xmit_exit;
	}

#if CCMNI_DBG_INFO
	//DBG info
	dbg_info = (dbg_info_ccmni_t *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
	dbg_info->avai_out_no = q_idx;
#endif

	memcpy(ccmni_ptr, skb->data, skb->len);
	ccmni->shared_mem->q_tx_ringbuff[q_idx].len = skb->len;

	//End byte
	*(unsigned char*)(ccmni_ptr + skb->len) = CCMNI_DATA_END;

	mb();

	//Update avail_out after data buffer filled
	q_idx++;
	ccmni->shared_mem->tx_control.avai_out = (q_idx & (q_length - 1));

	mb();

	msg.addr = 0;
	msg.len = skb->len;
	msg.channel = ccmni->uart_tx;
	msg.reserved = 0;
	result = ccci_message_send(md_id, &msg, 1);
	if (result==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
	{
		set_bit(CCMNI_SEND_PENDING,&ccmni->flags);
		ccmni->send_len +=skb->len;
		mod_timer(&ccmni->timer,jiffies);
	}
	else if (result==sizeof(ccci_msg_t))
		clear_bit(CCMNI_SEND_PENDING,&ccmni->flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes  += skb->len;
    
_ccmni_start_xmit_exit:

	dev_kfree_skb(skb);

_ccmni_start_xmit_busy:
    
	spin_unlock_bh(&ccmni->spinlock);
    
	return ret;
}
Example #6
int
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
	/* Check alignment and WDS frame formatting. The start of the packet should
	 * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
	 * and add WDS address information */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used  */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
			      src, skb->len);
#endif
		} else {
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	if (unlikely(pci_map_address == 0)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing  */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	/* set the transmission time */
	ndev->trans_start = jiffies;
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return 0;

      drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #7
static int nr_close(struct net_device *dev)
{
	ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
	netif_stop_queue(dev);
	return 0;
}
Example #8
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct pdp_info *dev = (struct pdp_info *)net->ml_priv;

#ifdef USE_LOOPBACK_PING
	int ret;
	struct sk_buff *skb2;
	struct icmphdr *icmph;
	struct iphdr *iph;
#endif

   DPRINTK(2, "BEGIN\n");

#ifdef USE_LOOPBACK_PING
	dev->vn_dev.stats.tx_bytes += skb->len;
	dev->vn_dev.stats.tx_packets++;

	skb2 = alloc_skb(skb->len, GFP_ATOMIC);
	if (skb2 == NULL) {
		DPRINTK(1, "alloc_skb() failed\n");
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	memcpy(skb2->data, skb->data, skb->len);
	skb_put(skb2, skb->len);
	dev_kfree_skb_any(skb);

	icmph = (struct icmphdr *)(skb2->data + sizeof(struct iphdr));
	iph = (struct iphdr *)skb2->data;

	icmph->type = __constant_htons(ICMP_ECHOREPLY);

	ret = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = ret;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb2->dev = net;
	skb2->protocol = __constant_htons(ETH_P_IP);

	netif_rx(skb2);

	dev->vn_dev.stats.rx_packets++;
	dev->vn_dev.stats.rx_bytes += skb->len;
#else
#if 0
   if (vnet_start_xmit_flag != 0) {
        if (vnet_start_xmit_count > 100000) {
            vnet_start_xmit_flag = 0;
            DPRINTK(1, "vnet_start_xmit_count exceed.. clear vnet_start_xmit_flag \n");
        }
        EPRINTK("vnet_start_xmit() return -EAGAIN \n");
        vnet_start_xmit_count++; 
        return -EAGAIN;
    }
    vnet_start_xmit_count = 0;
   vnet_start_xmit_flag = 1; 
#endif
	workqueue_data = (unsigned long)skb;
	PREPARE_WORK(&dev->vn_dev.xmit_task,vnet_defer_xmit);
	schedule_work(&dev->vn_dev.xmit_task);
	netif_stop_queue(net);
#endif

   DPRINTK(2, "END\n");
	return 0;
}
Example #9
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	/*
	 * insert packet followed by a nop
	 */
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	nop.nop_link = nopaddr;

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	/* now reset the previous nop pointer */
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	/* handle transmit */

	/* check to see if we have room for a full sized ether frame */
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
Example #10
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < drvr->hdrlen) {
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		drvr->bus_if->tx_realloc++;
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
			brcmf_err("%s: skb_realloc_headroom failed\n",
				  brcmf_ifname(drvr, ifp->bssidx));
			ret = -ENOMEM;
			goto done;
		}
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}
Example #11
static int vboxNetAdpLinuxStop(struct net_device *pNetDev)
{
    netif_stop_queue(pNetDev);
    return 0;
}
Example #12
static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sa1100_irda *si = dev->priv;
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			sa1100_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	if (!IS_FIR(si)) {
		netif_stop_queue(dev);

		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data,
						  si->tx_buff.truesize);

		/*
		 * Set the transmit interrupt enable.  This will fire
		 * off an interrupt immediately.  Note that we disable
		 * the receiver so we won't get spurious characters
		 * received.
		 */
		Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;

		dev_kfree_skb(skb);
	} else {
		int mtt = irda_get_mtt(skb);

		/*
		 * We must not be transmitting...
		 */
		BUG_ON(si->txskb);

		netif_stop_queue(dev);

		si->txskb = skb;
		si->txbuf_dma = dma_map_single(si->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);

		sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);

		/*
		 * If we have a mean turn-around time, impose the specified
		 * delay.  We could shorten this by timing from the point we
		 * received the packet.
		 */
		if (mtt)
			udelay(mtt);

		Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
	}

	dev->trans_start = jiffies;

	return 0;
}
Example #13
/*----------------------------------------------------------------
* p80211knetdev_hard_start_xmit
*
* Linux netdevice method for transmitting a frame.
*
* Arguments:
*	skb	Linux sk_buff containing the frame.
*	netdev	Linux netdevice.
*
* Side effects:
*	If the lower layers report that buffers are full, netdev->tbusy
*	will be set to prevent higher layers from sending more traffic.
*
*	Note: If this function returns non-zero, higher layers retain
*	      ownership of the skb.
*
* Returns:
*	zero on success, non-zero on failure.
----------------------------------------------------------------*/
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
					 netdevice_t *netdev)
{
	int result = 0;
	int txresult = -1;
	wlandevice_t *wlandev = netdev->ml_priv;
	union p80211_hdr p80211_hdr;
	struct p80211_metawep p80211_wep;

	if (skb == NULL)
		return NETDEV_TX_OK;

	if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(union p80211_hdr));
	memset(&p80211_wep, 0, sizeof(struct p80211_metawep));

	if (netif_queue_stopped(netdev)) {
		pr_debug("called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	netif_stop_queue(netdev);

	/* Check to see that a valid mode is set */
	switch (wlandev->macmode) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet; just drop the frame
		 * and return success.
		 * TODO: we need a saner way to handle this
		 */
		if (skb->protocol != ETH_P_80211_RAW) {
			netif_start_queue(wlandev->netdev);
			printk(KERN_NOTICE
			       "Tx attempt prior to association, frame dropped.\n");
			wlandev->linux_stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	if (skb->protocol == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr));
		skb_pull(skb, sizeof(union p80211_hdr));
	} else {
		if (skb_ether_to_p80211
		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
		     &p80211_wep) != 0) {
			/* convert failed */
			pr_debug("ether_to_80211(%d) failed.\n",
				 wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if (wlandev->txframe == NULL) {
		result = 1;
		goto failed;
	}

	netdev->trans_start = jiffies;

	wlandev->linux_stats.tx_packets++;
	/* count only the packet payload */
	wlandev->linux_stats.tx_bytes += skb->len;

	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if (txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		netif_wake_queue(wlandev->netdev);
		result = NETDEV_TX_OK;
	} else if (txresult == 1) {
		/* success, no more avail */
		pr_debug("txframe success, no more bufs\n");
		/* netdev->tbusy = 1;  don't set here, irqhdlr */
		/*   may have already cleared it */
		result = NETDEV_TX_OK;
	} else if (txresult == 2) {
		/* alloc failure, drop frame */
		pr_debug("txframe returned alloc_fail\n");
		result = NETDEV_TX_BUSY;
	} else {
		/* buffer full or queue busy, drop frame. */
		pr_debug("txframe returned full or busy\n");
		result = NETDEV_TX_BUSY;
	}

failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kzfree(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	return result;
}
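The comment block at the top of this example spells out the hard_start_xmit contract (zero means the skb was consumed, non-zero hands it back to the stack). For the pre-net_device_ops kernels this code targets, such a handler is wired up directly on the net_device, as Example #2 also shows; a minimal placeholder sketch:

/*
 * Sketch only: hooking a hard_start_xmit handler the pre-net_device_ops
 * way.  my_setup()/my_hard_start_xmit() are placeholders, not taken from
 * the driver above.
 */
static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hand the frame to hardware here ... */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;	/* zero: the skb was consumed */
}

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->hard_start_xmit = my_hard_start_xmit;
}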
Example #14
static int vnet_stop(struct net_device *ndev)
{
	netif_stop_queue(ndev);
	return 0;
}
Example #15
/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_stop_queue(net);
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}
static int interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
Example #17
static int bnep_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
Example #18
File: fec.c Project: CoerWatt/linux
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	unsigned int index;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (fep->bufdesc_ex)
		index = (struct bufdesc_ex *)bdp -
			(struct bufdesc_ex *)fep->tx_bd_base;
	else
		index = bdp - fep->tx_bd_base;

	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some design made an incorrect assumption on endian mode of
	 * the system that it's running on. As the result, driver has to
	 * swap every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[index] = skb;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
		ebdp->cbd_bdu = 0;
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en)) {
			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		} else {

			ebdp->cbd_esc = BD_ENET_TX_INT;
		}
	}
	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);

	fep->cur_tx = bdp;

	if (fep->cur_tx == fep->dirty_tx)
		netif_stop_queue(ndev);

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;
}
Example #19
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {	/* unmap the fragments mapped before the failure */
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
		  ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
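The in-code comment about ring->end - ring->start staying correct "even when ring->end overflows" depends on unsigned wraparound arithmetic. A tiny stand-alone illustration, assuming the ring indices behave like unsigned 32-bit counters (their exact type is not visible in the snippet):

/*
 * Stand-alone illustration of the wraparound note in bgmac_dma_tx_add():
 * with unsigned 32-bit counters, end - start still yields the number of
 * in-flight slots even after end wraps past UINT32_MAX.  The u32-style
 * indices are an assumption for this sketch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xFFFFFFFEu;	/* about to wrap around */
	uint32_t end   = start + 5;	/* wraps to 0x00000003 */

	printf("end=0x%08" PRIx32 " start=0x%08" PRIx32 " in-flight=%" PRIu32 "\n",
	       end, start, end - start);	/* prints in-flight=5 */
	return 0;
}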
static int netdev_open(struct net_device *pnetdev)
{
	uint status;	
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;

	RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - dev_open\n"));
	DBG_8192C("+871x_drv - drv_open, bup=%d\n", padapter->bup);

	if (padapter->bup == _FALSE) {
		padapter->bDriverStopped = _FALSE;
		padapter->bSurpriseRemoved = _FALSE;
		padapter->bCardDisableWOHSM = _FALSE;

		status = rtw_hal_init(padapter);		
		if (status ==_FAIL)
		{			
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("rtl871x_hal_init(): Can't init h/w!\n"));
			goto netdev_open_error;
		}
		
		DBG_8192C("MAC Address = %x-%x-%x-%x-%x-%x\n", 
				 pnetdev->dev_addr[0], pnetdev->dev_addr[1], pnetdev->dev_addr[2], pnetdev->dev_addr[3], pnetdev->dev_addr[4], pnetdev->dev_addr[5]);		

		
		status=rtw_start_drv_threads(padapter);
		if(status ==_FAIL)
		{			
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize driver software resource Failed!\n"));			
			goto netdev_open_error;			
		}


		if (init_mlme_ext_priv(padapter) == _FAIL)
		{
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("can't init mlme_ext_priv\n"));
			goto netdev_open_error;
		}


#ifdef CONFIG_DRVEXT_MODULE
		init_drvext(padapter);
#endif

		if(padapter->intf_start)
		{
			padapter->intf_start(padapter);
		}

#ifdef CONFIG_PROC_DEBUG
#ifndef RTK_DMP_PLATFORM
		rtw_proc_init_one(pnetdev);
#endif
#endif
		padapter->bup = _TRUE;
	}
	padapter->net_closed = _FALSE;

	_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);

	if(( pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE ) ||(padapter->pwrctrlpriv.bHWPwrPindetect))
	{
		padapter->pwrctrlpriv.bips_processing = _FALSE;	
		_set_timer(&padapter->pwrctrlpriv.pwr_state_check_timer, padapter->pwrctrlpriv.pwr_state_check_inverval);
 	}

	//netif_carrier_on(pnetdev); /* called when rtw_joinbss_event_callback returns success */
 	if(!netif_queue_stopped(pnetdev))
      		netif_start_queue(pnetdev);
	else
		netif_wake_queue(pnetdev);

	RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - dev_open\n"));
	DBG_8192C("-871x_drv - drv_open, bup=%d\n", padapter->bup);
		
	 return 0;
	
netdev_open_error:

	padapter->bup = _FALSE;
	
	netif_carrier_off(pnetdev);	
	netif_stop_queue(pnetdev);
	
	RT_TRACE(_module_os_intfs_c_,_drv_err_,("-871x_drv - dev_open, fail!\n"));
	DBG_8192C("-871x_drv - drv_open fail, bup=%d\n", padapter->bup);
	
	return (-1);
	
}
Example #21
static int rose_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	rose_del_loopback_node((rose_address *)dev->dev_addr);
	return 0;
}
Example #22
/*
 * Transmit a packet
 */
static int
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned int ptr, next_ptr;

	if (priv(dev)->broken) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		netif_start_queue(dev);
		return NETDEV_TX_OK;
	}

	length = (length + 1) & ~1;
	if (length != skb->len) {
		if (skb_padto(skb, length))
			goto out;
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;

	local_irq_save(flags);

	if (priv(dev)->tx_tail == next_ptr) {
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;	/* unable to queue */
	}

	ptr		 = 0x600 * priv(dev)->tx_head;
	priv(dev)->tx_head = next_ptr;
	next_ptr	*= 0x600;

#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)

	ether3_setbuffer(dev, buffer_write, next_ptr);
	ether3_writelong(dev, 0);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writelong(dev, 0);
	ether3_writebuffer(dev, skb->data, length);
	ether3_writeword(dev, htons(next_ptr));
	ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writeword(dev, htons((ptr + length + 4)));
	ether3_writeword(dev, TXHDR_FLAGS >> 16);
	ether3_ledon(dev);

	if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
		ether3_outw(ptr, REG_TRANSMITPTR);
		ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;
	local_irq_restore(flags);

	dev_kfree_skb(skb);

	if (priv(dev)->tx_tail == next_ptr)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
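Editorial note: the transmit examples in this listing all follow the same ndo_start_xmit flow-control contract: stop the queue before the TX ring fills, return NETDEV_TX_BUSY only when a packet cannot be accepted (without freeing the skb), and otherwise consume the skb and return NETDEV_TX_OK. A minimal sketch of that contract under assumed names (struct my_priv, MY_TX_RING_SLOTS and my_xmit are hypothetical, not part of any example above):

/* Hypothetical ring bookkeeping; tx_tail is advanced by the TX-done path. */
struct my_priv {
	unsigned int tx_head;
	unsigned int tx_tail;
};

#define MY_TX_RING_SLOTS 16

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned int next = (priv->tx_head + 1) % MY_TX_RING_SLOTS;

	if (next == priv->tx_tail) {
		/* Ring full: refuse the packet and do NOT free the skb;
		 * the stack will retry it later. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* ... copy the frame into the device buffer here, as
	 * ether3_sendpacket() does with ether3_writebuffer() ... */
	priv->tx_head = next;
	dev_kfree_skb_any(skb);

	/* Stop early so the stack never sees a full ring; the TX-done
	 * interrupt calls netif_wake_queue() once a slot frees up. */
	if (((priv->tx_head + 1) % MY_TX_RING_SLOTS) == priv->tx_tail)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}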
Example No. 23
/*
 * Wireless Handler: set operation mode
 */
int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	__u32 *wmode = &wrqu->mode;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	int rc = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMODE\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"Can't set operation mode, hostapd is running\n");
		return rc;
	}

	switch (*wmode) {
	case IW_MODE_ADHOC:
		if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
			pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
			if (pDevice->flags & DEVICE_FLAGS_OPENED)
				pDevice->bCommit = TRUE;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
		break;
	case IW_MODE_AUTO:
	case IW_MODE_INFRA:
		if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
			pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
			if (pDevice->flags & DEVICE_FLAGS_OPENED)
				pDevice->bCommit = TRUE;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
		break;
	case IW_MODE_MASTER:
		/* Master (AP) mode is not supported through this handler; fall
		 * back to ESS station config and report -EOPNOTSUPP.  The AP
		 * configuration block below is therefore unreachable. */
		pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
		rc = -EOPNOTSUPP;
		break;

		if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
			pMgmt->eConfigMode = WMAC_CONFIG_AP;
			if (pDevice->flags & DEVICE_FLAGS_OPENED)
				pDevice->bCommit = TRUE;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n");
		break;

	case IW_MODE_REPEAT:
		pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
		rc = -EOPNOTSUPP;
		break;
	default:
		rc = -EINVAL;
	}

	if (pDevice->bCommit) {
		if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
			netif_stop_queue(pDevice->dev);
			spin_lock_irq(&pDevice->lock);
			bScheduleCommand((void *) pDevice,
				WLAN_CMD_RUN_AP, NULL);
			spin_unlock_irq(&pDevice->lock);
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"Commit the settings\n");

			spin_lock_irq(&pDevice->lock);

			if (pDevice->bLinkPass &&
				memcmp(pMgmt->abyCurrSSID,
					pMgmt->abyDesireSSID,
					WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) {
				bScheduleCommand((void *) pDevice,
					WLAN_CMD_DISASSOCIATE, NULL);
			} else {
				pDevice->bLinkPass = FALSE;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
				memset(pMgmt->abyCurrBSSID, 0, 6);
			}

			ControlvMaskByte(pDevice,
				MESSAGE_REQUEST_MACREG,	MAC_REG_PAPEDELAY,
					LEDSTS_STS, LEDSTS_SLOW);

			netif_stop_queue(pDevice->dev);

			pMgmt->eScanType = WMAC_SCAN_ACTIVE;

			if (!pDevice->bWPASuppWextEnabled)
				bScheduleCommand((void *) pDevice,
					 WLAN_CMD_BSSID_SCAN,
					 pMgmt->abyDesireSSID);

			bScheduleCommand((void *) pDevice,
				 WLAN_CMD_SSID,
				 NULL);

			spin_unlock_irq(&pDevice->lock);
		}
		pDevice->bCommit = FALSE;
	}


	return rc;
}
Example No. 24
int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct lbs_private *priv = dev->priv;
	struct txpd *txpd;
	char *p802x_hdr;
	uint16_t pkt_len;
	int ret;

	lbs_deb_enter(LBS_DEB_TX);

#if (ANDROID_POWER_SAVE == 1)

	if (priv->deepsleep == false) {
		lbs_update_ps_timer();
	} else {
		if (priv->wifi_ps_work_req == 0) {
			printk("Need to wakeup for xmit: ether_proto=%x.\n", skb->protocol);
			priv->wifi_ps_work_req = WIFI_PS_PRE_WAKE;
			/*
			 * If no netif_carrier_off, upper layer will try again,
			 * and this may cause tx timeout and trigger watchdog.
			 */
			netif_stop_queue(priv->dev);
			netif_carrier_off(priv->dev);
			queue_delayed_work(priv->work_thread, &priv->ps_work,
					   msecs_to_jiffies(5));
		}
		return NETDEV_TX_BUSY;
	}
#endif

	ret = NETDEV_TX_OK;

	/* We need to protect against the queues being restarted before
	   we get round to stopping them */
	spin_lock_irqsave(&priv->driver_lock, flags);

	if (priv->surpriseremoved)
		goto free;

	if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) {
		lbs_deb_tx("tx err: skb length %d 0 or > %zd\n",
		       skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE);
		/* We'll never manage to send this one; drop it and return 'OK' */

		dev->stats.tx_dropped++;
		dev->stats.tx_errors++;
		goto free;
	}


	netif_stop_queue(priv->dev);

	if (priv->tx_pending_len) {
		/* This can happen if packets come in on the mesh and eth
		   device simultaneously -- there's no mutual exclusion on
		   hard_start_xmit() calls between devices. */
		lbs_deb_tx("Packet on %s while busy\n", dev->name);
		ret = NETDEV_TX_BUSY;
		goto unlock;
	}

	priv->tx_pending_len = -1;
	spin_unlock_irqrestore(&priv->driver_lock, flags);

	lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));

	txpd = (void *)priv->tx_pending_buf;
	memset(txpd, 0, sizeof(struct txpd));

	p802x_hdr = skb->data;
	pkt_len = skb->len;

	/* copy destination address from 802.3 header */
	memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN);

	txpd->tx_packet_length = cpu_to_le16(pkt_len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));

	lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd));

	lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(txpd->tx_packet_length));

	memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length));

	spin_lock_irqsave(&priv->driver_lock, flags);
	priv->tx_pending_len = pkt_len + sizeof(struct txpd);

	lbs_deb_tx("%s lined up packet\n", __func__);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev->trans_start = jiffies;

	if (priv->monitormode) {
		/* Keep the skb to echo it back once Tx feedback is
		   received from FW */
		skb_orphan(skb);

		/* Keep the skb around for when we get feedback */
		priv->currenttxskb = skb;
	} else {
 free:
		dev_kfree_skb_any(skb);
	}
 unlock:
	spin_unlock_irqrestore(&priv->driver_lock, flags);
	wake_up(&priv->waitq);

	lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret);
	return ret;
}
Example No. 25
static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return 0;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return 1;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		struct sk_buff	*skb_new;

		skb_new = dev->wrap(skb);
		if (!skb_new)
			goto drop;

		dev_kfree_skb_any(skb);
		skb = skb_new;
		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	req->zero = 1;
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
drop:
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return 0;
}
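Editorial note: eth_start_xmit() above stops the queue when the request freelist empties; the restart happens from the USB completion callback. A simplified sketch of that side follows. It reuses the names visible in the example (dev->tx_reqs, dev->req_lock, dev->tx_qlen) but ignores req->status error handling, so treat it as an illustration rather than the exact tx_complete() from u_ether.c.

/* Simplified completion handler: return the request to the freelist and
 * restart the queue that eth_start_xmit() may have stopped. */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	dev->net->stats.tx_packets++;
	dev->net->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}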
Example No. 26
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
Example No. 27
static u32 process_private_cmd(struct net_adapter *adapter, void *buffer)
{
	struct hw_private_packet	*cmd;
	struct wimax_cfg	*g_cfg = adapter->pdata->g_cfg;
	u8				*bufp;
	int					ret;

	cmd = (struct hw_private_packet *)buffer;

	switch (cmd->code) {
	case HWCODEMACRESPONSE: {
		u8 mac_addr[ETHERNET_ADDRESS_LENGTH];
		bufp = (u8 *)buffer;

		/* processing for mac_req request */
		pr_debug("MAC address = %02x:%02x:%02x:%02x:%02x:%02x",
				bufp[3], bufp[4], bufp[5],
				bufp[6], bufp[7], bufp[8]);
		memcpy(mac_addr, bufp + 3,
				ETHERNET_ADDRESS_LENGTH);

		/* create ethernet header */
		memcpy(adapter->hw.eth_header,
				mac_addr, ETHERNET_ADDRESS_LENGTH);
		memcpy(adapter->hw.eth_header + ETHERNET_ADDRESS_LENGTH,
				mac_addr, ETHERNET_ADDRESS_LENGTH);
		adapter->hw.eth_header[(ETHERNET_ADDRESS_LENGTH * 2) - 1] += 1;

		memcpy(adapter->net->dev_addr, bufp + 3,
				ETHERNET_ADDRESS_LENGTH);
		adapter->mac_ready = true;

		ret = sizeof(struct hw_private_packet)
			+ ETHERNET_ADDRESS_LENGTH - sizeof(u8);
		return ret;
	}
	case HWCODELINKINDICATION: {
		if ((cmd->value == HW_PROT_VALUE_LINK_DOWN)
			&& (adapter->media_state != MEDIA_DISCONNECTED)) {
			pr_debug("LINK_DOWN_INDICATION");

			/* set values */
			adapter->media_state = MEDIA_DISCONNECTED;
			g_cfg->wimax_status = WIMAX_STATE_READY;

			/* indicate link down */
			netif_stop_queue(adapter->net);
			netif_carrier_off(adapter->net);
		} else if ((cmd->value == HW_PROT_VALUE_LINK_UP)
			&& (adapter->media_state != MEDIA_CONNECTED)) {
			pr_debug("LINK_UP_INDICATION");

			/* set values */
			adapter->media_state = MEDIA_CONNECTED;
			g_cfg->wimax_status = WIMAX_STATE_NORMAL;
			adapter->net->mtu = WIMAX_MTU_SIZE;

			/* indicate link up */
			netif_start_queue(adapter->net);
			netif_carrier_on(adapter->net);
		}
		break;
	}
	case HWCODEHALTEDINDICATION: {
		pr_debug("Device is about to reset, stop driver");
		adapter->halted = true;
		break;
	}
	case HWCODERXREADYINDICATION: {
		pr_debug("Device RxReady");
		/*
		* to start the data packet send
		* queue again after stopping in xmit
		*/
		if (adapter->media_state == MEDIA_CONNECTED)
			netif_wake_queue(adapter->net);
		break;
	}
	case HWCODEIDLENTFY: {
		/* set idle / vi */
		if (g_cfg->wimax_status == WIMAX_STATE_NORMAL
				|| g_cfg->wimax_status == WIMAX_STATE_IDLE) {
			pr_debug("process_private_cmd: IDLE");
			g_cfg->wimax_status = WIMAX_STATE_IDLE;
		} else {
			pr_debug("process_private_cmd: VIRTUAL IDLE");
			g_cfg->wimax_status = WIMAX_STATE_VIRTUAL_IDLE;
		}
		break;
	}
	case HWCODEWAKEUPNTFY: {
		/*
		* IMPORTANT!! at least 4 sec
		* is required after modem waked up
		*/
		wake_lock_timeout(&g_cfg->wimax_wake_lock, 4 * HZ);

		if (g_cfg->wimax_status ==
				WIMAX_STATE_AWAKE_REQUESTED) {
			complete(&adapter->wakeup_event);
			break;
		}

		if (g_cfg->wimax_status == WIMAX_STATE_IDLE
			|| g_cfg->wimax_status == WIMAX_STATE_NORMAL) {
			pr_debug("process_private_cmd: IDLE -> NORMAL");
			g_cfg->wimax_status = WIMAX_STATE_NORMAL;
		} else {
			pr_debug("process_private_cmd: VI -> READY");
			g_cfg->wimax_status = WIMAX_STATE_READY;
		}
		break;
	}
	default:
		pr_debug("Command = %04x", cmd->code);
		break;
	}

	return sizeof(struct hw_private_packet);
}
Example No. 28
static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct local_info_t *lp = (struct local_info_t *)dev->priv;
    ioaddr_t ioaddr = dev->base_addr;

    netif_stop_queue(dev);

    {
	short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned char *buf = skb->data;

	if (length > ETH_FRAME_LEN) {
	    printk(KERN_NOTICE "%s: Attempting to send a large packet"
		   " (%d bytes).\n", dev->name, length);
	    return 1;
	}

	DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
	      dev->name, (unsigned long)skb->len);
	lp->stats.tx_bytes += skb->len;

	/* Disable both interrupts. */
	outw(0x0000, ioaddr + TX_INTR);

	/* wait for a while */
	udelay(1);

	outw(length, ioaddr + DATAPORT);
	outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);

	lp->tx_queue++;
	lp->tx_queue_len += ((length+3) & ~1);

	if (lp->tx_started == 0) {
	    /* If the Tx is idle, always trigger a transmit. */
	    outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
	    lp->sent = lp->tx_queue ;
	    lp->tx_queue = 0;
	    lp->tx_queue_len = 0;
	    dev->trans_start = jiffies;
	    lp->tx_started = 1;
	    netif_start_queue(dev);
	} else {
	    if( sram_config == 0 ) {
		if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) )
		    /* Yes, there is room for one more packet. */
		    netif_start_queue(dev);
	    } else {
		if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) && 
						lp->tx_queue < 127 )
		    /* Yes, there is room for one more packet. */
		    netif_start_queue(dev);
	    }
	}

	/* Re-enable interrupts */
	outb(D_TX_INTR, ioaddr + TX_INTR);
	outb(D_RX_INTR, ioaddr + RX_INTR);
    }
    dev_kfree_skb (skb);

    return 0;
} /* fjn_start_xmit */
Example No. 29
File: bgmac.c Project: deri82/linux
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
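Editorial note: bgmac_dma_tx_add() above stops the queue when the ring is nearly full; the queue is reopened from the TX-reclaim path once the hardware finishes slots. The sketch below shows that direction using the field names visible in the example; my_tx_reclaim() and my_hw_tx_index() are hypothetical names, and the real driver's bgmac_dma_tx_free() does considerably more.

/* Simplified reclaim path: free completed slots and reopen the queue. */
static void my_tx_reclaim(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;

	while (ring->start != my_hw_tx_index(bgmac, ring)) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		dma_unmap_single(dma_dev, slot->dma_addr, slot->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(slot->skb);
		slot->skb = NULL;

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
	}

	netif_wake_queue(bgmac->net_dev);
}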
Example No. 30
int adapter_close(struct net_device *net)
{
	dump_debug("adapter driver close success!!!!!!!");
	netif_stop_queue(net);
	return 0;
}