Example No. 1
0
static int mwl_rx_ring_alloc(struct mwl_priv *priv)
{
	WLDBG_ENTER_INFO(DBG_LEVEL_4, "allocating %i (0x%x) bytes",
			 MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES);

	BUG_ON(!priv);

	priv->desc_data[0].prx_ring =
		(struct mwl_rx_desc *)dma_alloc_coherent(&priv->pdev->dev,
							 MAX_NUM_RX_RING_BYTES,
							 &priv->desc_data[0].pphys_rx_ring,
							 GFP_KERNEL);

	if (priv->desc_data[0].prx_ring == NULL) {
		WLDBG_ERROR(DBG_LEVEL_4, "can not alloc mem");
		WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory");
		return -ENOMEM;
	}

	memset(priv->desc_data[0].prx_ring, 0x00, MAX_NUM_RX_RING_BYTES);

	WLDBG_EXIT_INFO(DBG_LEVEL_4, "RX ring vaddr: 0x%x paddr: 0x%x",
			priv->desc_data[0].prx_ring, priv->desc_data[0].pphys_rx_ring);

	return 0;
}
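
The coherent RX ring allocated above must eventually be released with the same device, size, and DMA handle. Below is a minimal teardown sketch; the helper name is illustrative, not taken from the source. Note also that on recent kernels dma_alloc_coherent() already returns zeroed memory, so the explicit memset() is only defensive.

/* Teardown sketch (hypothetical helper, mirrors the allocation above). */
static void mwl_rx_ring_free(struct mwl_priv *priv)
{
	if (priv->desc_data[0].prx_ring != NULL) {
		dma_free_coherent(&priv->pdev->dev,
				  MAX_NUM_RX_RING_BYTES,
				  priv->desc_data[0].prx_ring,
				  priv->desc_data[0].pphys_rx_ring);
		priv->desc_data[0].prx_ring = NULL;
	}
}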
Example No. 2
0
int wlTxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;
    int num;

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "initializing %i descriptors", MAX_NUM_TX_DESC);
    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        QUEUE_INIT(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] =0;
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL)
        {
            for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++)
            {
                CURR_TXD(num).Status    = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
                CURR_TXD(num).pNext     = &NEXT_TXD(num);
                CURR_TXD(num).pPhysNext =
                    ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing +
                                  ((currDescr+1)*sizeof(wltxdesc_t)));
                WLDBG_INFO(DBG_LEVEL_12,
                           "txdesc: %i status: 0x%x (%i) vnext: 0x%p pnext: 0x%x",
                           currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                           CURR_TXD(num).pNext, ENDIAN_SWAP32(CURR_TXD(num).pPhysNext));
            }
            LAST_TXD(num).pNext = &FIRST_TXD(num);
            LAST_TXD(num).pPhysNext =
                ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc = &FIRST_TXD(num);
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc  = &FIRST_TXD(num);

            WLDBG_EXIT_INFO(DBG_LEVEL_12,
                            "last txdesc vnext: 0x%p pnext: 0x%x pstale 0x%x vfirst 0x%x",
                            LAST_TXD(num).pNext, ENDIAN_SWAP32(LAST_TXD(num).pPhysNext),
                            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc);
        }
        else
        {
            WLDBG_ERROR(DBG_LEVEL_12, "no valid TX mem");
            return FAIL;
        }
    }
    return SUCCESS;
}
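
wlTxRingInit() links each descriptor's pNext to its successor and closes the ring by pointing the last descriptor back at the first. A hypothetical sanity-check helper (not part of the driver; it assumes descData[num].pTxRing points at the first descriptor, which is how wlTxRingAlloc below lays the rings out) can assert that invariant:

/* Hypothetical check: after wlTxRingInit(), walking pNext for
 * MAX_NUM_TX_DESC hops from the first descriptor must land back on it. */
static int wlTxRingIsCircular(struct wlprivate_data *wlpd, int num)
{
    wltxdesc_t *desc = wlpd->descData[num].pTxRing;
    int hops;

    for (hops = 0; hops < MAX_NUM_TX_DESC; hops++)
        desc = desc->pNext;

    return desc == wlpd->descData[num].pTxRing;
}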
Example No. 3
0
int wlRxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",
                     MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES);

    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing =
        (wlrxdesc_t *) pci_alloc_consistent(wlpptr->pPciDev,
                                            MAX_NUM_RX_RING_BYTES,
                                            &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing == NULL)
    {
        WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
        return FAIL;
    }
    memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, 0x00, MAX_NUM_RX_RING_BYTES);
    WLDBG_EXIT_INFO(DBG_LEVEL_12, "RX ring vaddr: 0x%x paddr: 0x%x",
                    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
    return SUCCESS;
}
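
pci_alloc_consistent() is the legacy PCI-specific wrapper and has been removed from recent kernels; historically it expanded to dma_alloc_coherent() on the underlying struct device with GFP_ATOMIC. A drop-in sketch of the same allocation through the generic DMA API, using the field and variable names above:

    /* Sketch: pci_alloc_consistent(pdev, size, &handle) was defined as
     * dma_alloc_coherent(&pdev->dev, size, &handle, GFP_ATOMIC). */
    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing =
        (wlrxdesc_t *) dma_alloc_coherent(&wlpptr->pPciDev->dev,
                                          MAX_NUM_RX_RING_BYTES,
                                          &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing,
                                          GFP_ATOMIC);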
Example No. 4
0
int wlTxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int num;
    UINT8 *mem = (UINT8 *) pci_alloc_consistent(wlpptr->pPciDev,
                 MAX_NUM_TX_RING_BYTES *NUM_OF_DESCRIPTOR_DATA,
                 &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing);
    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {

        WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",MAX_NUM_TX_RING_BYTES, MAX_NUM_TX_RING_BYTES);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing =(wltxdesc_t *) (mem +num*MAX_NUM_TX_RING_BYTES);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing = (dma_addr_t)((UINT32)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing+num*MAX_NUM_TX_RING_BYTES);
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing == NULL)
        {
            WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
            return FAIL;
        }
        memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, 0x00, MAX_NUM_TX_RING_BYTES);
        WLDBG_EXIT_INFO(DBG_LEVEL_12, "TX ring vaddr: 0x%x paddr: 0x%x",
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
    }
    return SUCCESS;
}
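
All NUM_OF_DESCRIPTOR_DATA TX rings above are carved out of a single contiguous pci_alloc_consistent() block anchored at descData[0], so one free of that block releases every ring; if the allocation fails, the NULL test in the loop catches it on the first iteration because descData[0].pTxRing is mem itself. A hypothetical teardown sketch (the helper name and the local wlpd pointer are illustrative):

/* Hypothetical teardown: release the single contiguous TX ring block. */
static void wlTxRingFree(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    struct wlprivate_data *wlpd = (struct wlprivate_data *)(wlpptr->wlpd_p);
    int num;

    if (wlpd->descData[0].pTxRing != NULL)
    {
        pci_free_consistent(wlpptr->pPciDev,
                            MAX_NUM_TX_RING_BYTES * NUM_OF_DESCRIPTOR_DATA,
                            wlpd->descData[0].pTxRing,
                            wlpd->descData[0].pPhysTxRing);
        for (num = 0; num < NUM_OF_DESCRIPTOR_DATA; num++)
            wlpd->descData[num].pTxRing = NULL;
    }
}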
Example No. 5
0
extern SINT8 evtDot11MgtMsg(vmacApInfo_t *vmacSta_p, UINT8 *message, struct sk_buff *skb)
{
    MIB_AUTH_ALG  *mib_AuthAlg_p=vmacSta_p->Mib802dot11->AuthAlg;
    MhsmEvent_t smMsg;
    macmgmtQ_MgmtMsg3_t *MgmtMsg_p;
    extStaDb_StaInfo_t *StaInfo_p;

    if (message == NULL)
    {
        return 1;
    }
    WLDBG_ENTER(DBG_LEVEL_11);
    MgmtMsg_p = (macmgmtQ_MgmtMsg3_t *)message;

#ifdef FILTER_BSSID
    if ( ( memcmp(MgmtMsg_p->Hdr.DestAddr, vmacSta_p->macStaAddr,sizeof(IEEEtypes_MacAddr_t)) ||
            memcmp(MgmtMsg_p->Hdr.BssId, vmacSta_p->macBssId,sizeof(IEEEtypes_MacAddr_t)) )
            && ( MgmtMsg_p->Hdr.FrmCtl.Subtype != IEEE_MSG_PROBE_RQST ) )
#else
    if (memcmp(MgmtMsg_p->Hdr.DestAddr, vmacSta_p->macStaAddr,sizeof(IEEEtypes_MacAddr_t)) &&
            memcmp(MgmtMsg_p->Hdr.DestAddr, vmacSta_p->macStaAddr2,sizeof(IEEEtypes_MacAddr_t)) &&
            (MgmtMsg_p->Hdr.FrmCtl.Subtype != IEEE_MSG_PROBE_RQST) )
#endif
    {
        WLDBG_ENTER_INFO(DBG_LEVEL_11,"mgt frame %d rxved %2x-%2x-%2x-%2x-%2x-%2x\n",MgmtMsg_p->Hdr.FrmCtl.Subtype,
                         MgmtMsg_p->Hdr.DestAddr[0],
                         MgmtMsg_p->Hdr.DestAddr[1],
                         MgmtMsg_p->Hdr.DestAddr[2],
                         MgmtMsg_p->Hdr.DestAddr[3],
                         MgmtMsg_p->Hdr.DestAddr[4],
                         MgmtMsg_p->Hdr.DestAddr[5]);
        return 1;
    }
    if (isMacAccessList(vmacSta_p, &(MgmtMsg_p->Hdr.SrcAddr)) != SUCCESS)
    {
        return 1;
    }
    if ((StaInfo_p = extStaDb_GetStaInfo(vmacSta_p,&MgmtMsg_p->Hdr.SrcAddr,  0)) == NULL)
    {
        if(MgmtMsg_p->Hdr.FrmCtl.Subtype == IEEE_MSG_AUTHENTICATE)
        {
            //added call to check other VAP's StaInfo_p
            if ((StaInfo_p = extStaDb_GetStaInfo(vmacSta_p,&MgmtMsg_p->Hdr.SrcAddr,  2)))
            {
                macMgmtRemoveSta(vmacSta_p, StaInfo_p);
            }
            if ((StaInfo_p = macMgtStaDbInit(vmacSta_p,&MgmtMsg_p->Hdr.SrcAddr,&MgmtMsg_p->Hdr.DestAddr)) == NULL)
            {
                WLDBG_ENTER_INFO(DBG_LEVEL_11,"init data base fail\n");
                return 1;
            }
        }
        else if ((MgmtMsg_p->Hdr.FrmCtl.Subtype != IEEE_MSG_PROBE_RQST))
        {
            if ((MgmtMsg_p->Hdr.FrmCtl.Subtype != IEEE_MSG_DEAUTHENTICATE) &&
                    (MgmtMsg_p->Hdr.FrmCtl.Subtype !=  IEEE_MSG_PROBE_RSP) )

            {

                macMgmtMlme_SendDeauthenticateMsg(vmacSta_p,&MgmtMsg_p->Hdr.SrcAddr,
                                                  0,
                                                  IEEEtypes_REASON_CLASS2_NONAUTH);

            }
            return 1;
        }
    }
#ifdef AP_MAC_LINUX
    smMsg.devinfo = (void *) vmacSta_p;
#endif

    switch (MgmtMsg_p->Hdr.FrmCtl.Subtype)
    {
    case IEEE_MSG_AUTHENTICATE:
    {
        AuthRspSrvApMsg authRspMsg;
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_AUTHENTICATE message received. \n");
        memcpy(authRspMsg.rspMac, MgmtMsg_p->Hdr.SrcAddr, 6);
        authRspMsg.arAlg_in = MgmtMsg_p->Body.Auth.AuthAlg;
        {
            if (mib_AuthAlg_p->Type == AUTH_OPEN_OR_SHARED_KEY)
            {
                authRspMsg.arAlg = authRspMsg.arAlg_in;
            }
            else
            {
                authRspMsg.arAlg = mib_AuthAlg_p->Type;
            }
        }
        authRspMsg.mgtMsg = (UINT8 *)MgmtMsg_p;
        smMsg.event = AuthOdd;
        smMsg.pBody = (unsigned char *)&authRspMsg;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&StaInfo_p->mgtAuthRsp.super, &smMsg);
    }
    break;

    case IEEE_MSG_ASSOCIATE_RQST:
    {
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_ASSOCIATE_RQST message received. \n");
        smMsg.event = AssocReq;
        smMsg.pBody = (unsigned char *)MgmtMsg_p;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&StaInfo_p->mgtAssoc.super,&smMsg);
    }
    break;

    case IEEE_MSG_REASSOCIATE_RQST:
    {
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_REASSOCIATE_RQST message received. \n");
        smMsg.event = ReAssocReq;
        smMsg.pBody = (unsigned char *)MgmtMsg_p;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&StaInfo_p->mgtAssoc.super, &smMsg);
    }
    break;

    case IEEE_MSG_DISASSOCIATE:
    {
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_DISASSOCIATE message received. \n");
        smMsg.event = DisAssoc;
        smMsg.pBody = (unsigned char *)MgmtMsg_p;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&StaInfo_p->mgtAssoc.super, &smMsg);
    }
    break;

    case IEEE_MSG_DEAUTHENTICATE:
    {
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_DEAUTHENTICATE message received. \n");
        smMsg.event = DeAuth;
        smMsg.pBody = (unsigned char *)MgmtMsg_p;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&StaInfo_p->mgtAuthRsp.super, &smMsg);
    }
    break;

    /* Could be handled by HW */
    case IEEE_MSG_PROBE_RQST:
    {
        SyncSrvApMsg syncMsg;
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_PROBE_RQST message received. \n");
        syncMsg.opMode = infrastructure;
        syncMsg.mgtMsg = (UINT8 *)MgmtMsg_p;
        smMsg.event = ProbeReq;
        smMsg.pBody = (unsigned char *)&syncMsg;
#ifdef AP_MAC_LINUX
        smMsg.info = (void *) skb;
#endif
        mhsm_send_event((Mhsm_t *)&vmacSta_p->mgtSync.super, &smMsg);
    }
    break;

#if defined(QOS_FEATURE)||defined(IEEE80211H)
    case IEEE_MSG_QOS_ACTION:
    {
        WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_QOS_ACTION message received. \n");

        if(MgmtMsg_p->Body.Action.Category == HT_CATEGORY)
        {
            switch (MgmtMsg_p->Body.Action.Action)
            {
            case ACTION_SMPS:
            {
                extern int wlFwSetMimoPsHt(struct net_device *netdev, UINT8 *addr, UINT8 enable, UINT8 mode);
                IEEEtypes_SM_PwrCtl_t *p =  (IEEEtypes_SM_PwrCtl_t *)&MgmtMsg_p->Body.Act.Field.SmPwrCtl;
                WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_QOS_ACTION MIMO PS HT message received. \n");
                wlFwSetMimoPsHt(vmacSta_p->dev, (UINT8 *)MgmtMsg_p->Hdr.SrcAddr, p->Enable, p->Mode);
                break;
            }
#ifdef INTOLERANT40
            case ACTION_INFOEXCH:
            {
                extern void RecHTIntolerant(vmacApInfo_t *vmacSta_p,UINT8 enable);
                IEEEtypes_InfoExch_t *p =  (IEEEtypes_InfoExch_t *)&MgmtMsg_p->Body.Act.Field.InfoExch;
                WLDBG_INFO(DBG_LEVEL_11, "IEEE_MSG_QOS_ACTION Info Exch HT message received. \n");

                RecHTIntolerant(vmacSta_p,p->FortyMIntolerant);
                break;
            }
#endif //#ifdef INTOLERANT40
            default:
                break;
            }
            break;
        }
#ifdef IEEE80211H
        if(TRUE == macMgmtMlme_80211hAct(vmacSta_p,(macmgmtQ_MgmtMsg3_t *) MgmtMsg_p)) /* TRUE means the frame was ours and has been handled */
            break;
#endif /* IEEE80211H */
#ifdef COEXIST_20_40_SUPPORT
        if(TRUE == macMgmtMlme_80211PublicAction(vmacSta_p,(macmgmtQ_MgmtMsg3_t *) MgmtMsg_p)) /* TRUE means the frame was ours and has been handled */
            break;
#endif
#ifdef QOS_FEATURE
        /*smMsg.event = QoSAction;
        smMsg.pBody = MgmtMsg_p;
        mhsm_send_event((Mhsm_t *)&wlpptr->vmacSta_p->mgtSync.super, &smMsg);*/
        macMgmtMlme_QoSAct(vmacSta_p,(macmgmtQ_MgmtMsg3_t *) MgmtMsg_p);
        break;
#endif /* QOS_FEATURE */
    }
#endif
    default:
        break;
    }
    WLDBG_EXIT(DBG_LEVEL_11);

    return 0;
}
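
The destination-address dump near the top of this function feeds six separate byte arguments to a %2x format. If the WLDBG_* macros forward to printk() (they are used with printf-style formats throughout these examples, but that routing is an assumption here), the kernel's %pM extension prints the whole MAC address from a single pointer argument:

        /* Sketch, assuming printk-style formatting underneath WLDBG_*: */
        WLDBG_ENTER_INFO(DBG_LEVEL_11, "mgt frame %d rxved %pM\n",
                         MgmtMsg_p->Hdr.FrmCtl.Subtype, MgmtMsg_p->Hdr.DestAddr);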
Example No. 6
0
int wlRxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;

    WLDBG_ENTER_INFO(DBG_LEVEL_12,  "initializing %i descriptors", MAX_NUM_RX_DESC);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing != NULL)
    {
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize = MAX_AGGR_SIZE;
        for (currDescr = 0; currDescr < MAX_NUM_RX_DESC; currDescr++)
        {
            CURR_RXD.pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
            /* skb_linearize()/skb_reserve() must not see a NULL skb */
            if (CURR_RXD.pSkBuff == NULL)
            {
                WLDBG_ERROR(DBG_LEVEL_12, "rxdesc %i: no skbuff available", currDescr);
                return FAIL;
            }
            if(skb_linearize(CURR_RXD.pSkBuff))
            {
                WL_SKB_FREE(CURR_RXD.pSkBuff);
                printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
                return FAIL;
            }
            skb_reserve(CURR_RXD.pSkBuff , MIN_BYTES_HEADROOM);
            CURR_RXD.RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
            CURR_RXD.Status    = EAGLE_RXD_STATUS_OK;
            CURR_RXD.QosCtrl   = 0x0000;
            CURR_RXD.Channel   = 0x00;
            CURR_RXD.RSSI      = 0x00;
            CURR_RXD.SQ2       = 0x00;

            if (CURR_RXD.pSkBuff != NULL)
            {
                CURR_RXD.PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
                CURR_RXD.pBuffData = CURR_RXD.pSkBuff->data;
                CURR_RXD.pPhysBuffData =
                    ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
                                                 CURR_RXD.pSkBuff->data,
                                                 ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
                                                 PCI_DMA_FROMDEVICE));
                CURR_RXD.pNext = &NEXT_RXD;
                CURR_RXD.pPhysNext =
                    ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing +
                                  ((currDescr+1)*sizeof(wlrxdesc_t)));
                WLDBG_INFO(DBG_LEVEL_12,
                           "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
                           currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                           ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
                WLDBG_INFO(DBG_LEVEL_12,
                           "rxdesc: %i vnext: 0x%p pnext: 0x%x", currDescr,
                           CURR_RXD.pNext, ENDIAN_SWAP32(CURR_RXD.pPhysNext));
            } else
            {
                WLDBG_ERROR(DBG_LEVEL_12,
                            "rxdesc %i: no skbuff available", currDescr);
                return FAIL;
            }
        }
        LAST_RXD.pPhysNext =
            ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
        LAST_RXD.pNext             = &FIRST_RXD;
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = &FIRST_RXD;

        WLDBG_EXIT_INFO(DBG_LEVEL_12,
                        "last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
                        LAST_RXD.pNext, ENDIAN_SWAP32(LAST_RXD.pPhysNext),
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc);
        return SUCCESS;
    }
    WLDBG_ERROR(DBG_LEVEL_12, "no valid RX mem");
    return FAIL;
}
Example No. 7
0
static int mwl_rx_ring_init(struct mwl_priv *priv)
{
	int curr_desc;

	WLDBG_ENTER_INFO(DBG_LEVEL_4,  "initializing %i descriptors", SYSADPT_MAX_NUM_RX_DESC);

	if (priv->desc_data[0].prx_ring != NULL) {
		priv->desc_data[0].rx_buf_size = SYSADPT_MAX_AGGR_SIZE;

		for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) {
			CURR_RXD.psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

			/* skb_linearize()/skb_reserve() must not see a NULL skb */
			if (CURR_RXD.psk_buff == NULL) {
				WLDBG_ERROR(DBG_LEVEL_4,
					    "rxdesc %i: no skbuff available", curr_desc);
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no socket buffer");
				return -ENOMEM;
			}

			if (skb_linearize(CURR_RXD.psk_buff)) {
				dev_kfree_skb_any(CURR_RXD.psk_buff);
				WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no suitable memory");
				return -ENOMEM;
			}

			skb_reserve(CURR_RXD.psk_buff, SYSADPT_MIN_BYTES_HEADROOM);
			CURR_RXD.rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
			CURR_RXD.status = EAGLE_RXD_STATUS_OK;
			CURR_RXD.qos_ctrl = 0x0000;
			CURR_RXD.channel = 0x00;
			CURR_RXD.rssi = 0x00;

			if (CURR_RXD.psk_buff != NULL) {
				CURR_RXD.pkt_len = SYSADPT_MAX_AGGR_SIZE;
				CURR_RXD.pbuff_data = CURR_RXD.psk_buff->data;
				CURR_RXD.pphys_buff_data =
					ENDIAN_SWAP32(pci_map_single(priv->pdev,
								     CURR_RXD.psk_buff->data,
								     priv->desc_data[0].rx_buf_size,
								     PCI_DMA_FROMDEVICE));
				CURR_RXD.pnext = &NEXT_RXD;
				CURR_RXD.pphys_next =
					ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring +
						      ((curr_desc + 1) * sizeof(struct mwl_rx_desc)));
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
					   curr_desc, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
					   priv->desc_data[0].rx_buf_size, priv->desc_data[0].rx_buf_size);
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i vnext: 0x%p pnext: 0x%x", curr_desc,
					   CURR_RXD.pnext, ENDIAN_SWAP32(CURR_RXD.pphys_next));
			} else {
				WLDBG_ERROR(DBG_LEVEL_4,
					    "rxdesc %i: no skbuff available", curr_desc);
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no socket buffer");
				return -ENOMEM;
			}
		}
		LAST_RXD.pphys_next =
			ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring);
		LAST_RXD.pnext = &FIRST_RXD;
		priv->desc_data[0].pnext_rx_desc = &FIRST_RXD;

		WLDBG_EXIT_INFO(DBG_LEVEL_4,
				"last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
				LAST_RXD.pnext, ENDIAN_SWAP32(LAST_RXD.pphys_next),
				priv->desc_data[0].pnext_rx_desc);

		return 0;
	}

	WLDBG_ERROR(DBG_LEVEL_4, "no valid RX mem");
	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no valid RX mem");

	return -ENOMEM;
}
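
Every RX descriptor above owns an skb and a streaming DMA mapping, so teardown has to unmap and free each one before the coherent ring from the first example is released. A hypothetical cleanup sketch using the same field names (it assumes ENDIAN_SWAP32() is a plain byte swap, so applying it again recovers the CPU-order DMA address that was stored swapped):

/* Hypothetical cleanup: undo pci_map_single() and dev_alloc_skb() per descriptor. */
static void mwl_rx_ring_cleanup(struct mwl_priv *priv)
{
	int curr_desc;

	if (priv->desc_data[0].prx_ring == NULL)
		return;

	for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) {
		if (priv->desc_data[0].prx_ring[curr_desc].psk_buff == NULL)
			continue;

		pci_unmap_single(priv->pdev,
				 ENDIAN_SWAP32(priv->desc_data[0].prx_ring[curr_desc].pphys_buff_data),
				 priv->desc_data[0].rx_buf_size,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(priv->desc_data[0].prx_ring[curr_desc].psk_buff);
		priv->desc_data[0].prx_ring[curr_desc].psk_buff = NULL;
	}
}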