Example #1
0
/* Allocate the DMA-coherent RX descriptor ring for ring 0.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 * The ring is zeroed before use; wlRxRingCleanup/free is the caller's
 * responsibility on teardown.
 */
static int mwl_rx_ring_alloc(struct mwl_priv *priv)
{
	WLDBG_ENTER_INFO(DBG_LEVEL_4, "allocating %i (0x%x) bytes",
			 MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES);

	BUG_ON(!priv);

	/* dma_alloc_coherent() returns void *, so no cast is needed in C. */
	priv->desc_data[0].prx_ring =
		dma_alloc_coherent(&priv->pdev->dev,
				   MAX_NUM_RX_RING_BYTES,
				   &priv->desc_data[0].pphys_rx_ring,
				   GFP_KERNEL);

	if (priv->desc_data[0].prx_ring == NULL) {
		WLDBG_ERROR(DBG_LEVEL_4, "can not alloc mem");
		WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory");
		return -ENOMEM;
	}

	memset(priv->desc_data[0].prx_ring, 0x00, MAX_NUM_RX_RING_BYTES);

	/* Print the virtual address with %p: a pointer does not fit in an
	 * unsigned int on 64-bit builds, so 0x%x was wrong/truncating there.
	 */
	WLDBG_EXIT_INFO(DBG_LEVEL_4, "RX ring vaddr: %p paddr: 0x%x",
			priv->desc_data[0].prx_ring, priv->desc_data[0].pphys_rx_ring);

	return 0;
}
Example #2
0
/* Initialize all TX descriptor rings: mark every descriptor idle and link
 * the descriptors of each ring into a circular list (virtual and physical
 * next pointers).  Returns SUCCESS, or FAIL if any ring has not been
 * allocated yet (wlTxRingAlloc must have run first).
 *
 * NOTE(review): CURR_TXD/NEXT_TXD/LAST_TXD/FIRST_TXD appear to be macros
 * that expand using the local names `wlpptr`, `num` and `currDescr` --
 * do not rename these locals without checking the macro definitions.
 */
int wlTxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;
    int num;

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "initializing %i descriptors", MAX_NUM_TX_DESC);
    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        /* Empty the software TX queue and reset the firmware-owned count. */
        QUEUE_INIT(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] =0;
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL)
        {
            for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++)
            {
                /* Each descriptor points at the next one; the physical
                 * pointer is the ring base plus the next slot's offset. */
                CURR_TXD(num).Status    = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
                CURR_TXD(num).pNext     = &NEXT_TXD(num);
                CURR_TXD(num).pPhysNext =
                    ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing +
                                  ((currDescr+1)*sizeof(wltxdesc_t)));
                WLDBG_INFO(DBG_LEVEL_12,
                           "txdesc: %i status: 0x%x (%i) vnext: 0x%p pnext: 0x%x",
                           currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                           CURR_TXD(num).pNext, ENDIAN_SWAP32(CURR_TXD(num).pPhysNext));
            }
            /* Close the circle: last descriptor links back to the first. */
            LAST_TXD(num).pNext = &FIRST_TXD(num);
            LAST_TXD(num).pPhysNext =
                ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
            /* Both the stale (reclaim) and next (fill) cursors start at
             * the first descriptor of the ring. */
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc = &FIRST_TXD(num);
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc  = &FIRST_TXD(num);

            WLDBG_EXIT_INFO(DBG_LEVEL_12,
                            "last txdesc vnext: 0x%p pnext: 0x%x pstale 0x%x vfirst 0x%x",
                            LAST_TXD(num).pNext, ENDIAN_SWAP32(LAST_TXD(num).pPhysNext),
                            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc);
        }
        else
        {
            WLDBG_ERROR(DBG_LEVEL_12, "no valid TX mem");
            return FAIL;
        }
    }
    return SUCCESS;
}
Example #3
0
static int mwl_rx_refill(struct mwl_priv *priv, struct mwl_rx_desc *pdesc)
{
	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!priv);
	BUG_ON(!pdesc);

	pdesc->psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

	if (pdesc->psk_buff == NULL)
		goto nomem;

	if (skb_linearize(pdesc->psk_buff)) {
		dev_kfree_skb_any(pdesc->psk_buff);
		WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
		goto nomem;
	}

	skb_reserve(pdesc->psk_buff, SYSADPT_MIN_BYTES_HEADROOM);

	pdesc->status = EAGLE_RXD_STATUS_OK;
	pdesc->qos_ctrl = 0x0000;
	pdesc->channel = 0x00;
	pdesc->rssi = 0x00;

	pdesc->pkt_len = priv->desc_data[0].rx_buf_size;
	pdesc->pbuff_data = pdesc->psk_buff->data;
	pdesc->pphys_buff_data =
		ENDIAN_SWAP32(pci_map_single(priv->pdev,
					     pdesc->psk_buff->data,
					     priv->desc_data[0].rx_buf_size,
					     PCI_DMA_BIDIRECTIONAL));

	WLDBG_EXIT(DBG_LEVEL_4);

	return 0;

nomem:

	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory");

	return -ENOMEM;
}
Example #4
0
/* Release every in-flight TX buffer: purge the software queues, unmap the
 * DMA buffer of each descriptor that still holds an skb, free the skb and
 * return the descriptor to the idle state.  The ring memory itself is not
 * freed here.
 *
 * NOTE(review): CURR_TXD appears to be a macro expanding over the locals
 * `wlpptr`, `num` and `currDescr` -- do not rename these.
 */
void wlTxRingCleanup(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int cleanedTxDescr = 0;
    int currDescr;
    int num;

    WLDBG_ENTER(DBG_LEVEL_12);

    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        QUEUE_PURGE(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] =0;
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL)
        {
            for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++)
            {
                /* Only descriptors with an attached skb were ever given
                 * to the hardware and need unmapping. */
                if (CURR_TXD(num).pSkBuff != NULL)
                {
                    WLDBG_INFO(DBG_LEVEL_12,
                               "unmapped and free'd txdesc %i vaddr: 0x%p paddr: 0x%x",
                               currDescr, CURR_TXD(num).pSkBuff->data,
                               ENDIAN_SWAP32(CURR_TXD(num).PktPtr));
                    pci_unmap_single(wlpptr->pPciDev,
                                     ENDIAN_SWAP32(CURR_TXD(num).PktPtr),
                                     CURR_TXD(num).pSkBuff->len,
                                     PCI_DMA_TODEVICE);
                    {
                        WL_SKB_FREE(CURR_TXD(num).pSkBuff);
                    }
                    /* Return the descriptor to its post-init idle state. */
                    CURR_TXD(num).Status    = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
                    CURR_TXD(num).pSkBuff   = NULL;
                    CURR_TXD(num).PktPtr    = 0;
                    CURR_TXD(num).PktLen    = 0;
                    cleanedTxDescr++;
                }
            }
        }
    }
    WLDBG_EXIT_INFO(DBG_LEVEL_12, "cleaned %i TX descr", cleanedTxDescr);
}
Example #5
0
int wlRxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",
                     MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES);

    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing =
        (wlrxdesc_t *) pci_alloc_consistent(wlpptr->pPciDev,
                                            MAX_NUM_RX_RING_BYTES,
                                            &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing == NULL)
    {
        WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
        return FAIL;
    }
    memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, 0x00, MAX_NUM_RX_RING_BYTES);
    WLDBG_EXIT_INFO(DBG_LEVEL_12, "RX ring vaddr: 0x%x paddr: 0x%x",
                    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
    return SUCCESS;
}
Example #6
0
/* Tune the radio to `channel` with a fixed 20 MHz width and extension
 * channel above the control channel.  The band is inferred from the
 * channel number (<= 14 means 2.4 GHz).  Returns FALSE only when
 * wlchannelSet() reports a failure; an out-of-domain channel is silently
 * ignored and TRUE is returned, matching the original behavior.
 */
extern BOOLEAN wlSetRFChan(vmacApInfo_t *vmacSta_p,UINT32 channel)
{
	MIB_802DOT11 *mib = vmacSta_p->ShadowMib802dot11;
	MIB_PHY_DSSS_TABLE *PhyDSSSTable = mib->PhyDSSSTable;
	UINT32 band = (channel <= 14) ? FREQ_BAND_2DOT4GHZ : FREQ_BAND_5GHZ;
	CHNL_FLAGS Chanflag = PhyDSSSTable->Chanflag;

	Chanflag.ChnlWidth = CH_20_MHz_WIDTH;
	Chanflag.ExtChnlOffset = EXT_CH_ABOVE_CTRL_CH;

	/* Channels outside the regulatory domain are not an error. */
	if (!domainChannelValid(channel, band))
		return TRUE;

	Chanflag.FreqBand = band;

	if (wlchannelSet(vmacSta_p->dev, channel, Chanflag, 1))
	{
		WLDBG_EXIT_INFO(DBG_LEVEL_15, "setting channel failed");
		return FALSE;
	}
	return TRUE;
}
Example #7
0
/* Allocate one DMA-consistent region backing all NUM_OF_DESCRIPTOR_DATA
 * TX rings and carve it into per-ring virtual/physical base addresses.
 * Returns SUCCESS, or FAIL if the allocation fails.
 */
int wlTxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int num;
    UINT8 *mem;

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",
                     MAX_NUM_TX_RING_BYTES, MAX_NUM_TX_RING_BYTES);

    /* A single allocation backs every ring; ring `num` lives at offset
     * num * MAX_NUM_TX_RING_BYTES. */
    mem = (UINT8 *) pci_alloc_consistent(wlpptr->pPciDev,
                 MAX_NUM_TX_RING_BYTES *NUM_OF_DESCRIPTOR_DATA,
                 &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing);

    /* Check the allocation once, up front.  The old per-iteration check
     * on pTxRing only caught failure for num == 0, because
     * mem + num * MAX_NUM_TX_RING_BYTES is a non-NULL garbage pointer
     * for num > 0 when mem is NULL. */
    if (mem == NULL)
    {
        WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
        return FAIL;
    }

    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing =(wltxdesc_t *) (mem +num*MAX_NUM_TX_RING_BYTES);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing = (dma_addr_t)((UINT32)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing+num*MAX_NUM_TX_RING_BYTES);
        memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, 0x00, MAX_NUM_TX_RING_BYTES);
        WLDBG_EXIT_INFO(DBG_LEVEL_12, "TX ring vaddr: 0x%x paddr: 0x%x",
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
    }
    return SUCCESS;
}
Example #8
0
/* Populate the RX descriptor ring: give every descriptor a mapped skb and
 * link the descriptors into a circular list.  Returns SUCCESS, or FAIL if
 * the ring was never allocated or an skb cannot be obtained.
 *
 * NOTE(review): on a mid-loop failure the skbs attached to earlier
 * descriptors are not released here -- presumably the caller invokes the
 * ring cleanup path; verify against callers.
 */
int wlRxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;

    WLDBG_ENTER_INFO(DBG_LEVEL_12,  "initializing %i descriptors", MAX_NUM_RX_DESC);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing != NULL)
    {
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize = MAX_AGGR_SIZE;
        for (currDescr = 0; currDescr < MAX_NUM_RX_DESC; currDescr++)
        {
            CURR_RXD.pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);

            /* Check for allocation failure BEFORE touching the skb.  The
             * original code called skb_linearize()/skb_reserve() first and
             * only tested for NULL afterwards, dereferencing a NULL
             * pointer whenever dev_alloc_skb() failed. */
            if (CURR_RXD.pSkBuff == NULL)
            {
                WLDBG_ERROR(DBG_LEVEL_12,
                            "rxdesc %i: no skbuff available", currDescr);
                return FAIL;
            }
            if(skb_linearize(CURR_RXD.pSkBuff))
            {
                WL_SKB_FREE(CURR_RXD.pSkBuff);
                printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
                return FAIL;
            }
            skb_reserve(CURR_RXD.pSkBuff , MIN_BYTES_HEADROOM);
            CURR_RXD.RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
            CURR_RXD.Status    = EAGLE_RXD_STATUS_OK;
            CURR_RXD.QosCtrl   = 0x0000;
            CURR_RXD.Channel   = 0x00;
            CURR_RXD.RSSI      = 0x00;
            CURR_RXD.SQ2       = 0x00;

            CURR_RXD.PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
            CURR_RXD.pBuffData = CURR_RXD.pSkBuff->data;
            CURR_RXD.pPhysBuffData =
                ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
                                             CURR_RXD.pSkBuff->data,
                                             ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
                                             PCI_DMA_FROMDEVICE));
            /* Link this descriptor to the next slot in the ring. */
            CURR_RXD.pNext = &NEXT_RXD;
            CURR_RXD.pPhysNext =
                ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing +
                              ((currDescr+1)*sizeof(wlrxdesc_t)));
            WLDBG_INFO(DBG_LEVEL_12,
                       "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
                       currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                       ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
            WLDBG_INFO(DBG_LEVEL_12,
                       "rxdesc: %i vnext: 0x%p pnext: 0x%x", currDescr,
                       CURR_RXD.pNext, ENDIAN_SWAP32(CURR_RXD.pPhysNext));
        }
        /* Close the circle: last descriptor points back to the first. */
        LAST_RXD.pPhysNext =
            ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
        LAST_RXD.pNext             = &FIRST_RXD;
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = &FIRST_RXD;

        WLDBG_EXIT_INFO(DBG_LEVEL_12,
                        "last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
                        LAST_RXD.pNext, ENDIAN_SWAP32(LAST_RXD.pPhysNext),
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc);
        return SUCCESS;
    }
    WLDBG_ERROR(DBG_LEVEL_12, "no valid RX mem");
    return FAIL;
}
Example #9
0
static int mwl_rx_ring_init(struct mwl_priv *priv)
{
	int curr_desc;

	WLDBG_ENTER_INFO(DBG_LEVEL_4,  "initializing %i descriptors", SYSADPT_MAX_NUM_RX_DESC);

	if (priv->desc_data[0].prx_ring != NULL) {
		priv->desc_data[0].rx_buf_size = SYSADPT_MAX_AGGR_SIZE;

		for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) {
			CURR_RXD.psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

			if (skb_linearize(CURR_RXD.psk_buff)) {
				dev_kfree_skb_any(CURR_RXD.psk_buff);
				WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no suitable memory");
				return -ENOMEM;
			}

			skb_reserve(CURR_RXD.psk_buff, SYSADPT_MIN_BYTES_HEADROOM);
			CURR_RXD.rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
			CURR_RXD.status = EAGLE_RXD_STATUS_OK;
			CURR_RXD.qos_ctrl = 0x0000;
			CURR_RXD.channel = 0x00;
			CURR_RXD.rssi = 0x00;

			if (CURR_RXD.psk_buff != NULL) {
				CURR_RXD.pkt_len = SYSADPT_MAX_AGGR_SIZE;
				CURR_RXD.pbuff_data = CURR_RXD.psk_buff->data;
				CURR_RXD.pphys_buff_data =
					ENDIAN_SWAP32(pci_map_single(priv->pdev,
								     CURR_RXD.psk_buff->data,
								     priv->desc_data[0].rx_buf_size,
								     PCI_DMA_FROMDEVICE));
				CURR_RXD.pnext = &NEXT_RXD;
				CURR_RXD.pphys_next =
					ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring +
						      ((curr_desc + 1) * sizeof(struct mwl_rx_desc)));
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
					   curr_desc, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
					   priv->desc_data[0].rx_buf_size, priv->desc_data[0].rx_buf_size);
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i vnext: 0x%p pnext: 0x%x", curr_desc,
					   CURR_RXD.pnext, ENDIAN_SWAP32(CURR_RXD.pphys_next));
			} else {
				WLDBG_ERROR(DBG_LEVEL_4,
					    "rxdesc %i: no skbuff available", curr_desc);
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no socket buffer");
				return -ENOMEM;
			}
		}
		LAST_RXD.pphys_next =
			ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring);
		LAST_RXD.pnext = &FIRST_RXD;
		priv->desc_data[0].pnext_rx_desc = &FIRST_RXD;

		WLDBG_EXIT_INFO(DBG_LEVEL_4,
				"last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
				LAST_RXD.pnext, ENDIAN_SWAP32(LAST_RXD.pphys_next),
				priv->desc_data[0].pnext_rx_desc);

		return 0;
	}

	WLDBG_ERROR(DBG_LEVEL_4, "no valid RX mem");
	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no valid RX mem");

	return -ENOMEM;
}
Example #10
0
/* RX tasklet: drain completed descriptors from ring 0, hand the frames to
 * mac80211 via ieee80211_rx(), refill each consumed descriptor and then
 * re-enable the RX-ready interrupt bit.  `data` is the ieee80211_hw
 * pointer cast to unsigned long (standard tasklet calling convention).
 *
 * Descriptor ownership: a descriptor with rx_control ==
 * EAGLE_RXD_CTRL_DMA_OWN holds a completed frame; after processing it is
 * handed back to the hardware as EAGLE_RXD_CTRL_DRIVER_OWN.  At most
 * priv->recv_limit frames are processed per invocation.
 */
void mwl_rx_recv(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl_priv *priv;
	struct mwl_rx_desc *curr_desc;
	int work_done = 0;
	struct sk_buff *prx_skb = NULL;
	int pkt_len;
	struct ieee80211_rx_status status;
	struct mwl_vif *mwl_vif = NULL;
	struct ieee80211_hdr *wh;
	u32 status_mask;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!hw);
	priv = hw->priv;
	BUG_ON(!priv);

	curr_desc = priv->desc_data[0].pnext_rx_desc;

	if (curr_desc == NULL) {
		/* Nothing to do: unmask the RX-ready interrupt and allow
		 * the tasklet to be scheduled again. */
		status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
		writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
		       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

		priv->is_rx_schedule = false;

		WLDBG_EXIT_INFO(DBG_LEVEL_4, "busy or no receiving packets");
		return;
	}

	while ((curr_desc->rx_control == EAGLE_RXD_CTRL_DMA_OWN)
		&& (work_done < priv->recv_limit)) {
		prx_skb = curr_desc->psk_buff;
		if (prx_skb == NULL)
			goto out;
		pci_unmap_single(priv->pdev,
				 ENDIAN_SWAP32(curr_desc->pphys_buff_data),
				 priv->desc_data[0].rx_buf_size,
				 PCI_DMA_FROMDEVICE);
		pkt_len = curr_desc->pkt_len;

		if (skb_tailroom(prx_skb) < pkt_len) {
			WLDBG_PRINT("Critical error: not enough tail room =%x pkt_len=%x, curr_desc=%x, curr_desc_data=%x",
				    skb_tailroom(prx_skb), pkt_len, curr_desc, curr_desc->pbuff_data);
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		/* Drop frames received on a channel other than the one we
		 * are currently tuned to (stale descriptors). */
		if (curr_desc->channel != hw->conf.chandef.chan->hw_value) {
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		mwl_rx_prepare_status(curr_desc, &status);

		priv->noise = -curr_desc->noise_floor;

		wh = &((struct mwl_dma_data *)prx_skb->data)->wh;

		if (ieee80211_has_protected(wh->frame_control)) {
			/* Check if hw crypto has been enabled for
			 * this bss. If yes, set the status flags
			 * accordingly
			 */
			if (ieee80211_has_tods(wh->frame_control))
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr1);
			else
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr2);

			if (mwl_vif != NULL &&
			    mwl_vif->is_hw_crypto_enabled) {
				/*
				 * When MMIC ERROR is encountered
				 * by the firmware, payload is
				 * dropped and only 32 bytes of
				 * mwl8k Firmware header is sent
				 * to the host.
				 *
				 * We need to add four bytes of
				 * key information.  In it
				 * MAC80211 expects keyidx set to
				 * 0 for triggering Counter
				 * Measure of MMIC failure.
				 */
				if (status.flag & RX_FLAG_MMIC_ERROR) {
					struct mwl_dma_data *tr;

					tr = (struct mwl_dma_data *)prx_skb->data;
					memset((void *)&(tr->data), 0, 4);
					pkt_len += 4;
				}

				if (!ieee80211_is_auth(wh->frame_control))
					status.flag |= RX_FLAG_IV_STRIPPED |
						       RX_FLAG_DECRYPTED |
						       RX_FLAG_MMIC_STRIPPED;
			}
		}

		skb_put(prx_skb, pkt_len);
		mwl_rx_remove_dma_header(prx_skb, curr_desc->qos_ctrl);
		memcpy(IEEE80211_SKB_RXCB(prx_skb), &status, sizeof(status));
		ieee80211_rx(hw, prx_skb);
out:
		/* Always refill and hand the descriptor back to the
		 * hardware, even when the frame was dropped above. */
		mwl_rx_refill(priv, curr_desc);
		curr_desc->rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
		curr_desc->qos_ctrl = 0;
		curr_desc = curr_desc->pnext;
		work_done++;
	}

	/* Remember where to resume on the next tasklet run. */
	priv->desc_data[0].pnext_rx_desc = curr_desc;

	status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
	writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
	       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

	priv->is_rx_schedule = false;

	WLDBG_EXIT(DBG_LEVEL_4);
}