/*
 * Set up the receive path: allocate the RX descriptor ring, then
 * initialize it.  If initialization fails the ring memory is released
 * again, so the caller never sees a half-built RX path.
 *
 * Returns 0 on success or the negative error from the failing step.
 */
int mwl_rx_init(struct ieee80211_hw *hw)
{
	struct mwl_priv *priv;
	int rc;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!hw);
	priv = hw->priv;
	BUG_ON(!priv);

	rc = mwl_rx_ring_alloc(priv);
	if (rc) {
		WLDBG_ERROR(DBG_LEVEL_4, "allocating RX ring failed");
		goto done;
	}

	rc = mwl_rx_ring_init(priv);
	if (rc) {
		/* Don't leak the ring when descriptor setup fails. */
		mwl_rx_ring_free(priv);
		WLDBG_ERROR(DBG_LEVEL_4, "initializing RX ring failed");
	}

done:
	WLDBG_EXIT(DBG_LEVEL_4);
	return rc;
}
static int mwl_rx_ring_alloc(struct mwl_priv *priv) { WLDBG_ENTER_INFO(DBG_LEVEL_4, "allocating %i (0x%x) bytes", MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES); BUG_ON(!priv); priv->desc_data[0].prx_ring = (struct mwl_rx_desc *)dma_alloc_coherent(&priv->pdev->dev, MAX_NUM_RX_RING_BYTES, &priv->desc_data[0].pphys_rx_ring, GFP_KERNEL); if (priv->desc_data[0].prx_ring == NULL) { WLDBG_ERROR(DBG_LEVEL_4, "can not alloc mem"); WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory"); return -ENOMEM; } memset(priv->desc_data[0].prx_ring, 0x00, MAX_NUM_RX_RING_BYTES); WLDBG_EXIT_INFO(DBG_LEVEL_4, "RX ring vaddr: 0x%x paddr: 0x%x", priv->desc_data[0].prx_ring, priv->desc_data[0].pphys_rx_ring); return 0; }
/*
 * Initialize every TX descriptor ring of the device.
 *
 * For each of the NUM_OF_DESCRIPTOR_DATA queues this resets the software
 * txQ, zeroes the count of descriptors owned by firmware, marks each of
 * the MAX_NUM_TX_DESC descriptors idle and links them into a circular
 * list — both by virtual pointer (pNext) and by bus address (pPhysNext).
 *
 * Returns SUCCESS, or FAIL when a ring has no backing memory (i.e.
 * descData[num].pTxRing was never populated by wlTxRingAlloc()).
 *
 * NOTE(review): CURR_TXD/NEXT_TXD/LAST_TXD/FIRST_TXD presumably expand in
 * terms of the locals 'wlpptr', 'num' and 'currDescr' — their definitions
 * are outside this chunk; confirm before renaming any of those locals.
 */
int wlTxRingInit(struct net_device *netdev)
{
	struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
	int currDescr;
	int num;

	WLDBG_ENTER_INFO(DBG_LEVEL_12, "initializing %i descriptors", MAX_NUM_TX_DESC);

	for (num = 0; num < NUM_OF_DESCRIPTOR_DATA; num++) {
		/* Empty the per-queue software transmit queue and reset the
		 * counter of descriptors currently handed to firmware. */
		QUEUE_INIT(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
		((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] = 0;

		if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL) {
			for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++) {
				/* Mark the descriptor idle and chain it to the
				 * following one, both virtually and by bus
				 * address (the device follows pPhysNext). */
				CURR_TXD(num).Status = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
				CURR_TXD(num).pNext = &NEXT_TXD(num);
				CURR_TXD(num).pPhysNext =
					ENDIAN_SWAP32((u_int32_t)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing +
						      ((currDescr + 1) * sizeof(wltxdesc_t)));
				WLDBG_INFO(DBG_LEVEL_12,
					   "txdesc: %i status: 0x%x (%i) vnext: 0x%p pnext: 0x%x",
					   currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
					   CURR_TXD(num).pNext, ENDIAN_SWAP32(CURR_TXD(num).pPhysNext));
			}
			/* Close the circle: last descriptor points back to the first. */
			LAST_TXD(num).pNext = &FIRST_TXD(num);
			LAST_TXD(num).pPhysNext =
				ENDIAN_SWAP32((u_int32_t)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
			/* Both the reclaim cursor (pStaleTxDesc) and the fill
			 * cursor (pNextTxDesc) start at the first descriptor. */
			((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc = &FIRST_TXD(num);
			((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc = &FIRST_TXD(num);

			WLDBG_EXIT_INFO(DBG_LEVEL_12,
					"last txdesc vnext: 0x%p pnext: 0x%x pstale 0x%x vfirst 0x%x",
					LAST_TXD(num).pNext, ENDIAN_SWAP32(LAST_TXD(num).pPhysNext),
					((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc,
					((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc);
		} else {
			WLDBG_ERROR(DBG_LEVEL_12, "no valid TX mem");
			return FAIL;
		}
	}
	return SUCCESS;
}
static int mwl_rx_refill(struct mwl_priv *priv, struct mwl_rx_desc *pdesc) { WLDBG_ENTER(DBG_LEVEL_4); BUG_ON(!priv); BUG_ON(!pdesc); pdesc->psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size); if (pdesc->psk_buff == NULL) goto nomem; if (skb_linearize(pdesc->psk_buff)) { dev_kfree_skb_any(pdesc->psk_buff); WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory"); goto nomem; } skb_reserve(pdesc->psk_buff, SYSADPT_MIN_BYTES_HEADROOM); pdesc->status = EAGLE_RXD_STATUS_OK; pdesc->qos_ctrl = 0x0000; pdesc->channel = 0x00; pdesc->rssi = 0x00; pdesc->pkt_len = priv->desc_data[0].rx_buf_size; pdesc->pbuff_data = pdesc->psk_buff->data; pdesc->pphys_buff_data = ENDIAN_SWAP32(pci_map_single(priv->pdev, pdesc->psk_buff->data, priv->desc_data[0].rx_buf_size, PCI_DMA_BIDIRECTIONAL)); WLDBG_EXIT(DBG_LEVEL_4); return 0; nomem: WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory"); return -ENOMEM; }
int wlRxRingAlloc(struct net_device *netdev) { struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev); WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes", MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES); ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing = (wlrxdesc_t *) pci_alloc_consistent(wlpptr->pPciDev, MAX_NUM_RX_RING_BYTES, &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing); if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing == NULL) { WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem"); return FAIL; } memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, 0x00, MAX_NUM_RX_RING_BYTES); WLDBG_EXIT_INFO(DBG_LEVEL_12, "RX ring vaddr: 0x%x paddr: 0x%x", ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing); return SUCCESS; }
int wlTxRingAlloc(struct net_device *netdev) { struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev); int num; UINT8 *mem = (UINT8 *) pci_alloc_consistent(wlpptr->pPciDev, MAX_NUM_TX_RING_BYTES *NUM_OF_DESCRIPTOR_DATA, &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing); for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++) { WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",MAX_NUM_TX_RING_BYTES, MAX_NUM_TX_RING_BYTES); ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing =(wltxdesc_t *) (mem +num*MAX_NUM_TX_RING_BYTES); ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing = (dma_addr_t)((UINT32)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing+num*MAX_NUM_TX_RING_BYTES); if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing == NULL) { WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem"); return FAIL; } memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, 0x00, MAX_NUM_TX_RING_BYTES); WLDBG_EXIT_INFO(DBG_LEVEL_12, "TX ring vaddr: 0x%x paddr: 0x%x", ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing); } return SUCCESS; }
int wlRxRingInit(struct net_device *netdev) { struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev); int currDescr; WLDBG_ENTER_INFO(DBG_LEVEL_12, "initializing %i descriptors", MAX_NUM_RX_DESC); if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing != NULL) { ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize = MAX_AGGR_SIZE; for (currDescr = 0; currDescr < MAX_NUM_RX_DESC; currDescr++) { CURR_RXD.pSkBuff = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize); if(skb_linearize(CURR_RXD.pSkBuff)) { WL_SKB_FREE(CURR_RXD.pSkBuff); printk(KERN_ERR "%s: Need linearize memory\n", netdev->name); return FAIL; } skb_reserve(CURR_RXD.pSkBuff , MIN_BYTES_HEADROOM); CURR_RXD.RxControl = EAGLE_RXD_CTRL_DRIVER_OWN; CURR_RXD.Status = EAGLE_RXD_STATUS_OK; CURR_RXD.QosCtrl = 0x0000; CURR_RXD.Channel = 0x00; CURR_RXD.RSSI = 0x00; CURR_RXD.SQ2 = 0x00; if (CURR_RXD.pSkBuff != NULL) { CURR_RXD.PktLen = 6*netdev->mtu + NUM_EXTRA_RX_BYTES; CURR_RXD.pBuffData = CURR_RXD.pSkBuff->data; CURR_RXD.pPhysBuffData = ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev, CURR_RXD.pSkBuff->data, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize, PCI_DMA_FROMDEVICE)); CURR_RXD.pNext = &NEXT_RXD; CURR_RXD.pPhysNext = ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing + ((currDescr+1)*sizeof(wlrxdesc_t))); WLDBG_INFO(DBG_LEVEL_12, "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)", currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize); WLDBG_INFO(DBG_LEVEL_12, "rxdesc: %i vnext: 0x%p pnext: 0x%x", currDescr, CURR_RXD.pNext, ENDIAN_SWAP32(CURR_RXD.pPhysNext)); } else { WLDBG_ERROR(DBG_LEVEL_12, "rxdesc %i: no skbuff available", currDescr); return FAIL; } } LAST_RXD.pPhysNext = ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data 
*)(wlpptr->wlpd_p))->descData[0].pPhysRxRing); LAST_RXD.pNext = &FIRST_RXD; ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = &FIRST_RXD; WLDBG_EXIT_INFO(DBG_LEVEL_12, "last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x", LAST_RXD.pNext, ENDIAN_SWAP32(LAST_RXD.pPhysNext), ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc); return SUCCESS; } WLDBG_ERROR(DBG_LEVEL_12, "no valid RX mem"); return FAIL; }
static int mwl_rx_ring_init(struct mwl_priv *priv) { int curr_desc; WLDBG_ENTER_INFO(DBG_LEVEL_4, "initializing %i descriptors", SYSADPT_MAX_NUM_RX_DESC); if (priv->desc_data[0].prx_ring != NULL) { priv->desc_data[0].rx_buf_size = SYSADPT_MAX_AGGR_SIZE; for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) { CURR_RXD.psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size); if (skb_linearize(CURR_RXD.psk_buff)) { dev_kfree_skb_any(CURR_RXD.psk_buff); WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory"); WLDBG_EXIT_INFO(DBG_LEVEL_4, "no suitable memory"); return -ENOMEM; } skb_reserve(CURR_RXD.psk_buff, SYSADPT_MIN_BYTES_HEADROOM); CURR_RXD.rx_control = EAGLE_RXD_CTRL_DRIVER_OWN; CURR_RXD.status = EAGLE_RXD_STATUS_OK; CURR_RXD.qos_ctrl = 0x0000; CURR_RXD.channel = 0x00; CURR_RXD.rssi = 0x00; if (CURR_RXD.psk_buff != NULL) { CURR_RXD.pkt_len = SYSADPT_MAX_AGGR_SIZE; CURR_RXD.pbuff_data = CURR_RXD.psk_buff->data; CURR_RXD.pphys_buff_data = ENDIAN_SWAP32(pci_map_single(priv->pdev, CURR_RXD.psk_buff->data, priv->desc_data[0].rx_buf_size, PCI_DMA_FROMDEVICE)); CURR_RXD.pnext = &NEXT_RXD; CURR_RXD.pphys_next = ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring + ((curr_desc + 1) * sizeof(struct mwl_rx_desc))); WLDBG_INFO(DBG_LEVEL_4, "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)", curr_desc, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE, priv->desc_data[0].rx_buf_size, priv->desc_data[0].rx_buf_size); WLDBG_INFO(DBG_LEVEL_4, "rxdesc: %i vnext: 0x%p pnext: 0x%x", curr_desc, CURR_RXD.pnext, ENDIAN_SWAP32(CURR_RXD.pphys_next)); } else { WLDBG_ERROR(DBG_LEVEL_4, "rxdesc %i: no skbuff available", curr_desc); WLDBG_EXIT_INFO(DBG_LEVEL_4, "no socket buffer"); return -ENOMEM; } } LAST_RXD.pphys_next = ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring); LAST_RXD.pnext = &FIRST_RXD; priv->desc_data[0].pnext_rx_desc = &FIRST_RXD; WLDBG_EXIT_INFO(DBG_LEVEL_4, "last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x", LAST_RXD.pnext, ENDIAN_SWAP32(LAST_RXD.pphys_next), 
priv->desc_data[0].pnext_rx_desc); return 0; } WLDBG_ERROR(DBG_LEVEL_4, "no valid RX mem"); WLDBG_EXIT_INFO(DBG_LEVEL_4, "no valid RX mem"); return -ENOMEM; }