static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) { struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl; struct wcn36xx_dxe_desc *dxe = ctl->desc; dma_addr_t dma_addr; struct sk_buff *skb; while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; wcn36xx_dxe_fill_skb(ctl); switch (ch->ch_type) { case WCN36XX_DXE_CH_RX_L: dxe->ctrl = WCN36XX_DXE_CTRL_RX_L; break; case WCN36XX_DXE_CH_RX_H: dxe->ctrl = WCN36XX_DXE_CTRL_RX_H; break; default: wcn36xx_warn("Unknown channel"); } dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE, DMA_FROM_DEVICE); wcn36xx_rx_skb(wcn, skb); ctl = ctl->next; dxe = ctl->desc; } ch->head_blk_ctl = ctl; return 0; }
/*
 * wcn36xx_dxe_ch_alloc_skb - pre-populate every descriptor slot of an RX
 * channel's control-block ring with an skb.
 *
 * @wcn:    driver context (unused here; kept for interface symmetry)
 * @wcn_ch: channel whose desc_num ring entries are filled
 *
 * Return: always 0.
 */
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur_ctl = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		/*
		 * NOTE(review): the result of wcn36xx_dxe_fill_skb() is
		 * ignored; an skb allocation failure would silently leave
		 * this slot empty. Consider propagating the error and
		 * unwinding — verify the helper's return contract first.
		 */
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}
/*
 * wcn36xx_rx_handle_packets - RX interrupt handler for one DXE channel.
 *
 * @wcn:        driver context
 * @ch:         RX DXE channel to service
 * @ctrl:       descriptor control word used to re-arm a drained slot
 * @en_mask:    value written to WCN36XX_DXE_ENCH_ADDR after draining
 * @int_mask:   this channel's bit in the interrupt clear registers
 * @status_reg: register holding this channel's interrupt reason
 *
 * Reads and acknowledges the interrupt cause, then (under ch->lock) drains
 * every descriptor the DMA engine has completed, re-arming each slot with a
 * fresh skb before passing the received one up the stack.
 *
 * Return: always 0 (errors are logged, not propagated).
 */
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl, u32 en_mask,
				     u32 int_mask, u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	/* Acknowledge each condition the hardware reported. */
	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);
		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	/* Error-only interrupt: nothing completed, no descriptors to drain. */
	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	/*
	 * Drain until we hit a descriptor still marked valid (still owned
	 * by the DMA engine). READ_ONCE prevents the compiler from caching
	 * the ctrl word the hardware updates behind our back.
	 */
	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		/* GFP_ATOMIC: we are in interrupt context under a spinlock. */
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (0 == ret) {
			/* new skb allocation ok. Use the new one and queue
			 * the old one to network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		}
		/* else keep old skb not submitted and use it for rx DMA */

		/* Hand the slot back to hardware with the channel's ctrl word. */
		dxe->ctrl = ctrl;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	/* Re-enable the channel now that completed slots are re-armed. */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}