static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
        int ret;

        ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
                          IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc tx irq\n");
                goto out_err;
        }

        ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
                          "wcn36xx_rx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc rx irq\n");
                goto out_txirq;
        }

        enable_irq_wake(wcn->rx_irq);

        return 0;

out_txirq:
        free_irq(wcn->tx_irq, wcn);
out_err:
        return ret;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
        int ret;

        wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
        wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
        wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
        wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

        wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
        wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

        wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
        wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

        wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
        wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

        wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
        wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

        wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
        wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

        wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
        wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

        /* DXE control block allocation */
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
        if (ret)
                goto out_err;

        /* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
        ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
                                          WCN36XX_SMSM_WLAN_TX_ENABLE |
                                          WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
                                          WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
        if (ret)
                goto out_err;

        return 0;

out_err:
        wcn36xx_err("Failed to allocate DXE control blocks\n");
        wcn36xx_dxe_free_ctl_blks(wcn);
        return -ENOMEM;
}
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
        size_t s;
        void *cpu_addr;

        /* Allocate BD headers for MGMT frames */

        /* Where this comes from, ask QC */
        wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->mgmt_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        /* Allocate BD headers for DATA frames */

        /* Where this comes from, ask QC */
        wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->data_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        return 0;

out_err:
        wcn36xx_dxe_free_mem_pools(wcn);
        wcn36xx_err("Failed to allocate BD mempool\n");
        return -ENOMEM;
}
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_ctl *ctl = NULL;
        struct wcn36xx_dxe_desc *desc = NULL;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        ctl = ch->head_blk_ctl;
        spin_lock_irqsave(&ctl->next->skb_lock, flags);

        /*
         * If skb is not null that means that we reached the tail of the ring
         * hence ring is full. Stop queues to let mac80211 back off until ring
         * has an empty slot again.
         */
        if (NULL != ctl->next->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
                return -EBUSY;
        }
        spin_unlock_irqrestore(&ctl->next->skb_lock, flags);

        ctl->skb = NULL;
        desc = ctl->desc;

        /* Set source address of the BD we send */
        desc->src_addr_l = ctl->bd_phy_addr;
        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = sizeof(struct wcn36xx_tx_bd);
        desc->ctrl = ch->ctrl_bd;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "BD >>> ",
                         (char *)ctl->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        /* Set source address of the SKB we send */
        ctl = ctl->next;
        ctl->skb = skb;
        desc = ctl->desc;
        if (ctl->bd_cpu_addr) {
                wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
                return -EINVAL;
        }

        desc->src_addr_l = dma_map_single(NULL,
                                          ctl->skb->data,
                                          ctl->skb->len,
                                          DMA_TO_DEVICE);

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = ctl->skb->len;

        /* set dxe descriptor to VALID */
        desc->ctrl = ch->ctrl_skb;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
                         (char *)ctl->skb->data, ctl->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl->next;

        /*
         * When connected and trying to send data frame chip can be in sleep
         * mode and writing to the register will not wake up the chip. Instead
         * notify chip about new frame through SMSM bus.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                wcn->ctrl_ops->smsm_change_state(
                        0,
                        WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* indicate End Of Packet and generate interrupt on descriptor
                 * done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        return 0;
}
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct wcn36xx_tx_bd *bd,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
        struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;
        int ret;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        spin_lock_irqsave(&ch->lock, flags);
        ctl_bd = ch->head_blk_ctl;
        ctl_skb = ctl_bd->next;

        /*
         * If skb is not null that means that we reached the tail of the ring
         * hence ring is full. Stop queues to let mac80211 back off until ring
         * has an empty slot again.
         */
        if (NULL != ctl_skb->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }

        if (unlikely(ctl_skb->bd_cpu_addr)) {
                wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
                ret = -EINVAL;
                goto unlock;
        }

        desc_bd = ctl_bd->desc;
        desc_skb = ctl_skb->desc;

        ctl_bd->skb = NULL;

        /* write buffer descriptor */
        memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

        /* Set source address of the BD we send */
        desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
        desc_bd->dst_addr_l = ch->dxe_wq;
        desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc_bd, sizeof(*desc_bd));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "BD >>> ",
                         (char *)ctl_bd->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        desc_skb->src_addr_l = dma_map_single(wcn->dev,
                                              skb->data,
                                              skb->len,
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
                dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
                ret = -ENOMEM;
                goto unlock;
        }

        ctl_skb->skb = skb;
        desc_skb->dst_addr_l = ch->dxe_wq;
        desc_skb->fr_len = ctl_skb->skb->len;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc_skb, sizeof(*desc_skb));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
                         (char *)ctl_skb->skb->data, ctl_skb->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl_skb->next;

        /* Commit all previous writes and set descriptors to VALID */
        wmb();
        desc_skb->ctrl = ch->ctrl_skb;
        wmb();
        desc_bd->ctrl = ch->ctrl_bd;

        /*
         * When connected and trying to send data frame chip can be in sleep
         * mode and writing to the register will not wake up the chip. Instead
         * notify chip about new frame through SMSM bus.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
                                            WCN36XX_SMSM_WLAN_TX_ENABLE,
                                            WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* indicate End Of Packet and generate interrupt on descriptor
                 * done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        ret = 0;
unlock:
        spin_unlock_irqrestore(&ch->lock, flags);
        return ret;
}
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *ch,
                                     u32 ctrl,
                                     u32 en_mask,
                                     u32 int_mask,
                                     u32 status_reg)
{
        struct wcn36xx_dxe_desc *dxe;
        struct wcn36xx_dxe_ctl *ctl;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        u32 int_reason;
        int ret;

        wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_ERR_CLR,
                                           int_mask);

                wcn36xx_err("DXE IRQ reported error on RX channel\n");
        }

        if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_DONE_CLR,
                                           int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_ED_CLR,
                                           int_mask);

        if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                            WCN36XX_CH_STAT_INT_ED_MASK)))
                return 0;

        spin_lock(&ch->lock);

        ctl = ch->head_blk_ctl;
        dxe = ctl->desc;

        while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
                ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
                if (0 == ret) {
                        /* new skb allocation ok. Use the new one and queue
                         * the old one to network system.
                         */
                        dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
                        wcn36xx_rx_skb(wcn, skb);
                }
                /* else keep old skb not submitted and use it for rx DMA */

                dxe->ctrl = ctrl;
                ctl = ctl->next;
                dxe = ctl->desc;
        }
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

        ch->head_blk_ctl = ctl;

        spin_unlock(&ch->lock);

        return 0;
}
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;
        int int_src, int_reason;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
                                          &int_reason);

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);

                if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ERR_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);

                        wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
                                    int_src);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_DONE_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ED_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);
                }

                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
                            int_reason);

                if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                                  WCN36XX_CH_STAT_INT_ED_MASK))
                        reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
        }

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
                                          &int_reason);

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);

                if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ERR_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);

                        wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
                                    int_src);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_DONE_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ED_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);
                }

                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
                            int_reason);

                if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                                  WCN36XX_CH_STAT_INT_ED_MASK))
                        reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
        }

        return IRQ_HANDLED;
}
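/*
 * A minimal sketch, not part of the upstream driver: the high and low TX
 * branches in wcn36xx_irq_tx_complete() above differ only in their channel
 * constants, so they could plausibly be folded into one helper, mirroring how
 * wcn36xx_rx_handle_packets() is parametrized for the RX channels. The helper
 * name and its "name" argument are hypothetical; everything it calls appears
 * in the functions above.
 */
static void wcn36xx_irq_tx_ch(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch,
                              u32 int_mask, u32 status_reg, const char *name)
{
        int int_reason;

        /* Read and acknowledge the per-channel interrupt status */
        wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ERR_CLR,
                                           int_mask);
                wcn36xx_err("DXE IRQ reported error in %s TX channel\n", name);
        }

        if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_DONE_CLR,
                                           int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           int_mask);

        wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready %s, reason %08x\n",
                    name, int_reason);

        /* Reclaim completed descriptors only if something actually finished */
        if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                          WCN36XX_CH_STAT_INT_ED_MASK))
                reap_tx_dxes(wcn, ch);
}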