/* TX completion for one subqueue: acknowledge the descriptor, update stats,
 * unmap and free the skb, and wake the subqueue if it was stopped.
 */
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        desc = &priv->desc_ring[queue];
        cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
        if (likely(desc->skb)) {
                spin_lock(&priv->lock);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += desc->skb->len;
                spin_unlock(&priv->lock);
                dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                                 DMA_TO_DEVICE);

                if (unlikely(netif_msg_tx_done(priv)))
                        printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
                               desc->skb, desc->skb->len);

                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
                if (netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING
                               "%s: end_xmit: spurious interrupt\n", dev->name);
                if (netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
}
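/*
 * For context, a minimal sketch of the submit side that a completion handler
 * like cpmac_end_xmit() above pairs with: the subqueue is stopped while the
 * single descriptor slot is owned by the hardware, so the netif_wake_subqueue()
 * call in the completion path has something to restart. This is an illustrative
 * sketch under stated assumptions, not the driver's actual code; my_priv,
 * my_desc, MY_DESC_BUSY and my_queue_xmit are hypothetical names.
 */
#include <linux/netdevice.h>

#define MY_DESC_BUSY BIT(0)

struct my_desc {
        u32 flags;                      /* MY_DESC_BUSY while hardware owns the slot */
};

struct my_priv {
        struct my_desc desc_ring[8];    /* one single-slot descriptor per TX subqueue */
};

static netdev_tx_t my_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);
        int queue = skb_get_queue_mapping(skb);
        struct my_desc *desc = &priv->desc_ring[queue];

        /* Stop the subqueue up front; the completion handler wakes it again
         * once the descriptor has been reclaimed.
         */
        netif_stop_subqueue(dev, queue);
        if (desc->flags & MY_DESC_BUSY)
                return NETDEV_TX_BUSY;

        /* ... map the skb and hand the descriptor to the hardware ... */

        return NETDEV_TX_OK;
}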
/* Wake the TX (sub)queues whose frames were freed when a station was removed. */
void rtw_os_wake_queue_at_free_stainfo(_adapter *padapter, int *qcnt_freed)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
        int i;

        for (i = 0; i < 4; i++) {
                if (qcnt_freed[i] == 0)
                        continue;

                if (rtw_os_need_wake_queue(padapter, i)) {
                        if (DBG_DUMP_OS_QUEUE_CTL)
                                DBG_871X(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n",
                                         FUNC_ADPT_ARG(padapter), i);
                        netif_wake_subqueue(padapter->pnetdev, i);
                }
        }
#else
        if (qcnt_freed[0] || qcnt_freed[1] || qcnt_freed[2] || qcnt_freed[3]) {
                if (rtw_os_need_wake_queue(padapter, 0)) {
                        if (DBG_DUMP_OS_QUEUE_CTL)
                                DBG_871X(FUNC_ADPT_FMT": netif_wake_queue\n",
                                         FUNC_ADPT_ARG(padapter));
                        netif_wake_queue(padapter->pnetdev);
                }
        }
#endif
}
/* TX completion: wake the stopped subqueue when there is room again, then free the skb. */
void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt)
{
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        u16 queue;

        queue = skb_get_queue_mapping(pkt);
        if (padapter->registrypriv.wifi_spec) {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
                    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        } else {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        }

        dev_kfree_skb_any(pkt);
}
void iwm_tx_worker(struct work_struct *work)
{
        struct iwm_priv *iwm;
        struct iwm_tx_info *tx_info = NULL;
        struct sk_buff *skb;
        int cmdlen, ret;
        struct iwm_tx_queue *txq;
        int pool_id;

        txq = container_of(work, struct iwm_tx_queue, worker);
        iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

        pool_id = queue_to_pool_id(txq->id);

        while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
               !skb_queue_empty(&txq->queue)) {

                skb = skb_dequeue(&txq->queue);
                tx_info = skb_to_tx_info(skb);

                cmdlen = IWM_UDMA_HDR_LEN + skb->len;

                IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
                           "%d, color: %d\n", txq->id, skb, tx_info->sta,
                           tx_info->color);

#if !CONFIG_IWM_TX_CONCATENATED
                ret = iwm_send_packet(iwm, skb, pool_id);
#else
                if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
                        iwm_tx_send_concat_packets(iwm, txq);

                ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
                if (ret) {
                        IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                   "%d, Tx worker stopped\n", txq->id);
                        skb_queue_head(&txq->queue, skb);
                        break;
                }

                txq->concat_ptr = txq->concat_buf + txq->concat_count;
                iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                txq->concat_count += ALIGN(cmdlen, 16);
#endif
                kfree_skb(skb);
        }

        iwm_tx_send_concat_packets(iwm, txq);

        if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
            !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
            (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
                IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
                netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
        }
}
/* Compat helper: wake the device's TX queue; on multiqueue kernels also wake every subqueue. */
void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
{
        struct adapter_struct *adapter = netdev_priv(netdev);
        int i;

        netif_wake_queue(netdev);
        if (netif_is_multiqueue(netdev))
                for (i = 0; i < adapter->num_tx_queues; i++)
                        netif_wake_subqueue(netdev, i);
}
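/*
 * A plausible stop-side counterpart to the compat helper above, under the same
 * assumptions about adapter_struct and num_tx_queues; the name
 * _kc_netif_tx_stop_all_queues is an assumption here, shown only to illustrate
 * the single-queue/multiqueue split that the wake helper mirrors.
 */
void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
{
        struct adapter_struct *adapter = netdev_priv(netdev);
        int i;

        netif_stop_queue(netdev);
        if (netif_is_multiqueue(netdev))
                for (i = 0; i < adapter->num_tx_queues; i++)
                        netif_stop_subqueue(netdev, i);
}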
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
        u16 queue;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

        queue = skb_get_queue_mapping(pkt);
        if (padapter->registrypriv.wifi_spec) {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
                    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        } else {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        }

        rtw_skb_free(pkt);
}
void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
        u16 queue;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

        queue = skb_get_queue_mapping(pkt);
        if (padapter->registrypriv.wifi_spec) {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
                    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        } else {
                if (__netif_subqueue_stopped(padapter->pnetdev, queue))
                        netif_wake_subqueue(padapter->pnetdev, queue);
        }
#else
        if (netif_queue_stopped(padapter->pnetdev))
                netif_wake_queue(padapter->pnetdev);
#endif

        dev_kfree_skb_any(pkt);
}
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
        u16 queue;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

        queue = skb_get_queue_mapping(pkt);
        if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
            (pxmitpriv->hwxmits[queue].accnt < NR_XMITFRAME / 2))
                netif_wake_subqueue(padapter->pnetdev, queue);
#else
        if (netif_queue_stopped(padapter->pnetdev))
                netif_wake_queue(padapter->pnetdev);
#endif

        rtw_skb_free(pkt);
}
/* Reset worker: clear the RX/TX rings, restart the hardware, wake all TX
 * (sub)queues, and re-enable the MAC interrupts.
 */
static void cpmac_hw_error(struct work_struct *work)
{
        int i;
        struct cpmac_priv *priv = container_of(work, struct cpmac_priv,
                                               reset_work);

        spin_lock(&priv->rx_lock);
        cpmac_clear_rx(priv->dev);
        spin_unlock(&priv->rx_lock);
        cpmac_clear_tx(priv->dev);
        cpmac_hw_start(priv->dev);
        barrier();
        atomic_dec(&priv->reset_pending);

        for (i = 0; i < CPMAC_QUEUES; i++)
                netif_wake_subqueue(priv->dev, i);
        netif_wake_queue(priv->dev);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
        u16 qidx;

        qidx = skb_get_queue_mapping(pkt);
        if (rtw_os_need_wake_queue(padapter, qidx)) {
                if (DBG_DUMP_OS_QUEUE_CTL)
                        DBG_871X(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n",
                                 FUNC_ADPT_ARG(padapter), qidx);
                netif_wake_subqueue(padapter->pnetdev, qidx);
        }
#else
        if (rtw_os_need_wake_queue(padapter, 0)) {
                if (DBG_DUMP_OS_QUEUE_CTL)
                        DBG_871X(FUNC_ADPT_FMT": netif_wake_queue\n",
                                 FUNC_ADPT_ARG(padapter));
                netif_wake_queue(padapter->pnetdev);
        }
#endif
        rtw_skb_free(pkt);
}
/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
        struct net_device *netdev;
        struct lio *lio;

        netdev = oct->props[iq->ifidx].netdev;

        /* This is needed because the first IQ does not have
         * a netdev associated with it.
         */
        if (!netdev)
                return;

        lio = GET_LIO(netdev);
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
                netif_wake_subqueue(netdev, iq->q_index);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                          tx_restart, 1);
        }
}
/* TX worker: drain the software queue into the concatenation buffer while TX
 * credits are available, then wake the subqueue once it has drained below half
 * of IWM_TX_LIST_SIZE.
 */
void iwm_tx_worker(struct work_struct *work)
{
        struct iwm_priv *iwm;
        struct iwm_tx_info *tx_info = NULL;
        struct sk_buff *skb;
        struct iwm_tx_queue *txq;
        struct iwm_sta_info *sta_info;
        struct iwm_tid_info *tid_info;
        int cmdlen, ret, pool_id;

        txq = container_of(work, struct iwm_tx_queue, worker);
        iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

        pool_id = queue_to_pool_id(txq->id);

        while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
               !skb_queue_empty(&txq->queue)) {

                spin_lock_bh(&txq->lock);
                skb = skb_dequeue(&txq->queue);
                spin_unlock_bh(&txq->lock);

                tx_info = skb_to_tx_info(skb);
                sta_info = &iwm->sta_table[tx_info->sta];
                if (!sta_info->valid) {
                        IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
                        kfree_skb(skb);
                        continue;
                }

                tid_info = &sta_info->tid_info[tx_info->tid];

                mutex_lock(&tid_info->mutex);

                if (tid_info->stopped) {
                        IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
                                   tx_info->sta, tx_info->tid);
                        spin_lock_bh(&txq->lock);
                        skb_queue_tail(&txq->stopped_queue, skb);
                        spin_unlock_bh(&txq->lock);

                        mutex_unlock(&tid_info->mutex);
                        continue;
                }

                cmdlen = IWM_UDMA_HDR_LEN + skb->len;

                IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
                           "%d, color: %d\n", txq->id, skb, tx_info->sta,
                           tx_info->color);

                if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
                        iwm_tx_send_concat_packets(iwm, txq);

                ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
                if (ret) {
                        IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                   "%d, Tx worker stopped\n", txq->id);
                        spin_lock_bh(&txq->lock);
                        skb_queue_head(&txq->queue, skb);
                        spin_unlock_bh(&txq->lock);

                        mutex_unlock(&tid_info->mutex);
                        break;
                }

                txq->concat_ptr = txq->concat_buf + txq->concat_count;
                tid_info->last_seq_num =
                        iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                txq->concat_count += ALIGN(cmdlen, 16);

                mutex_unlock(&tid_info->mutex);

                kfree_skb(skb);
        }

        iwm_tx_send_concat_packets(iwm, txq);

        if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
            !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
            (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
                IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
                netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
        }
}