static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
    struct xenvif_queue *queue = dev_id;
    struct netdev_queue *net_queue =
        netdev_get_tx_queue(queue->vif->dev, queue->id);

    /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
     * the carrier went down and this queue was previously blocked
     */
    if (unlikely(netif_tx_queue_stopped(net_queue) ||
                 (!netif_carrier_ok(queue->vif->dev) &&
                  test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
        set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);

    xenvif_kick_thread(queue);

    return IRQ_HANDLED;
}
/*
 * This function stops all queues in net_device
 */
void mwifiex_stop_net_dev_queue(struct net_device *netdev,
                                struct mwifiex_adapter *adapter)
{
    unsigned long dev_queue_flags;
    unsigned int i;

    spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);

    for (i = 0; i < netdev->num_tx_queues; i++) {
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);

        if (!netif_tx_queue_stopped(txq))
            netif_tx_stop_queue(txq);
    }

    spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
static void rtw_check_xmit_resource(struct rtw_adapter *padapter,
                                    struct sk_buff *pkt)
{
    struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
    u16 queue;

    queue = skb_get_queue_mapping(pkt);
    if (padapter->registrypriv.wifi_spec) {
        /* No free space for Tx, tx_worker is too slow */
        if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
            netif_stop_subqueue(padapter->pnetdev, queue);
    } else {
        if (pxmitpriv->free_xmitframe_cnt <= 4) {
            if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
                netif_stop_subqueue(padapter->pnetdev, queue);
        }
    }
}
/*
 * Add buffer into wmm tx queue and queue work to transmit it.
 */
int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
{
    struct netdev_queue *txq;
    int index = mwifiex_1d_to_wmm_queue[skb->priority];

    if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) {
        txq = netdev_get_tx_queue(priv->netdev, index);
        if (!netif_tx_queue_stopped(txq)) {
            netif_tx_stop_queue(txq);
            dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
        }
    }

    atomic_inc(&priv->adapter->tx_pending);
    mwifiex_wmm_add_buf_txqueue(priv, skb);

    queue_work(priv->adapter->workqueue, &priv->adapter->main_work);

    return 0;
}
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
    struct sk_buff *skb = q->gso_skb;

    if (unlikely(skb)) {
        struct net_device *dev = qdisc_dev(q);
        struct netdev_queue *txq;

        /* check the reason of requeuing without tx lock first */
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
            q->gso_skb = NULL;
        else
            skb = NULL;
    } else {
        skb = q->dequeue(q);
    }

    return skb;
}
static void dev_watchdog(unsigned long arg)
{
    struct net_device *dev = (struct net_device *)arg;

    netif_tx_lock(dev);
    if (!qdisc_tx_is_noop(dev)) {
        if (netif_device_present(dev) &&
            netif_running(dev) &&
            netif_carrier_ok(dev)) {
            int some_queue_timedout = 0;
            unsigned int i;
            unsigned long trans_start;

            for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(dev, i);
                /*
                 * old device drivers set dev->trans_start
                 */
                trans_start = txq->trans_start ? : dev->trans_start;
                if (netif_tx_queue_stopped(txq) &&
                    time_after(jiffies, (trans_start +
                                         dev->watchdog_timeo))) {
                    some_queue_timedout = 1;
                    break;
                }
            }

            if (some_queue_timedout) {
                char drivername[64];
                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                          dev->name,
                          netdev_drivername(dev, drivername, 64), i);
                dev->netdev_ops->ndo_tx_timeout(dev);
            }
            if (!mod_timer(&dev->watchdog_timer,
                           round_jiffies(jiffies + dev->watchdog_timeo)))
                dev_hold(dev);
        }
    }
    netif_tx_unlock(dev);

    dev_put(dev);
}
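The watchdog above only fires for devices that arm it. What follows is a minimal sketch of the driver-side hookup it assumes, using hypothetical names (my_tx_timeout, my_netdev_ops, my_setup) and the old void ndo_tx_timeout() signature that matches the era of this code; it is illustrative, not taken from any driver in this collection.

#include <linux/netdevice.h>

static void my_tx_timeout(struct net_device *dev)
{
    netdev_warn(dev, "TX stall detected, resetting\n");
    /* ... reset the hardware ring here ... */
    netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops my_netdev_ops = {
    /* a real driver also needs .ndo_start_xmit etc. */
    .ndo_tx_timeout = my_tx_timeout,
};

static void my_setup(struct net_device *dev)
{
    dev->netdev_ops = &my_netdev_ops;
    dev->watchdog_timeo = 5 * HZ;   /* arm dev_watchdog() for this device */
}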
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
    struct sk_buff *skb = q->gso_skb;

    if (unlikely(skb)) {
        struct net_device *dev = qdisc_dev(q);
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) {
            q->gso_skb = NULL;
            q->q.qlen--;
        } else
            skb = NULL;
    } else {
        skb = q->dequeue(q);
    }

    return skb;
}
void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
{
    void *p;
    char drv_version[64];
    struct usb_card_rec *cardp;
    struct sdio_mmc_card *sdio_card;
    struct mwifiex_private *priv;
    int i, idx;
    struct netdev_queue *txq;
    struct mwifiex_debug_info *debug_info;

    if (adapter->drv_info_dump) {
        vfree(adapter->drv_info_dump);
        adapter->drv_info_size = 0;
    }

    dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");

    adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
    if (!adapter->drv_info_dump)
        return;

    p = (char *)(adapter->drv_info_dump);
    p += sprintf(p, "driver_name = " "\"mwifiex\"\n");

    mwifiex_drv_get_driver_version(adapter, drv_version,
                                   sizeof(drv_version) - 1);
    p += sprintf(p, "driver_version = %s\n", drv_version);

    if (adapter->iface_type == MWIFIEX_USB) {
        cardp = (struct usb_card_rec *)adapter->card;
        p += sprintf(p, "tx_cmd_urb_pending = %d\n",
                     atomic_read(&cardp->tx_cmd_urb_pending));
        p += sprintf(p, "tx_data_urb_pending = %d\n",
                     atomic_read(&cardp->tx_data_urb_pending));
        p += sprintf(p, "rx_cmd_urb_pending = %d\n",
                     atomic_read(&cardp->rx_cmd_urb_pending));
        p += sprintf(p, "rx_data_urb_pending = %d\n",
                     atomic_read(&cardp->rx_data_urb_pending));
    }

    p += sprintf(p, "tx_pending = %d\n", atomic_read(&adapter->tx_pending));
    p += sprintf(p, "rx_pending = %d\n", atomic_read(&adapter->rx_pending));

    if (adapter->iface_type == MWIFIEX_SDIO) {
        sdio_card = (struct sdio_mmc_card *)adapter->card;
        p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
                     sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port);
        p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
                     sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port);
    }

    for (i = 0; i < adapter->priv_num; i++) {
        if (!adapter->priv[i] || !adapter->priv[i]->netdev)
            continue;
        priv = adapter->priv[i];
        p += sprintf(p, "\n[interface  : \"%s\"]\n", priv->netdev->name);
        p += sprintf(p, "wmm_tx_pending[0] = %d\n",
                     atomic_read(&priv->wmm_tx_pending[0]));
        p += sprintf(p, "wmm_tx_pending[1] = %d\n",
                     atomic_read(&priv->wmm_tx_pending[1]));
        p += sprintf(p, "wmm_tx_pending[2] = %d\n",
                     atomic_read(&priv->wmm_tx_pending[2]));
        p += sprintf(p, "wmm_tx_pending[3] = %d\n",
                     atomic_read(&priv->wmm_tx_pending[3]));
        p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ?
                     "Disconnected" : "Connected");
        p += sprintf(p, "carrier %s\n",
                     (netif_carrier_ok(priv->netdev) ? "on" : "off"));
        for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) {
            txq = netdev_get_tx_queue(priv->netdev, idx);
            p += sprintf(p, "tx queue %d:%s  ", idx,
                         netif_tx_queue_stopped(txq) ?
                         "stopped" : "started");
        }
        p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
                     priv->netdev->name, priv->num_tx_timeout);
    }

    if (adapter->iface_type == MWIFIEX_SDIO) {
        p += sprintf(p, "\n=== SDIO register DUMP===\n");
        if (adapter->if_ops.reg_dump)
            p += adapter->if_ops.reg_dump(adapter, p);
    }

    p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
    debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
    if (debug_info) {
        for (i = 0; i < adapter->priv_num; i++) {
            if (!adapter->priv[i] || !adapter->priv[i]->netdev)
                continue;
            priv = adapter->priv[i];
            mwifiex_get_debug_info(priv, debug_info);
            p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
            break;
        }
        kfree(debug_info);
    }

    adapter->drv_info_size = p - adapter->drv_info_dump;
    dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
}
static inline void dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
    struct netdev_queue *txq;
    int cpu, clean_done = 0;
    u32 cpu_ptr, dma_ptr, cpu_idx;
#if defined (CONFIG_RAETH_BQL)
    u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
    u32 bytes_sent_ge2 = 0;
#endif
#endif

    spin_lock(&ei_local->page_lock);

    cpu_ptr = sysRegRead(QTX_CRX_PTR);
    dma_ptr = sysRegRead(QTX_DRX_PTR);

    /* get current CPU TXD index */
    cpu_idx = get_txd_offset(ei_local, cpu_ptr);

    while (cpu_ptr != dma_ptr) {
        struct QDMA_txdesc *txd;
        struct sk_buff *skb;

        txd = &ei_local->txd_pool[cpu_idx];

        /* check TXD not owned by DMA */
        if (!(ACCESS_ONCE(txd->txd_info3) & TX3_QDMA_OWN))
            break;

        /* hold next TXD ptr */
        cpu_ptr = ACCESS_ONCE(txd->txd_info2);

        /* release current TXD */
        put_free_txd(ei_local, cpu_idx);

        /* get next TXD index */
        cpu_idx = get_txd_offset(ei_local, cpu_ptr);

        /* free skb */
        skb = ei_local->txd_buff[cpu_idx];
        if (skb) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
            if (skb->dev == ei_local->PseudoDev)
                bytes_sent_ge2 += skb->len;
            else
#endif
                bytes_sent_ge1 += skb->len;
#endif
            ei_local->txd_buff[cpu_idx] = NULL;
            dev_kfree_skb(skb);
        }

        clean_done++;

        /* prevent an infinite loop if something goes wrong */
        if (clean_done > (NUM_TX_DESC-4))
            break;
    }

    if (clean_done)
        sysRegWrite(QTX_CRX_PTR, cpu_ptr);

    spin_unlock(&ei_local->page_lock);

    if (!clean_done)
        return;

    cpu = smp_processor_id();

    if (netif_running(dev)) {
        txq = netdev_get_tx_queue(dev, 0);
        __netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
        netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
        if (netif_tx_queue_stopped(txq))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }

#if defined (CONFIG_PSEUDO_SUPPORT)
    if (netif_running(ei_local->PseudoDev)) {
        txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
        __netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
        netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
        if (netif_tx_queue_stopped(txq))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }
#endif
}
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
    struct xenvif_queue *queue = m->private;
    struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
    struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
    struct netdev_queue *dev_queue;

    if (tx_ring->sring) {
        struct xen_netif_tx_sring *sring = tx_ring->sring;

        seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
                   tx_ring->nr_ents);
        seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                   sring->req_prod,
                   sring->req_prod - sring->rsp_prod,
                   tx_ring->req_cons,
                   tx_ring->req_cons - sring->rsp_prod,
                   sring->req_event,
                   sring->req_event - sring->rsp_prod);
        seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
                   sring->rsp_prod,
                   tx_ring->rsp_prod_pvt,
                   tx_ring->rsp_prod_pvt - sring->rsp_prod,
                   sring->rsp_event,
                   sring->rsp_event - sring->rsp_prod);
        seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
                   queue->pending_prod,
                   queue->pending_cons,
                   nr_pending_reqs(queue));
        seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
                   queue->dealloc_prod,
                   queue->dealloc_cons,
                   queue->dealloc_prod - queue->dealloc_cons);
    }

    if (rx_ring->sring) {
        struct xen_netif_rx_sring *sring = rx_ring->sring;

        seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
        seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                   sring->req_prod,
                   sring->req_prod - sring->rsp_prod,
                   rx_ring->req_cons,
                   rx_ring->req_cons - sring->rsp_prod,
                   sring->req_event,
                   sring->req_event - sring->rsp_prod);
        seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
                   sring->rsp_prod,
                   rx_ring->rsp_prod_pvt,
                   rx_ring->rsp_prod_pvt - sring->rsp_prod,
                   sring->rsp_event,
                   sring->rsp_event - sring->rsp_prod);
    }

    seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
               "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
               "remaining: %lu, expires: %lu, now: %lu\n",
               queue->napi.state, queue->napi.weight,
               skb_queue_len(&queue->tx_queue),
               timer_pending(&queue->credit_timeout),
               queue->credit_bytes,
               queue->credit_usec,
               queue->remaining_credit,
               queue->credit_timeout.expires,
               jiffies);

    dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

    seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
               queue->rx_queue_len, queue->rx_queue_max,
               skb_queue_len(&queue->rx_queue),
               netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

    return 0;
}
static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
{
    struct nvsp_message *nvsp_packet;
    struct hv_netvsc_packet *nvsc_packet;
    struct net_device *ndev;
    u32 send_index;

    ndev = net_device->ndev;

    nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
                                          (packet->offset8 << 3));

    if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
        (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
        (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
        (nvsp_packet->hdr.msg_type == NVSP_MSG5_TYPE_SUBCHANNEL)) {
        /* Copy the response back */
        memcpy(&net_device->channel_init_pkt, nvsp_packet,
               sizeof(struct nvsp_message));
        complete(&net_device->channel_init_wait);
    } else if (nvsp_packet->hdr.msg_type ==
               NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
        int num_outstanding_sends;
        u16 q_idx = 0;
        struct vmbus_channel *channel = device->channel;
        int queue_sends;

        /* Get the send context */
        nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
            packet->trans_id;

        /* Notify the layer above us */
        if (nvsc_packet) {
            send_index = nvsc_packet->send_buf_index;
            if (send_index != NETVSC_INVALID_INDEX)
                netvsc_free_send_slot(net_device, send_index);
            q_idx = nvsc_packet->q_idx;
            channel = nvsc_packet->channel;
            nvsc_packet->send_completion(nvsc_packet->send_completion_ctx);
        }

        num_outstanding_sends =
            atomic_dec_return(&net_device->num_outstanding_sends);
        queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);

        if (net_device->destroy && num_outstanding_sends == 0)
            wake_up(&net_device->wait_drain);

        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
            !net_device->start_remove &&
            (hv_ringbuf_avail_percent(&channel->outbound) >
             RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
            netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
    } else {
        netdev_err(ndev, "Unknown send completion packet type- "
                   "%d received!!\n", nvsp_packet->hdr.msg_type);
    }
}
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring *ring,
                            int index, u64 timestamp,
                            int napi_mode)
{
    struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
    struct mlx4_en_rx_alloc frame = {
        .page = tx_info->page,
        .dma = tx_info->map0_dma,
    };

    if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
        dma_unmap_page(priv->ddev, tx_info->map0_dma,
                       PAGE_SIZE, priv->dma_dir);
        put_page(tx_info->page);
    }

    return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    int cnt = 0;

    /* Skip last polled descriptor */
    ring->cons += ring->last_nr_txbb;
    en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
           ring->cons, ring->prod);

    if ((u32) (ring->prod - ring->cons) > ring->size) {
        if (netif_msg_tx_err(priv))
            en_warn(priv, "Tx consumer passed producer!\n");
        return 0;
    }

    while (ring->cons != ring->prod) {
        ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
                                                ring->cons & ring->size_mask,
                                                0, 0 /* Non-NAPI caller */);
        ring->cons += ring->last_nr_txbb;
        cnt++;
    }

    if (ring->tx_queue)
        netdev_tx_reset_queue(ring->tx_queue);

    if (cnt)
        en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

    return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
                           struct mlx4_en_cq *cq, int napi_budget)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_cq *mcq = &cq->mcq;
    struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
    struct mlx4_cqe *cqe;
    u16 index, ring_index, stamp_index;
    u32 txbbs_skipped = 0;
    u32 txbbs_stamp = 0;
    u32 cons_index = mcq->cons_index;
    int size = cq->size;
    u32 size_mask = ring->size_mask;
    struct mlx4_cqe *buf = cq->buf;
    u32 packets = 0;
    u32 bytes = 0;
    int factor = priv->cqe_factor;
    int done = 0;
    int budget = priv->tx_work_limit;
    u32 last_nr_txbb;
    u32 ring_cons;

    if (unlikely(!priv->port_up))
        return true;

    netdev_txq_bql_complete_prefetchw(ring->tx_queue);

    index = cons_index & size_mask;
    cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
    last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
    ring_cons = READ_ONCE(ring->cons);
    ring_index = ring_cons & size_mask;
    stamp_index = ring_index;

    /* Process all completed CQEs */
    while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                cons_index & size) && (done < budget)) {
        u16 new_index;

        /*
         * make sure we read the CQE after we read the
         * ownership bit
         */
        dma_rmb();

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                     MLX4_CQE_OPCODE_ERROR)) {
            struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

            en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
                   cqe_err->vendor_err_syndrome,
                   cqe_err->syndrome);
        }

        /* Skip over last polled CQE */
        new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

        do {
            u64 timestamp = 0;

            txbbs_skipped += last_nr_txbb;
            ring_index = (ring_index + last_nr_txbb) & size_mask;

            if (unlikely(ring->tx_info[ring_index].ts_requested))
                timestamp = mlx4_en_get_cqe_ts(cqe);

            /* free next descriptor */
            last_nr_txbb = ring->free_tx_desc(
                priv, ring, ring_index,
                timestamp, napi_budget);

            mlx4_en_stamp_wqe(priv, ring, stamp_index,
                              !!((ring_cons + txbbs_stamp) &
                                 ring->size));
            stamp_index = ring_index;
            txbbs_stamp = txbbs_skipped;
            packets++;
            bytes += ring->tx_info[ring_index].nr_bytes;
        } while ((++done < budget) && (ring_index != new_index));

        ++cons_index;
        index = cons_index & size_mask;
        cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
    }

    /*
     * To prevent CQ overflow we first update CQ consumer and only then
     * the ring consumer.
     */
    mcq->cons_index = cons_index;
    mlx4_cq_set_ci(mcq);
    wmb(); /* we want to dirty this cache line once */
    WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
    WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

    if (cq->type == TX_XDP)
        return done < budget;

    netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

    /* Wakeup Tx queue if this stopped, and ring is not full. */
    if (netif_tx_queue_stopped(ring->tx_queue) &&
        !mlx4_en_is_tx_ring_full(ring)) {
        netif_tx_wake_queue(ring->tx_queue);
        ring->wake_queue++;
    }

    return done < budget;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
    struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
    struct mlx4_en_priv *priv = netdev_priv(cq->dev);

    if (likely(priv->port_up))
        napi_schedule_irqoff(&cq->napi);
    else
        mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
    struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
    struct net_device *dev = cq->dev;
    struct mlx4_en_priv *priv = netdev_priv(dev);
    bool clean_complete;

    clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
    if (!clean_complete)
        return budget;

    napi_complete(napi);
    mlx4_en_arm_cq(priv, cq);

    return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      u32 index,
                                                      unsigned int desc_size)
{
    u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
    int i;

    for (i = desc_size - copy - 4; i >= 0; i -= 4) {
        if ((i & (TXBB_SIZE - 1)) == 0)
            wmb();

        *((u32 *) (ring->buf + i)) =
            *((u32 *) (ring->bounce_buf + copy + i));
    }

    for (i = copy - 4; i >= 4 ; i -= 4) {
        if ((i & (TXBB_SIZE - 1)) == 0)
            wmb();

        *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
            *((u32 *) (ring->bounce_buf + i));
    }

    /* Return real descriptor location */
    return ring->buf + (index << LOG_TXBB_SIZE);
}
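Several completion handlers in this collection report freed work to byte queue limits via netdev_tx_completed_queue(). BQL only balances if the transmit path accounted the same byte counts when the packets were posted. Below is a minimal sketch of that producer side under hypothetical names (my_tx_ring, my_priv, my_xmit); it is not mlx4 code, just the generic pairing.

#include <linux/netdevice.h>

struct my_tx_ring {          /* hypothetical ring bookkeeping */
    u32 prod;
    u32 cons;                /* advanced by the completion handler */
    u32 size;
};

struct my_priv {
    struct my_tx_ring ring;
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct my_priv *mp = netdev_priv(dev);
    struct netdev_queue *txq =
        netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

    /* ... DMA-map the skb and post it to the hardware ring here ... */
    mp->ring.prod++;

    /* Producer side of BQL: must account the same bytes that the
     * completion handler later reports via netdev_tx_completed_queue(). */
    netdev_tx_sent_queue(txq, skb->len);

    if (mp->ring.prod - mp->ring.cons >= mp->ring.size)
        netif_tx_stop_queue(txq);   /* the completion path wakes us */

    return NETDEV_TX_OK;
}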
static inline void dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
    struct netdev_queue *txq;
    int cpu, clean_done = 0;
    u32 txd_free_idx;
#if defined (CONFIG_RAETH_BQL)
    u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
    u32 bytes_sent_ge2 = 0;
#endif
#endif

    spin_lock(&ei_local->page_lock);

    txd_free_idx = ei_local->txd_free_idx;

    while (clean_done < (NUM_TX_DESC-2)) {
        struct PDMA_txdesc *txd;
        struct sk_buff *skb;

        skb = ei_local->txd_buff[txd_free_idx];
        if (!skb)
            break;

        txd = &ei_local->txd_ring[txd_free_idx];

        /* check TXD not owned by DMA */
        if (!(ACCESS_ONCE(txd->txd_info2) & TX2_DMA_DONE))
            break;

        if (skb != (struct sk_buff *)0xFFFFFFFF) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
            if (skb->dev == ei_local->PseudoDev)
                bytes_sent_ge2 += skb->len;
            else
#endif
                bytes_sent_ge1 += skb->len;
#endif
            dev_kfree_skb(skb);
        }

        ei_local->txd_buff[txd_free_idx] = NULL;

        txd_free_idx = (txd_free_idx + 1) % NUM_TX_DESC;
        clean_done++;
    }

    if (ei_local->txd_free_idx != txd_free_idx)
        ei_local->txd_free_idx = txd_free_idx;

    spin_unlock(&ei_local->page_lock);

    if (!clean_done)
        return;

    cpu = smp_processor_id();

    if (netif_running(dev)) {
        txq = netdev_get_tx_queue(dev, 0);
        __netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
        netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
        if (netif_tx_queue_stopped(txq))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }

#if defined (CONFIG_PSEUDO_SUPPORT)
    if (netif_running(ei_local->PseudoDev)) {
        txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
        __netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
        netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
        if (netif_tx_queue_stopped(txq))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }
#endif
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
    struct mlx5_cqe64 *cqe;
    struct mlx5e_sq *sq;
    u32 dma_fifo_cc;
    u32 nbytes;
    u16 npkts;
    u16 sqcc;
    int i;

    sq = container_of(cq, struct mlx5e_sq, cq);

    if (unlikely(test_bit(MLX5E_SQ_TX_TIMEOUT, &sq->state)))
        return false;

    npkts = 0;
    nbytes = 0;

    /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
     * otherwise a cq overrun may occur
     */
    sqcc = sq->cc;

    /* avoid dirtying sq cache line every cqe */
    dma_fifo_cc = sq->dma_fifo_cc;

    cqe = mlx5e_get_cqe(cq);

    for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
        u16 wqe_counter;
        bool last_wqe;

        if (!cqe)
            break;

        mlx5_cqwq_pop(&cq->wq);
        mlx5e_prefetch_cqe(cq);

        wqe_counter = be16_to_cpu(cqe->wqe_counter);

        do {
            struct mlx5e_tx_wqe_info *wi;
            struct sk_buff *skb;
            u16 ci;
            int j;

            last_wqe = (sqcc == wqe_counter);

            ci = sqcc & sq->wq.sz_m1;
            skb = sq->skb[ci];
            wi = &sq->wqe_info[ci];

            if (unlikely(!skb)) { /* nop */
                sqcc++;
                continue;
            }

            if (unlikely(MLX5E_TX_HW_STAMP(sq->channel->priv, skb))) {
                struct skb_shared_hwtstamps hwts;

                mlx5e_fill_hwstamp(&sq->cq.channel->priv->tstamp,
                                   &hwts, get_cqe_ts(cqe));
                skb_tstamp_tx(skb, &hwts);
            }

            for (j = 0; j < wi->num_dma; j++) {
                struct mlx5e_sq_dma *dma =
                    mlx5e_dma_get(sq, dma_fifo_cc++);

                mlx5e_tx_dma_unmap(sq->pdev, dma);
            }

            npkts++;
            nbytes += wi->num_bytes;
            sqcc += wi->num_wqebbs;
            dev_kfree_skb(skb);
        } while (!last_wqe);

        cqe = mlx5e_get_cqe(cq);
    }

    mlx5_cqwq_update_db_record(&cq->wq);

    /* ensure cq space is freed before enabling more cqes */
    wmb();

    sq->dma_fifo_cc = dma_fifo_cc;
    sq->cc = sqcc;

    netdev_tx_completed_queue(sq->txq, npkts, nbytes);

    if (netif_tx_queue_stopped(sq->txq) &&
        mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
        likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
        netif_tx_wake_queue(sq->txq);
        sq->stats.queue_wake++;
    }

    return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
/**
 * @brief Proc read function for info
 *
 * @param page     Pointer to buffer
 * @param start    Read data starting position
 * @param offset   Offset
 * @param count    Counter
 * @param eof      End of file flag
 * @param data     Data to output
 *
 * @return         Number of output data
 */
static int
woal_info_proc_read(char *page, char **start, off_t offset,
                    int count, int *eof, void *data)
{
    char *p = page;
    struct net_device *netdev = data;
    char fmt[MLAN_MAX_VER_STR_LEN];
    moal_private *priv = (moal_private *) netdev_priv(netdev);
#ifdef STA_SUPPORT
    int i = 0;
    moal_handle *handle = priv->phandle;
    mlan_bss_info info;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
    struct dev_mc_list *mcptr = netdev->mc_list;
    int mc_count = netdev->mc_count;
#else
    struct netdev_hw_addr *mcptr = NULL;
    int mc_count = netdev_mc_count(netdev);
#endif /* < 2.6.35 */
#else
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)
    int i = 0;
#endif /* >= 2.6.29 */
#endif
#ifdef UAP_SUPPORT
    mlan_ds_uap_stats ustats;
#endif

    ENTER();

    if (offset) {
        *eof = 1;
        goto exit;
    }
    memset(fmt, 0, sizeof(fmt));
#ifdef UAP_SUPPORT
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) {
        p += sprintf(p, "driver_name = " "\"uap\"\n");
        woal_uap_get_version(priv, fmt, sizeof(fmt) - 1);
        if (MLAN_STATUS_SUCCESS !=
            woal_uap_get_stats(priv, MOAL_PROC_WAIT, &ustats)) {
            *eof = 1;
            goto exit;
        }
    }
#endif /* UAP_SUPPORT */
#ifdef STA_SUPPORT
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) {
        woal_get_version(handle, fmt, sizeof(fmt) - 1);
        memset(&info, 0, sizeof(info));
        if (MLAN_STATUS_SUCCESS !=
            woal_get_bss_info(priv, MOAL_PROC_WAIT, &info)) {
            *eof = 1;
            goto exit;
        }
        p += sprintf(p, "driver_name = " "\"wlan\"\n");
    }
#endif
    p += sprintf(p, "driver_version = %s", fmt);
    p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
#if defined(WIFI_DIRECT_SUPPORT)
    if (priv->bss_type == MLAN_BSS_TYPE_WIFIDIRECT) {
        if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA)
            p += sprintf(p, "bss_mode = \"WIFIDIRECT-Client\"\n");
        else
            p += sprintf(p, "bss_mode = \"WIFIDIRECT-GO\"\n");
    }
#endif
#ifdef STA_SUPPORT
    if (priv->bss_type == MLAN_BSS_TYPE_STA)
        p += sprintf(p, "bss_mode =\"%s\"\n", szModes[info.bss_mode]);
#endif
    p += sprintf(p, "media_state=\"%s\"\n",
                 ((priv->media_connected == MFALSE) ?
                  "Disconnected" : "Connected"));
    p += sprintf(p, "mac_address=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
                 netdev->dev_addr[0], netdev->dev_addr[1],
                 netdev->dev_addr[2], netdev->dev_addr[3],
                 netdev->dev_addr[4], netdev->dev_addr[5]);
#ifdef STA_SUPPORT
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) {
        p += sprintf(p, "multicast_count=\"%d\"\n", mc_count);
        p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
        p += sprintf(p, "bssid=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
                     info.bssid[0], info.bssid[1],
                     info.bssid[2], info.bssid[3],
                     info.bssid[4], info.bssid[5]);
        p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
        p += sprintf(p, "region_code = \"%02x\"\n", (t_u8) info.region_code);

        /*
         * Put out the multicast list
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
        for (i = 0; i < netdev->mc_count; i++) {
            p += sprintf(p,
                         "multicast_address[%d]=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
                         i,
                         mcptr->dmi_addr[0], mcptr->dmi_addr[1],
                         mcptr->dmi_addr[2], mcptr->dmi_addr[3],
                         mcptr->dmi_addr[4], mcptr->dmi_addr[5]);
            mcptr = mcptr->next;
        }
#else
        netdev_for_each_mc_addr(mcptr, netdev)
            p += sprintf(p,
                         "multicast_address[%d]=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
                         i++,
                         mcptr->addr[0], mcptr->addr[1],
                         mcptr->addr[2], mcptr->addr[3],
                         mcptr->addr[4], mcptr->addr[5]);
#endif /* < 2.6.35 */
    }
#endif
    p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
    p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes);
    p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets);
    p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets);
    p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped);
    p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped);
    p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors);
    p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
    p += sprintf(p, "carrier %s\n",
                 ((netif_carrier_ok(priv->netdev)) ? "on" : "off"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)
    for (i = 0; i < netdev->num_tx_queues; i++) {
        /* check queue i, not hard-coded queue 0, so each line is accurate */
        p += sprintf(p, "tx queue %d:  %s\n", i,
                     ((netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) ?
                      "stopped" : "started"));
    }
#else
    p += sprintf(p, "tx queue %s\n",
                 ((netif_queue_stopped(priv->netdev)) ?
                  "stopped" : "started"));
#endif
#ifdef UAP_SUPPORT
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) {
        p += sprintf(p, "tkip_mic_failures = %u\n", ustats.tkip_mic_failures);
        p += sprintf(p, "ccmp_decrypt_errors = %u\n",
                     ustats.ccmp_decrypt_errors);
        p += sprintf(p, "wep_undecryptable_count = %u\n",
                     ustats.wep_undecryptable_count);
        p += sprintf(p, "wep_icv_error_count = %u\n",
                     ustats.wep_icv_error_count);
        p += sprintf(p, "decrypt_failure_count = %u\n",
                     ustats.decrypt_failure_count);
        p += sprintf(p, "mcast_tx_count = %u\n", ustats.mcast_tx_count);
        p += sprintf(p, "failed_count = %u\n", ustats.failed_count);
        p += sprintf(p, "retry_count = %u\n", ustats.retry_count);
        p += sprintf(p, "multiple_retry_count = %u\n",
                     ustats.multi_retry_count);
        p += sprintf(p, "frame_duplicate_count = %u\n",
                     ustats.frame_dup_count);
        p += sprintf(p, "rts_success_count = %u\n", ustats.rts_success_count);
        p += sprintf(p, "rts_failure_count = %u\n", ustats.rts_failure_count);
        p += sprintf(p, "ack_failure_count = %u\n", ustats.ack_failure_count);
        p += sprintf(p, "rx_fragment_count = %u\n", ustats.rx_fragment_count);
        p += sprintf(p, "mcast_rx_frame_count = %u\n",
                     ustats.mcast_rx_frame_count);
        p += sprintf(p, "fcs_error_count = %u\n", ustats.fcs_error_count);
        p += sprintf(p, "tx_frame_count = %u\n", ustats.tx_frame_count);
        p += sprintf(p, "rsna_tkip_cm_invoked = %u\n",
                     ustats.rsna_tkip_cm_invoked);
        p += sprintf(p, "rsna_4way_hshk_failures = %u\n",
                     ustats.rsna_4way_hshk_failures);
    }
#endif /* UAP_SUPPORT */
exit:
    LEAVE();
    return (p - page);
}
static void ifb_ri_tasklet(unsigned long _txp)
{
    struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
    struct netdev_queue *txq;
    struct sk_buff *skb;

    txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
    skb = skb_peek(&txp->tq);
    if (!skb) {
        if (!__netif_tx_trylock(txq))
            goto resched;
        skb_queue_splice_tail_init(&txp->rq, &txp->tq);
        __netif_tx_unlock(txq);
    }

    while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
        skb->tc_redirected = 0;
        skb->tc_skip_classify = 1;

        u64_stats_update_begin(&txp->tsync);
        txp->tx_packets++;
        txp->tx_bytes += skb->len;
        u64_stats_update_end(&txp->tsync);

        rcu_read_lock();
        skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
        if (!skb->dev) {
            rcu_read_unlock();
            dev_kfree_skb(skb);
            txp->dev->stats.tx_dropped++;
            if (skb_queue_len(&txp->tq) != 0)
                goto resched;
            break;
        }
        rcu_read_unlock();
        skb->skb_iif = txp->dev->ifindex;

        if (!skb->tc_from_ingress) {
            dev_queue_xmit(skb);
        } else {
            skb_pull_rcsum(skb, skb->mac_len);
            netif_receive_skb(skb);
        }
    }

    if (__netif_tx_trylock(txq)) {
        skb = skb_peek(&txp->rq);
        if (!skb) {
            txp->tasklet_pending = 0;
            if (netif_tx_queue_stopped(txq))
                netif_tx_wake_queue(txq);
        } else {
            __netif_tx_unlock(txq);
            goto resched;
        }
        __netif_tx_unlock(txq);
    } else {
resched:
        txp->tasklet_pending = 1;
        tasklet_schedule(&txp->ifb_tasklet);
    }
}
/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
    struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
    struct nfp_net *nn = r_vec->nfp_net;
    const struct skb_frag_struct *frag;
    struct netdev_queue *nd_q;
    u32 done_pkts = 0, done_bytes = 0;
    struct sk_buff *skb;
    int todo, nr_frags;
    u32 qcp_rd_p;
    int fidx;
    int idx;

    /* Work out how many descriptors have been transmitted */
    qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

    if (qcp_rd_p == tx_ring->qcp_rd_p)
        return;

    if (qcp_rd_p > tx_ring->qcp_rd_p)
        todo = qcp_rd_p - tx_ring->qcp_rd_p;
    else
        todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;

    while (todo--) {
        idx = tx_ring->rd_p % tx_ring->cnt;
        tx_ring->rd_p++;

        skb = tx_ring->txbufs[idx].skb;
        if (!skb)
            continue;

        nr_frags = skb_shinfo(skb)->nr_frags;
        fidx = tx_ring->txbufs[idx].fidx;

        if (fidx == -1) {
            /* unmap head */
            dma_unmap_single(&nn->pdev->dev,
                             tx_ring->txbufs[idx].dma_addr,
                             skb_headlen(skb), DMA_TO_DEVICE);

            done_pkts += tx_ring->txbufs[idx].pkt_cnt;
            done_bytes += tx_ring->txbufs[idx].real_len;
        } else {
            /* unmap fragment */
            frag = &skb_shinfo(skb)->frags[fidx];
            dma_unmap_page(&nn->pdev->dev,
                           tx_ring->txbufs[idx].dma_addr,
                           skb_frag_size(frag), DMA_TO_DEVICE);
        }

        /* check for last gather fragment */
        if (fidx == nr_frags - 1)
            dev_kfree_skb_any(skb);

        tx_ring->txbufs[idx].dma_addr = 0;
        tx_ring->txbufs[idx].skb = NULL;
        tx_ring->txbufs[idx].fidx = -2;
    }

    tx_ring->qcp_rd_p = qcp_rd_p;

    u64_stats_update_begin(&r_vec->tx_sync);
    r_vec->tx_bytes += done_bytes;
    r_vec->tx_pkts += done_pkts;
    u64_stats_update_end(&r_vec->tx_sync);

    nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
    netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
    if (nfp_net_tx_ring_should_wake(tx_ring)) {
        /* Make sure TX thread will see updated tx_ring->rd_p */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(nd_q)))
            netif_tx_wake_queue(nd_q);
    }

    WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
              "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
              tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
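The smp_mb() above is one half of the usual stop/wake handshake between the transmit path and the completion path. A sketch of the transmit-side half follows, with hypothetical ring accounting (my_ring, my_ring_full, my_tx_maybe_stop) standing in for nfp's own should_stop/should_wake helpers; it illustrates why the barrier is needed, nothing more.

#include <linux/netdevice.h>

struct my_ring {            /* hypothetical ring bookkeeping */
    u32 wr_p;               /* producer index */
    u32 rd_p;               /* consumer index, advanced by completions */
    u32 cnt;                /* ring size */
};

static bool my_ring_full(const struct my_ring *ring)
{
    return (ring->wr_p - READ_ONCE(ring->rd_p)) >= ring->cnt;
}

static void my_tx_maybe_stop(struct netdev_queue *nd_q, struct my_ring *ring)
{
    if (likely(!my_ring_full(ring)))
        return;

    netif_tx_stop_queue(nd_q);

    /* Pairs with the smp_mb() on the completion path: publish the stopped
     * state before re-reading rd_p, so a racing completion either sees the
     * queue stopped and wakes it, or we see its rd_p update here. Without
     * the barriers the queue can be left stopped forever. */
    smp_mb();

    if (unlikely(!my_ring_full(ring)))
        netif_tx_start_queue(nd_q);
}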
/*
 * Proc info file read handler.
 *
 * This function is called when the 'info' file is opened for reading.
 * It prints the following driver related information -
 *      - Driver name
 *      - Driver version
 *      - Driver extended version
 *      - Interface name
 *      - BSS mode
 *      - Media state (connected or disconnected)
 *      - MAC address
 *      - Total number of Tx bytes
 *      - Total number of Rx bytes
 *      - Total number of Tx packets
 *      - Total number of Rx packets
 *      - Total number of dropped Tx packets
 *      - Total number of dropped Rx packets
 *      - Total number of corrupted Tx packets
 *      - Total number of corrupted Rx packets
 *      - Carrier status (on or off)
 *      - Tx queue status (started or stopped)
 *
 * For STA mode drivers, it also prints the following extra -
 *      - ESSID
 *      - BSSID
 *      - Channel
 *      - Region code
 *      - Multicast count
 *      - Multicast addresses
 */
static ssize_t
mwifiex_info_read(struct file *file, char __user *ubuf,
                  size_t count, loff_t *ppos)
{
    struct mwifiex_private *priv =
        (struct mwifiex_private *) file->private_data;
    struct net_device *netdev = priv->netdev;
    struct netdev_hw_addr *ha;
    struct netdev_queue *txq;
    unsigned long page = get_zeroed_page(GFP_KERNEL);
    char *p = (char *) page, fmt[64];
    struct mwifiex_bss_info info;
    ssize_t ret;
    int i = 0;

    if (!p)
        return -ENOMEM;

    memset(&info, 0, sizeof(info));
    ret = mwifiex_get_bss_info(priv, &info);
    if (ret)
        goto free_and_exit;

    mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);

    if (!priv->version_str[0])
        mwifiex_get_ver_ext(priv);

    p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
    p += sprintf(p, "driver_version = %s", fmt);
    p += sprintf(p, "\nverext = %s", priv->version_str);
    p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);

    if (info.bss_mode >= ARRAY_SIZE(bss_modes))
        p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
    else
        p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);

    p += sprintf(p, "media_state=\"%s\"\n",
                 (!priv->media_connected ? "Disconnected" : "Connected"));
    p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);

    if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
        p += sprintf(p, "multicast_count=\"%d\"\n",
                     netdev_mc_count(netdev));
        p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
        p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
        p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
        p += sprintf(p, "country_code = \"%s\"\n", info.country_code);

        netdev_for_each_mc_addr(ha, netdev)
            p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
                         i++, ha->addr);
    }

    p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
    p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes);
    p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets);
    p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets);
    p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped);
    p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped);
    p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors);
    p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
    p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev)) ?
                                     "on" : "off"));
    p += sprintf(p, "tx queue");
    for (i = 0; i < netdev->num_tx_queues; i++) {
        txq = netdev_get_tx_queue(netdev, i);
        p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ?
                     "stopped" : "started");
    }
    p += sprintf(p, "\n");

    ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
                                  (unsigned long) p - page);

free_and_exit:
    free_page(page);
    return ret;
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
    struct mlx5e_sq *sq;
    u32 dma_fifo_cc;
    u32 nbytes;
    u16 npkts;
    u16 sqcc;
    int i;

    /* avoid accessing cq (dma coherent memory) if not needed */
    if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
        return false;

    sq = container_of(cq, struct mlx5e_sq, cq);

    npkts = 0;
    nbytes = 0;

    /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
     * otherwise a cq overrun may occur
     */
    sqcc = sq->cc;

    /* avoid dirtying sq cache line every cqe */
    dma_fifo_cc = sq->dma_fifo_cc;

    for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
        struct mlx5_cqe64 *cqe;
        u16 wqe_counter;
        bool last_wqe;

        cqe = mlx5e_get_cqe(cq);
        if (!cqe)
            break;

        mlx5_cqwq_pop(&cq->wq);

        wqe_counter = be16_to_cpu(cqe->wqe_counter);

        do {
            struct sk_buff *skb;
            u16 ci;
            int j;

            last_wqe = (sqcc == wqe_counter);

            ci = sqcc & sq->wq.sz_m1;
            skb = sq->skb[ci];

            if (unlikely(!skb)) { /* nop */
                sq->stats.nop++;
                sqcc++;
                continue;
            }

            for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
                dma_addr_t addr;
                u32 size;

                mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
                dma_fifo_cc++;
                dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
            }

            npkts++;
            nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
            sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
            dev_kfree_skb(skb);
        } while (!last_wqe);
    }

    mlx5_cqwq_update_db_record(&cq->wq);

    /* ensure cq space is freed before enabling more cqes */
    wmb();

    sq->dma_fifo_cc = dma_fifo_cc;
    sq->cc = sqcc;

    netdev_tx_completed_queue(sq->txq, npkts, nbytes);

    if (netif_tx_queue_stopped(sq->txq) &&
        mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
        likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
        netif_tx_wake_queue(sq->txq);
        sq->stats.wake++;
    }

    if (i == MLX5E_TX_CQ_POLL_BUDGET) {
        set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
        return true;
    }

    return false;
}
static bool mlx4_en_process_tx_cq(struct ether *dev, struct mlx4_en_cq *cq)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_cq *mcq = &cq->mcq;
    struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
    struct mlx4_cqe *cqe;
    uint16_t index;
    uint16_t new_index, ring_index, stamp_index;
    uint32_t txbbs_skipped = 0;
    uint32_t txbbs_stamp = 0;
    uint32_t cons_index = mcq->cons_index;
    int size = cq->size;
    uint32_t size_mask = ring->size_mask;
    struct mlx4_cqe *buf = cq->buf;
    uint32_t packets = 0;
    uint32_t bytes = 0;
    int factor = priv->cqe_factor;
    uint64_t timestamp = 0;
    int done = 0;
    int budget = priv->tx_work_limit;
    uint32_t last_nr_txbb;
    uint32_t ring_cons;

    if (!priv->port_up)
        return true;

#if 0 // AKAROS_PORT
    netdev_txq_bql_complete_prefetchw(ring->tx_queue);
#endif

    index = cons_index & size_mask;
    cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
    last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
    ring_cons = ACCESS_ONCE(ring->cons);
    ring_index = ring_cons & size_mask;
    stamp_index = ring_index;

    /* Process all completed CQEs */
    while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                cons_index & size) && (done < budget)) {
        /*
         * make sure we read the CQE after we read the
         * ownership bit
         */
        bus_rmb();

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                     MLX4_CQE_OPCODE_ERROR)) {
            struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

            en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
                   cqe_err->vendor_err_syndrome,
                   cqe_err->syndrome);
        }

        /* Skip over last polled CQE */
        new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

        do {
            txbbs_skipped += last_nr_txbb;
            ring_index = (ring_index + last_nr_txbb) & size_mask;
            if (ring->tx_info[ring_index].ts_requested)
                timestamp = mlx4_en_get_cqe_ts(cqe);

            /* free next descriptor */
            last_nr_txbb = mlx4_en_free_tx_desc(
                priv, ring, ring_index,
                !!((ring_cons + txbbs_skipped) &
                   ring->size), timestamp);

            mlx4_en_stamp_wqe(priv, ring, stamp_index,
                              !!((ring_cons + txbbs_stamp) &
                                 ring->size));
            stamp_index = ring_index;
            txbbs_stamp = txbbs_skipped;
            packets++;
            bytes += ring->tx_info[ring_index].nr_bytes;
        } while ((++done < budget) && (ring_index != new_index));

        ++cons_index;
        index = cons_index & size_mask;
        cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
    }

    /*
     * To prevent CQ overflow we first update CQ consumer and only then
     * the ring consumer.
     */
    mcq->cons_index = cons_index;
    mlx4_cq_set_ci(mcq);
    wmb();

    /* we want to dirty this cache line once */
    ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
    ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;

#if 0 // AKAROS_PORT
    netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

    /*
     * Wakeup Tx queue if this stopped, and at least 1 packet
     * was completed
     */
    if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
        netif_tx_wake_queue(ring->tx_queue);
        ring->wake_queue++;
    }
#endif

    return done < budget;
}
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
    struct net_device *dev = queue->vif->dev;
    unsigned int id = queue->id;

    return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *     0  - queue is empty or throttled.
 *     >0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
    struct netdev_queue *txq;
    int ret = NETDEV_TX_BUSY;
    struct net_device *dev;
    spinlock_t *root_lock;
    struct sk_buff *skb;

    /* Dequeue packet */
    if (unlikely((skb = dequeue_skb(q)) == NULL))
        return 0;

    root_lock = qdisc_lock(q);

    /* And release qdisc */
    spin_unlock(root_lock);

    dev = qdisc_dev(q);
    txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

    HARD_TX_LOCK(dev, txq, smp_processor_id());
    if (!netif_tx_queue_stopped(txq) &&
        !netif_tx_queue_frozen(txq))
        ret = dev_hard_start_xmit(skb, dev, txq);
    HARD_TX_UNLOCK(dev, txq);

    spin_lock(root_lock);

    switch (ret) {
    case NETDEV_TX_OK:
        /* Driver sent out skb successfully */
        ret = qdisc_qlen(q);
        break;

    case NETDEV_TX_LOCKED:
        /* Driver try lock failed */
        ret = handle_dev_cpu_collision(skb, txq, q);
        break;

    default:
        /* Driver returned NETDEV_TX_BUSY - requeue skb */
        if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
            printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                   dev->name, ret, q->q.qlen);

        /* [ENODEV] No such device. An attempt was made to apply an
         * inappropriate function to a device. */
        if (ret == -ENODEV) {
            printk(KERN_EMERG "%s: STOP QUEUE. Reason = %d (-ENODEV) (net\\sched\\sch_generic.c 170)\n",
                   dev->name, ret);
            if (strnicmp((char *)&dev->name, "eth0", 4) == 0) {
                /* Stop upper layers calling the device
                 * hard_start_xmit routine. Used for flow control
                 * when transmit resources are unavailable. */
                ret = dev_requeue_skb(skb, q);
                netif_stop_queue(dev);
            }
        } else
            ret = dev_requeue_skb(skb, q);
        break;
    }

    if (ret && (netif_tx_queue_stopped(txq) ||
                netif_tx_queue_frozen(txq)))
        ret = 0;

    return ret;
}