static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
        struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
        struct net_device *netdev = pdata->netdev;
        struct xlgmac_channel *channel;
        struct netdev_queue *txq;
        unsigned int i;

        netif_tx_stop_all_queues(netdev);
        xlgmac_stop_timers(pdata);
        hw_ops->disable_tx(pdata);
        hw_ops->disable_rx(pdata);
        xlgmac_free_irqs(pdata);
        xlgmac_napi_disable(pdata, 1);
        hw_ops->exit(pdata);

        channel = pdata->channel_head;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        continue;

                txq = netdev_get_tx_queue(netdev, channel->queue_index);
                netdev_tx_reset_queue(txq);
        }
}
/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
        struct fm10k_tx_buffer *tx_buffer;
        unsigned long size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_buffer)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer = &tx_ring->tx_buffer[i];
                fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
        }

        /* reset BQL values */
        netdev_tx_reset_queue(txring_txq(tx_ring));

        size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);
}
int mlx4_en_free_tx_buf(struct ether *dev, struct mlx4_en_tx_ring *ring)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
               ring->cons, ring->prod);

        if ((uint32_t) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                                        ring->cons & ring->size_mask,
                                        !!(ring->cons & ring->size), 0);
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        netdev_tx_reset_queue(ring->tx_queue);

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
#endif
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @nn:      NFP Net device
 * @tx_ring: TX ring structure
 *
 * Assumes that the device is stopped
 */
static void nfp_net_tx_ring_reset(struct nfp_net *nn,
                                  struct nfp_net_tx_ring *tx_ring)
{
        const struct skb_frag_struct *frag;
        struct netdev_queue *nd_q;
        struct pci_dev *pdev = nn->pdev;

        while (tx_ring->rd_p != tx_ring->wr_p) {
                int nr_frags, fidx, idx;
                struct sk_buff *skb;

                idx = tx_ring->rd_p % tx_ring->cnt;
                skb = tx_ring->txbufs[idx].skb;
                nr_frags = skb_shinfo(skb)->nr_frags;
                fidx = tx_ring->txbufs[idx].fidx;

                if (fidx == -1) {
                        /* unmap head */
                        dma_unmap_single(&pdev->dev,
                                         tx_ring->txbufs[idx].dma_addr,
                                         skb_headlen(skb), DMA_TO_DEVICE);
                } else {
                        /* unmap fragment */
                        frag = &skb_shinfo(skb)->frags[fidx];
                        dma_unmap_page(&pdev->dev,
                                       tx_ring->txbufs[idx].dma_addr,
                                       skb_frag_size(frag), DMA_TO_DEVICE);
                }

                /* check for last gather fragment */
                if (fidx == nr_frags - 1)
                        dev_kfree_skb_any(skb);

                tx_ring->txbufs[idx].dma_addr = 0;
                tx_ring->txbufs[idx].skb = NULL;
                tx_ring->txbufs[idx].fidx = -2;

                tx_ring->qcp_rd_p++;
                tx_ring->rd_p++;
        }

        memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
        tx_ring->wr_p = 0;
        tx_ring->rd_p = 0;
        tx_ring->qcp_rd_p = 0;
        tx_ring->wr_ptr_add = 0;

        nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
        netdev_tx_reset_queue(nd_q);
}
static void __lb_other_process(struct hns_nic_ring_data *ring_data,
                               struct sk_buff *skb)
{
        struct net_device *ndev;
        struct hns_nic_priv *priv;
        struct hnae_ring *ring;
        struct netdev_queue *dev_queue;
        struct sk_buff *new_skb;
        unsigned int frame_size;
        int check_ok;
        u32 i;
        char buff[33]; /* 32B data and the last character '\0' */

        if (!ring_data) { /* Just for doing create frame */
                ndev = skb->dev;
                priv = netdev_priv(ndev);

                frame_size = skb->len;
                memset(skb->data, 0xFF, frame_size);
                if ((!AE_IS_VER1(priv->enet_ver)) &&
                    (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
                        memcpy(skb->data, ndev->dev_addr, 6);
                        skb->data[5] += 0x1f;
                }

                frame_size &= ~1ul;
                memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
                memset(&skb->data[frame_size / 2 + 10], 0xBE,
                       frame_size / 2 - 11);
                memset(&skb->data[frame_size / 2 + 12], 0xAF,
                       frame_size / 2 - 13);
                return;
        }

        ring = ring_data->ring;
        ndev = ring_data->napi.dev;
        if (is_tx_ring(ring)) { /* for tx queue reset */
                dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
                netdev_tx_reset_queue(dev_queue);
                return;
        }

        frame_size = skb->len;
        frame_size &= ~1ul; /* for multi buffer */

        new_skb = skb_copy(skb, GFP_ATOMIC);
        dev_kfree_skb_any(skb);
        skb = new_skb;

        check_ok = 0;
        if (*(skb->data + 10) == 0xFF) { /* for rx check frame */
                if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
                    (*(skb->data + frame_size / 2 + 12) == 0xAF))
                        check_ok = 1;
        }

        if (check_ok) {
                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += skb->len;
        } else {
                ndev->stats.rx_frame_errors++;
                for (i = 0; i < skb->len; i++) {
                        snprintf(buff + i % 16 * 2, 3, /* trailing \0 */
                                 "%02x", *(skb->data + i));
                        if ((i % 16 == 15) || (i == skb->len - 1))
                                pr_info("%s\n", buff);
                }
        }
        dev_kfree_skb_any(skb);
}
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring *ring,
                            int index, u64 timestamp,
                            int napi_mode)
{
        struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
        struct mlx4_en_rx_alloc frame = {
                .page = tx_info->page,
                .dma = tx_info->map0_dma,
        };

        if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
                dma_unmap_page(priv->ddev, tx_info->map0_dma,
                               PAGE_SIZE, priv->dma_dir);
                put_page(tx_info->page);
        }

        return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
               ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
                                        ring->cons & ring->size_mask,
                                        0, 0 /* Non-NAPI caller */);
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        if (ring->tx_queue)
                netdev_tx_reset_queue(ring->tx_queue);

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
                           struct mlx4_en_cq *cq, int napi_budget)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
        struct mlx4_cqe *cqe;
        u16 index, ring_index, stamp_index;
        u32 txbbs_skipped = 0;
        u32 txbbs_stamp = 0;
        u32 cons_index = mcq->cons_index;
        int size = cq->size;
        u32 size_mask = ring->size_mask;
        struct mlx4_cqe *buf = cq->buf;
        u32 packets = 0;
        u32 bytes = 0;
        int factor = priv->cqe_factor;
        int done = 0;
        int budget = priv->tx_work_limit;
        u32 last_nr_txbb;
        u32 ring_cons;

        if (unlikely(!priv->port_up))
                return true;

        netdev_txq_bql_complete_prefetchw(ring->tx_queue);

        index = cons_index & size_mask;
        cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
        last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
        ring_cons = READ_ONCE(ring->cons);
        ring_index = ring_cons & size_mask;
        stamp_index = ring_index;

        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                    cons_index & size) && (done < budget)) {
                u16 new_index;

                /*
                 * make sure we read the CQE after we read the
                 * ownership bit
                 */
                dma_rmb();

                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
                        struct mlx4_err_cqe *cqe_err =
                                (struct mlx4_err_cqe *)cqe;

                        en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
                               cqe_err->vendor_err_syndrome,
                               cqe_err->syndrome);
                }

                /* Skip over last polled CQE */
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

                do {
                        u64 timestamp = 0;

                        txbbs_skipped += last_nr_txbb;
                        ring_index = (ring_index + last_nr_txbb) & size_mask;

                        if (unlikely(ring->tx_info[ring_index].ts_requested))
                                timestamp = mlx4_en_get_cqe_ts(cqe);

                        /* free next descriptor */
                        last_nr_txbb = ring->free_tx_desc(
                                        priv, ring, ring_index,
                                        timestamp, napi_budget);

                        mlx4_en_stamp_wqe(priv, ring, stamp_index,
                                          !!((ring_cons + txbbs_stamp) &
                                             ring->size));
                        stamp_index = ring_index;
                        txbbs_stamp = txbbs_skipped;
                        packets++;
                        bytes += ring->tx_info[ring_index].nr_bytes;
                } while ((++done < budget) && (ring_index != new_index));

                ++cons_index;
                index = cons_index & size_mask;
                cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
        }

        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
        mcq->cons_index = cons_index;
        mlx4_cq_set_ci(mcq);
        wmb();

        /* we want to dirty this cache line once */
        WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
        WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

        if (cq->type == TX_XDP)
                return done < budget;

        netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

        /* Wakeup Tx queue if this stopped, and ring is not full. */
        if (netif_tx_queue_stopped(ring->tx_queue) &&
            !mlx4_en_is_tx_ring_full(ring)) {
                netif_tx_wake_queue(ring->tx_queue);
                ring->wake_queue++;
        }

        return done < budget;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);

        if (likely(priv->port_up))
                napi_schedule_irqoff(&cq->napi);
        else
                mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        bool clean_complete;

        clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
        if (!clean_complete)
                return budget;

        napi_complete(napi);
        mlx4_en_arm_cq(priv, cq);

        return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      u32 index,
                                                      unsigned int desc_size)
{
        u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
        int i;

        for (i = desc_size - copy - 4; i >= 0; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *) (ring->buf + i)) =
                        *((u32 *) (ring->bounce_buf + copy + i));
        }

        for (i = copy - 4; i >= 4; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
                        *((u32 *) (ring->bounce_buf + i));
        }

        /* Return real descriptor location */
        return ring->buf + (index << LOG_TXBB_SIZE);
}
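Taken together, these excerpts show the same byte queue limits (BQL) bookkeeping contract around netdev_tx_reset_queue(): the transmit path reports queued bytes, the completion path reports them back, and any ring teardown or reset drops the accumulated BQL state so a stale limit cannot throttle the queue after it is brought back up. Below is a minimal sketch of that contract, not taken from any of the drivers quoted above; the foo_ring structure and the foo_* helpers are hypothetical names used only for illustration, while the netdev_tx_*() and netdev_get_tx_queue() calls are the standard kernel APIs.

/*
 * Hedged sketch of the BQL pattern: a hypothetical driver pairing
 * netdev_tx_sent_queue() on xmit, netdev_tx_completed_queue() on
 * completion, and netdev_tx_reset_queue() on ring teardown/reset.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_ring {                       /* hypothetical per-queue state */
        struct net_device *netdev;
        u16 qidx;                       /* index of the netdev TX queue */
};

static inline struct netdev_queue *foo_txq(struct foo_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->qidx);
}

/* xmit path: account bytes handed to the hardware */
static void foo_tx_account(struct foo_ring *ring, struct sk_buff *skb)
{
        netdev_tx_sent_queue(foo_txq(ring), skb->len);
}

/* completion path: return packets/bytes the hardware has finished */
static void foo_tx_complete(struct foo_ring *ring,
                            unsigned int pkts, unsigned int bytes)
{
        netdev_tx_completed_queue(foo_txq(ring), pkts, bytes);
}

/* teardown/reset path: free pending buffers, then drop BQL state */
static void foo_tx_ring_reset(struct foo_ring *ring)
{
        /* unmap and free any untransmitted buffers here, then: */
        netdev_tx_reset_queue(foo_txq(ring));
}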