Code example #1
File: lio_core.c  Project: AlexShiLucky/linux
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}
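
Note: the helper above is only a thin wrapper around the completion half of the BQL (Byte Queue Limits) API. For the accounting to balance, the driver's transmit path must report the same bytes with netdev_tx_sent_queue() when packets are posted to the hardware ring. Below is a minimal sketch of that transmit-side pairing; everything except the netdev_* helpers and core types is a hypothetical illustration, not code from the driver above.

#include <linux/netdevice.h>

/* Hypothetical xmit handler: report queued bytes to BQL so that the
 * completion path (netdev_tx_completed_queue) can credit them back later.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... map the skb and post descriptors to the TX ring (driver specific) ... */

	netdev_tx_sent_queue(txq, skb->len);	/* BQL: bytes handed to hardware */

	/* ... ring the doorbell ... */

	return NETDEV_TX_OK;
}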
Code example #2
File: en_tx.c  Project: DenisLug/mptcp
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	sq = container_of(cq, struct mlx5e_sq, cq);

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];

			if (unlikely(!skb)) { /* nop */
				sq->stats.nop++;
				sqcc++;
				continue;
			}

			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
				dma_addr_t addr;
				u32 size;

				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
				dma_fifo_cc++;
				dma_unmap_single(sq->pdev, addr, size,
						 DMA_TO_DEVICE);
			}

			npkts++;
			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
			dev_kfree_skb(skb);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}
	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}
Code example #3
/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	struct sk_buff *skb;
	int todo, nr_frags;
	u32 qcp_rd_p;
	int fidx;
	int idx;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	if (qcp_rd_p > tx_ring->qcp_rd_p)
		todo = qcp_rd_p - tx_ring->qcp_rd_p;
	else
		todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;

	while (todo--) {
		idx = tx_ring->rd_p % tx_ring->cnt;
		tx_ring->rd_p++;

		skb = tx_ring->txbufs[idx].skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(&nn->pdev->dev,
					 tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
			done_bytes += tx_ring->txbufs[idx].real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(&nn->pdev->dev,
				       tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
Code example #4
File: raether_qdma.c  Project: andy-padavan/rt-n56u
static inline void
dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
	struct netdev_queue *txq;
	int cpu, clean_done = 0;
	u32 cpu_ptr, dma_ptr, cpu_idx;
#if defined (CONFIG_RAETH_BQL)
	u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
	u32 bytes_sent_ge2 = 0;
#endif
#endif

	spin_lock(&ei_local->page_lock);

	cpu_ptr = sysRegRead(QTX_CRX_PTR);
	dma_ptr = sysRegRead(QTX_DRX_PTR);

	/* get current CPU TXD index */
	cpu_idx = get_txd_offset(ei_local, cpu_ptr);

	while (cpu_ptr != dma_ptr) {
		struct QDMA_txdesc *txd;
		struct sk_buff *skb;
		
		txd = &ei_local->txd_pool[cpu_idx];
		
		/* check TXD not owned by DMA */
		if (!(ACCESS_ONCE(txd->txd_info3) & TX3_QDMA_OWN))
			break;
		
		/* hold next TXD ptr */
		cpu_ptr = ACCESS_ONCE(txd->txd_info2);
		
		/* release current TXD */
		put_free_txd(ei_local, cpu_idx);
		
		/* get next TXD index */
		cpu_idx = get_txd_offset(ei_local, cpu_ptr);
		
		/* free skb */
		skb = ei_local->txd_buff[cpu_idx];
		if (skb) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (skb->dev == ei_local->PseudoDev)
				bytes_sent_ge2 += skb->len;
			else
#endif
				bytes_sent_ge1 += skb->len;
#endif
			ei_local->txd_buff[cpu_idx] = NULL;
			dev_kfree_skb(skb);
		}
		
		clean_done++;
		
		/* prevent infinite loop when something goes wrong */
		if (clean_done > (NUM_TX_DESC-4))
			break;
	}

	if (clean_done)
		sysRegWrite(QTX_CRX_PTR, cpu_ptr);

	spin_unlock(&ei_local->page_lock);

	if (!clean_done)
		return;

	cpu = smp_processor_id();

	if (netif_running(dev)) {
		txq = netdev_get_tx_queue(dev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

#if defined (CONFIG_PSEUDO_SUPPORT)
	if (netif_running(ei_local->PseudoDev)) {
		txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
#endif
}
Code example #5
File: dwc-xlgmac-net.c  Project: AlexShiLucky/linux
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}
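
Every completion handler above wakes the TX queue only after enough descriptors have been freed; this assumes the transmit path stopped the queue when the ring ran low. A minimal sketch of that stop side follows, with hypothetical ring bookkeeping (only the netif_tx_* helpers are real kernel API):

#include <linux/netdevice.h>

/* Hypothetical ring state used only for this sketch. */
struct example_tx_ring {
	unsigned int prod;	/* descriptors posted by the xmit path */
	unsigned int cons;	/* descriptors reclaimed by the completion path */
	unsigned int size;
};

static unsigned int example_tx_avail(const struct example_tx_ring *ring)
{
	return ring->size - (ring->prod - ring->cons);
}

/* Called from the xmit path after reserving descriptors: stop the queue
 * while free space is low; the completion handlers shown above re-check
 * the available space and call netif_tx_wake_queue() once it recovers.
 */
static void example_maybe_stop_tx(struct netdev_queue *txq,
				  struct example_tx_ring *ring,
				  unsigned int needed)
{
	if (example_tx_avail(ring) < needed)
		netif_tx_stop_queue(txq);
}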
Code example #6
File: en_tx.c  Project: 8l/akaros
static bool mlx4_en_process_tx_cq(struct ether *dev,
				  struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	uint16_t index;
	uint16_t new_index, ring_index, stamp_index;
	uint32_t txbbs_skipped = 0;
	uint32_t txbbs_stamp = 0;
	uint32_t cons_index = mcq->cons_index;
	int size = cq->size;
	uint32_t size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	uint32_t packets = 0;
	uint32_t bytes = 0;
	int factor = priv->cqe_factor;
	uint64_t timestamp = 0;
	int done = 0;
	int budget = priv->tx_work_limit;
	uint32_t last_nr_txbb;
	uint32_t ring_cons;

	if (!priv->port_up)
		return true;

#if 0 // AKAROS_PORT
	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
#endif

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
	ring_cons = ACCESS_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && (done < budget)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		bus_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;
			if (ring->tx_info[ring_index].ts_requested)
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring_cons + txbbs_skipped) &
					ring->size), timestamp);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
	ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;

#if 0 // AKAROS_PORT
	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/*
	 * Wakeup Tx queue if this stopped, and at least 1 packet
	 * was completed
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}
#endif
	return done < budget;
}
Code example #7
File: en_tx.c  Project: avagin/linux
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
			    struct mlx4_en_tx_ring *ring,
			    int index, u64 timestamp,
			    int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_rx_alloc frame = {
		.page = tx_info->page,
		.dma = tx_info->map0_dma,
	};

	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
		dma_unmap_page(priv->ddev, tx_info->map0_dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(tx_info->page);
	}

	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						0, 0 /* Non-NAPI caller */);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (ring->tx_queue)
		netdev_tx_reset_queue(ring->tx_queue);

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
			   struct mlx4_en_cq *cq, int napi_budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
	struct mlx4_cqe *cqe;
	u16 index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	int done = 0;
	int budget = priv->tx_work_limit;
	u32 last_nr_txbb;
	u32 ring_cons;

	if (unlikely(!priv->port_up))
		return true;

	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && (done < budget)) {
		u16 new_index;

		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		dma_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			u64 timestamp = 0;

			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;

			if (unlikely(ring->tx_info[ring_index].ts_requested))
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = ring->free_tx_desc(
					priv, ring, ring_index,
					timestamp, napi_budget);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done < budget;

	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/* Wakeup Tx queue if this stopped, and ring is not full.
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) &&
	    !mlx4_en_is_tx_ring_full(ring)) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}

	return done < budget;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool clean_complete;

	clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
	if (!clean_complete)
		return budget;

	napi_complete(napi);
	mlx4_en_arm_cq(priv, cq);

	return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + (index << LOG_TXBB_SIZE);
}
Code example #8
File: odp_tx.c  Project: kalray/odp-mppa
static int mpodp_clean_tx_unlocked(struct mpodp_if_priv *priv,
				   struct mpodp_txq *txq,  unsigned budget)
{
	struct net_device *netdev = priv->netdev;
	struct mpodp_tx *tx;
	unsigned int packets_completed = 0;
	unsigned int bytes_completed = 0;
	unsigned int worked = 0;
	union mppa_timestamp ts;
	uint32_t tx_done, first_tx_done, last_tx_done, tx_submitted,
		tx_size, tx_head;

	tx_submitted = atomic_read(&txq->submitted);
	tx_done = atomic_read(&txq->done);
	first_tx_done = tx_done;
	last_tx_done = first_tx_done;

	tx_size = txq->size;
	tx_head = atomic_read(&txq->head);

	if (!tx_head) {
		/* No carrier yet. Check if there are any buffers yet */
		tx_head = readl(txq->head_addr);
		if (tx_head) {
			/* We now have buffers */
			atomic_set(&txq->head, tx_head);

			if (netif_msg_link(priv))
				netdev_info(netdev,"txq[%d]  now has Tx (%u).\n",
					    txq->id, tx_head);
		}
		return 0;
	}

	/* TX: 2nd step: update TX tail (DMA transfer completed) */
	while (tx_done != tx_submitted && worked < budget) {
		if (!mpodp_tx_is_done(priv, txq, tx_done)) {
			/* DMA transfer not completed */
			break;
		}

		if (netif_msg_tx_done(priv))
			netdev_info(netdev,
				    "txq[%d] tx[%d]: transfer done (head: %d submitted: %d done: %d)\n",
				    txq->id, tx_done, atomic_read(&txq->head),
				    tx_submitted, tx_done);

		/* get TX slot */
		tx = &(txq->ring[tx_done]);

		/* free resources */
		unmap_skb(&priv->pdev->dev, tx->skb, tx);
		consume_skb(tx->skb);

		worked++;

		tx_done += 1;
		if (tx_done == tx_size)
			tx_done = 0;
		last_tx_done = tx_done;

	}
	/* write new TX tail */
	atomic_set(&txq->done, tx_done);

	/* TX: 3rd step: free finished TX slot */
	while (first_tx_done != last_tx_done) {
		if (netif_msg_tx_done(priv))
			netdev_info(netdev,
				    "txq[%d] tx[%d]: done (head: %d submitted: %d done: %d)\n",
				    txq->id, first_tx_done, atomic_read(&txq->head),
				    tx_submitted, tx_done);

		/* get TX slot */
		tx = &(txq->ring[first_tx_done]);
		mppa_pcie_time_get(priv->tx_time, &ts);
		mppa_pcie_time_update(priv->tx_time, &tx->time, &ts);

		/* get stats */
		packets_completed++;
		bytes_completed += tx->len;

		first_tx_done += 1;
		if (first_tx_done == tx_size)
			first_tx_done = 0;
	}

	if (!packets_completed) {
		goto out;
	}

	/* update stats */
	netdev->stats.tx_bytes += bytes_completed;
	netdev->stats.tx_packets += packets_completed;

	netdev_tx_completed_queue(txq->txq, packets_completed, bytes_completed);
	netif_tx_wake_queue(txq->txq);
out:
	return worked;
}
Code example #9
File: raether_pdma.c  Project: andy-padavan/rt-n56u
static inline void
dma_xmit_clean(struct net_device *dev, END_DEVICE *ei_local)
{
	struct netdev_queue *txq;
	int cpu, clean_done = 0;
	u32 txd_free_idx;
#if defined (CONFIG_RAETH_BQL)
	u32 bytes_sent_ge1 = 0;
#if defined (CONFIG_PSEUDO_SUPPORT)
	u32 bytes_sent_ge2 = 0;
#endif
#endif

	spin_lock(&ei_local->page_lock);

	txd_free_idx = ei_local->txd_free_idx;

	while (clean_done < (NUM_TX_DESC-2)) {
		struct PDMA_txdesc *txd;
		struct sk_buff *skb;
		
		skb = ei_local->txd_buff[txd_free_idx];
		if (!skb)
			break;
		
		txd = &ei_local->txd_ring[txd_free_idx];
		
		/* check TXD not owned by DMA */
		if (!(ACCESS_ONCE(txd->txd_info2) & TX2_DMA_DONE))
			break;
		
		if (skb != (struct sk_buff *)0xFFFFFFFF) {
#if defined (CONFIG_RAETH_BQL)
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (skb->dev == ei_local->PseudoDev)
				bytes_sent_ge2 += skb->len;
			else
#endif
				bytes_sent_ge1 += skb->len;
#endif
			dev_kfree_skb(skb);
		}
		
		ei_local->txd_buff[txd_free_idx] = NULL;
		
		txd_free_idx = (txd_free_idx + 1) % NUM_TX_DESC;
		
		clean_done++;
	}

	if (ei_local->txd_free_idx != txd_free_idx)
		ei_local->txd_free_idx = txd_free_idx;

	spin_unlock(&ei_local->page_lock);

	if (!clean_done)
		return;

	cpu = smp_processor_id();

	if (netif_running(dev)) {
		txq = netdev_get_tx_queue(dev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge1);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

#if defined (CONFIG_PSEUDO_SUPPORT)
	if (netif_running(ei_local->PseudoDev)) {
		txq = netdev_get_tx_queue(ei_local->PseudoDev, 0);
		__netif_tx_lock(txq, cpu);
#if defined (CONFIG_RAETH_BQL)
		netdev_tx_completed_queue(txq, 0, bytes_sent_ge2);
#endif
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
#endif
}
Code example #10
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5_cqe64 *cqe;
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_TX_TIMEOUT, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	cqe = mlx5e_get_cqe(cq);

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		u16 wqe_counter;
		bool last_wqe;

		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);
		mlx5e_prefetch_cqe(cq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];
			wi = &sq->wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(MLX5E_TX_HW_STAMP(sq->channel->priv,
						       skb))) {
				struct skb_shared_hwtstamps hwts;

				mlx5e_fill_hwstamp(&sq->cq.channel->priv->tstamp,
						   &hwts, get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			dev_kfree_skb(skb);
		} while (!last_wqe);

		cqe = mlx5e_get_cqe(cq);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.queue_wake++;
	}
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
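
Finally, when a TX ring is reset outside of normal completion processing (as mlx4_en_free_tx_buf() does in example #7, or after a TX timeout), the BQL state has to be cleared as well, otherwise stale in-flight byte counts keep the queue throttled after the ring is reused. A minimal sketch; apart from netdev_tx_reset_queue() and netdev_get_tx_queue() the names are hypothetical:

#include <linux/netdevice.h>

/* Hypothetical teardown helper: after freeing whatever is still queued,
 * drop the BQL in-flight accounting for this queue.
 */
static void example_tx_ring_reset(struct net_device *dev, unsigned int qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	/* ... free any remaining skbs and descriptors (driver specific) ... */

	netdev_tx_reset_queue(txq);	/* clear BQL byte/packet counters */
}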