Example #1
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						struct xlgmac_channel,
						napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}
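All of the handlers on this page follow the same core NAPI contract: reclaim Tx completions, process at most `budget` Rx packets, return the number processed, and re-enable the device interrupt only once napi_complete_done() has succeeded. Below is a minimal sketch of that shared shape; the mydrv_* types and helpers are hypothetical placeholders, not functions from any of the drivers shown here.

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	/* mydrv_priv, mydrv_tx_clean, mydrv_rx and mydrv_irq_enable are
	 * hypothetical; only the NAPI calls are real kernel API.
	 */
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done;

	/* Tx completions do not count against the Rx budget */
	mydrv_tx_clean(priv);

	work_done = mydrv_rx(priv, budget);

	if (work_done < budget) {
		/* napi_complete_done() returns false if NAPI was
		 * rescheduled meanwhile; re-enable the interrupt only
		 * when polling has really stopped.
		 */
		if (napi_complete_done(napi, work_done))
			mydrv_irq_enable(priv);
	}

	return work_done;
}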
Example #2
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
Example #3
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	bool busy = false;
	int work_done;
	int i;

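	/* The IRQ handler sets this flag when it schedules NAPI; clear
	 * it before polling and re-check it after napi_complete_done()
	 * so a completion that fires while we poll is not lost.
	 */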
	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);

	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
	busy |= work_done == budget;
	busy |= mlx5e_post_rx_wqes(&c->rq);

	if (busy)
		return budget;

	napi_complete_done(napi, work_done);

	/* avoid losing completion event during/after polling cqs */
	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
		napi_schedule(napi);
		return work_done;
	}

	for (i = 0; i < c->num_tc; i++)
		mlx5e_cq_arm(&c->sq[i].cq);
	mlx5e_cq_arm(&c->rq.cq);

	return work_done;
}
Example #4
static int hisi_femac_poll(struct napi_struct *napi, int budget)
{
	struct hisi_femac_priv *priv = container_of(napi,
					struct hisi_femac_priv, napi);
	struct net_device *dev = priv->ndev;
	int work_done = 0, task = budget;
	int ints, num;

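	/* Alternate Tx reclaim with Rx polling, acking the raw IRQ
	 * status on each pass; keep looping while new events arrive
	 * so nothing is missed before interrupts are re-enabled.
	 */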
	do {
		hisi_femac_xmit_reclaim(dev);
		num = hisi_femac_rx(dev, task);
		work_done += num;
		task -= num;
		if (work_done >= budget)
			break;

		ints = readl(priv->glb_base + GLB_IRQ_RAW);
		writel(ints & DEF_INT_MASK,
		       priv->glb_base + GLB_IRQ_RAW);
	} while (ints & DEF_INT_MASK);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		hisi_femac_irq_enable(priv, DEF_INT_MASK &
					(~IRQ_INT_TX_PER_PACKET));
	}

	return work_done;
}
Example #5
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);

	if (c->xdp)
		busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq);

	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
	}

	busy |= c->rq.post_wqes(rq);

	if (busy) {
		if (likely(mlx5e_channel_no_affinity_change(c)))
			return budget;
		ch_stats->aff_change++;
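		/* An IRQ affinity change was detected: complete NAPI
		 * here so polling can move to the new CPU. Reporting
		 * one packet less than the budget is what allows
		 * napi_complete_done() to be called at all.
		 */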
		if (budget && work_done == budget)
			work_done--;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		return work_done;

	ch_stats->arm++;

	for (i = 0; i < c->num_tc; i++) {
		mlx5e_handle_tx_dim(&c->sq[i]);
		mlx5e_cq_arm(&c->sq[i].cq);
	}

	mlx5e_handle_rx_dim(rq);

	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
	mlx5e_cq_arm(&c->xdpsq.cq);

	return work_done;
}
Example #6
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT  2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		tx_done = 1;
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}
Example #7
/**
 * arc_emac_poll - NAPI poll handler.
 * @napi:	Pointer to napi_struct structure.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	arc_emac_tx_clean(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	return work_done;
}
Example #8
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}
Example #9
File: main.c Project: Lyude/linux
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

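	/* Reclaim Tx completions first, then poll Rx within the budget */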
	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}
Example #10
static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						   struct xlgmac_pdata,
						   napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
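	/* Split the budget evenly across the Rx rings and keep looping
	 * until either the budget is exhausted or a full pass over all
	 * channels makes no progress.
	 */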
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}
Example #11
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

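	/* Tx reclaim touches state shared with the xmit path, so run
	 * it under the Tx queue lock.
	 */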
	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
Example #12
static int r6040_poll(struct napi_struct *napi, int budget)
{
	struct r6040_private *priv =
		container_of(napi, struct r6040_private, napi);
	struct net_device *dev = priv->dev;
	void __iomem *ioaddr = priv->base;
	int work_done;

	r6040_tx(dev);

	work_done = r6040_rx(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* Enable RX/TX interrupt */
		iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
			  ioaddr + MIER);
	}
	return work_done;
}
Example #13
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack all pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}
Example #14
static int sca_poll(struct napi_struct *napi, int budget)
{
	port_t *port = container_of(napi, port_t, napi);
	u32 isr0 = sca_inl(ISR0, port->card);
	int received = 0;

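	/* ISR0 bit positions depend on which channel of the card this
	 * port is, hence the port->chan conditionals below.
	 */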
	if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
		sca_msci_intr(port);

	if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
		sca_tx_done(port);

	if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
		received = sca_rx_done(port, budget);

	if (received < budget) {
		napi_complete_done(napi, received);
		enable_intr(port);
	}

	return received;
}
Example #15
/**
 * nps_enet_poll - NAPI poll handler.
 * @napi:       Pointer to napi_struct structure.
 * @budget:     How many frames to process on one call.
 *
 * returns:     Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;

		napi_complete_done(napi, work_done);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* If a Tx interrupt arrives while interrupts are masked,
		 * it is lost, since Tx is an edge interrupt. Specifically,
		 * in the window between nps_enet_tx_handler above and the
		 * interrupt enable, all Tx requests would be stuck until
		 * the next Rx interrupt. The two lines below handle this
		 * by re-adding ourselves to the poll list.
		 */
		if (nps_enet_is_tx_pending(priv)) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			napi_reschedule(napi);
		}
	}

	return work_done;
}
Example #16
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

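	/* Advertise to every shared partner endpoint that this side is
	 * actively polling its Rx status.
	 */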
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

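		/* Frames were seen within the last few jiffies: keep
		 * polling. Otherwise clear the poll flags and unmask
		 * the Rx interrupt.
		 */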
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}