Example #1
/* NAPI poll handler. The return value is the number of packets
 * actually received, at most "budget". */
static int sn_poll(struct napi_struct *napi, int budget)
{
	struct sn_queue *rx_queue;

	int ret;

	rx_queue = container_of(napi, struct sn_queue, rx.napi);

	/* Another context is already polling this queue; report no work done */
	if (!spin_trylock(&rx_queue->rx.lock))
		return 0;

	rx_queue->rx.stats.polls++;

	ret = sn_poll_action(rx_queue, budget);

	if (ret < budget) {
		napi_complete(napi);
		sn_enable_interrupt(rx_queue);

		/* Final check for packets that raced with interrupt
		 * re-enabling; see sn_enable_interrupt() */
		if (rx_queue->dev->ops->pending_rx(rx_queue)) {
			napi_reschedule(napi);
			sn_disable_interrupt(rx_queue);
		}
	}

	spin_unlock(&rx_queue->rx.lock);

	return ret;
}
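
For context, a poll handler like sn_poll() is registered with the NAPI core once per RX queue. The sketch below is illustrative rather than taken from the driver: sn_register_napi() is a hypothetical helper, the dev->netdev back-pointer is assumed, and the four-argument netif_napi_add() signature is the one used before Linux 6.1 (newer kernels dropped the weight parameter).

/* Hypothetical registration helper; not part of the original driver */
static void sn_register_napi(struct sn_device *dev)
{
	int i;

	for (i = 0; i < dev->num_rxq; i++)
		netif_napi_add(dev->netdev, &dev->rx_queues[i]->rx.napi,
			       sn_poll, NAPI_POLL_WEIGHT);
}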
Example #2
/* Low-latency (busy-poll) socket callback. Called with BH disabled */
static int sn_poll_ll(struct napi_struct *napi)
{
	struct sn_queue *rx_queue = container_of(napi, struct sn_queue, napi);

	int idle_cnt = 0;
	int ret;

	/* Queue is busy in another context; let the busy-poll caller retry */
	if (!spin_trylock(&rx_queue->lock))
		return LL_FLUSH_BUSY;

	rx_queue->rx_stats.ll_polls++;

	/* Keep the device interrupt masked while we busy-poll */
	sn_disable_interrupt(rx_queue);

	/* Since there is no notification when the busy loop completes,
	 * there is no clean way to avoid racing with interrupts.
	 * Instead, poll for roughly 5 us within this function. */

	do {
		ret = sn_poll_action(rx_queue, SN_BUSY_POLL_BUDGET);
		if (ret == 0)
			cpu_relax();
	} while (ret == 0 && idle_cnt++ < 1000);

	sn_enable_interrupt(rx_queue);

	/* Re-check for packets that arrived before the interrupt was
	 * re-enabled; if any are pending, fall back to NAPI polling */
	if (rx_queue->dev->ops->pending_rx(rx_queue)) {
		sn_disable_interrupt(rx_queue);
		napi_schedule(napi);
	}

	spin_unlock(&rx_queue->lock);

	return ret;
}
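
Callbacks like sn_poll_ll() were attached through the ndo_busy_poll member of struct net_device_ops, a hook that existed in mainline roughly from Linux 3.11 until its removal around 4.11. A minimal wiring sketch under that assumption, with unrelated handlers elided:

static const struct net_device_ops sn_netdev_ops = {
	.ndo_open	= sn_open,
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Busy-poll hook; only present on kernels that still have it */
	.ndo_busy_poll	= sn_poll_ll,
#endif
	/* ... remaining handlers elided ... */
};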
Example #3
/* Interface up (ndo_open): enable NAPI before unmasking per-queue interrupts */
static int sn_open(struct net_device *netdev)
{
	struct sn_device *dev = netdev_priv(netdev);
	int i;

	for (i = 0; i < dev->num_rxq; i++)
		napi_enable(&dev->rx_queues[i]->rx.napi);
	for (i = 0; i < dev->num_rxq; i++)
		sn_enable_interrupt(dev->rx_queues[i]);

	return 0;
}
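
The matching teardown path would undo sn_open() in reverse order: mask the per-queue interrupts first so no new polls get scheduled, then stop NAPI. The sn_close() below is a hypothetical sketch, not the driver's actual implementation:

/* Interface down: hypothetical inverse of sn_open() above */
static int sn_close(struct net_device *netdev)
{
	struct sn_device *dev = netdev_priv(netdev);
	int i;

	for (i = 0; i < dev->num_rxq; i++)
		sn_disable_interrupt(dev->rx_queues[i]);
	for (i = 0; i < dev->num_rxq; i++)
		napi_disable(&dev->rx_queues[i]->rx.napi);

	return 0;
}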