Example #1
File: rx.c Project: 3bsa/linux
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	/* Never block here: if the filter lock is contended, skip this
	 * pass and report that no scan was done.
	 */
	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	/* Resume the scan where the previous pass left off */
	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	/* Check up to @quota entries, wrapping around the filter table */
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %u [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	/* Remember where to resume on the next pass */
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}
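
Below is a minimal sketch of how a caller might drive this expiry scan from a channel's NAPI poll routine. The helper name efx_filter_rfs_expire and the threshold (60) and quota (100) values are assumptions modelled on in-tree callers of this vintage, not part of the example above.

/* Hypothetical caller sketch: periodically scan for stale
 * accelerated-RFS filters. The 60/100 constants are assumptions
 * chosen for illustration.
 */
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
	if (channel->rfs_filters_added >= 60 &&
	    __efx_filter_rfs_expire(channel->efx, 100))
		channel->rfs_filters_added -= 60;
}

Since __efx_filter_rfs_expire() returns false when it cannot take the filter lock, the counter is only decremented after a scan actually ran.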
Example #2
/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @retry:		Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					  int retry)
{
	struct efx_rx_buffer *rx_buf;
	unsigned int fill_level, index;
	int i, space, rc = 0;

	/* Calculate current fill level.  Do this outside the lock,
	 * because most of the time we'll end up not wanting to do the
	 * fill anyway.
	 */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);

	/* Don't fill if we don't need to */
	if (fill_level >= rx_queue->fast_fill_trigger)
		return 0;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Acquire RX add lock.  If this lock is contended, then a fast
	 * fill must already be in progress (e.g. in the refill
	 * tasklet), so we don't need to do anything
	 */
	if (!spin_trylock_bh(&rx_queue->add_lock))
		return -1;

 retry:
	/* Recalculate current fill level now that we have the lock */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out_unlock;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

	/* Fill the ring in batches of EFX_RX_BATCH buffers */
	do {
		for (i = 0; i < EFX_RX_BATCH; ++i) {
			index = (rx_queue->added_count &
				 rx_queue->efx->type->rxd_ring_mask);
			rx_buf = efx_rx_buffer(rx_queue, index);
			rc = efx_init_rx_buffer(rx_queue, rx_buf);
			if (unlikely(rc))
				goto out;
			++rx_queue->added_count;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Send write pointer to card. */
	falcon_notify_rx_desc(rx_queue);

	/* If the fast fill is running from inside the refill tasklet,
	 * then on SMP systems it may be running on a different CPU from
	 * RX event processing, which means that the fill level may now
	 * be out of date.
	 */
	if (unlikely(retry && (rc == 0)))
		goto retry;

 out_unlock:
	spin_unlock_bh(&rx_queue->add_lock);

	return rc;
}
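
The @retry parameter implies two entry points: RX event processing, which must not loop, and the refill tasklet, which should re-check the fill level. A plausible pair of wrappers is sketched below; the wrapper names are assumptions based on the __-prefixed worker above, not taken from the driver.

/* Hypothetical wrappers around the worker above */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	/* Event-processing path: a single fill pass is enough */
	__efx_fast_push_rx_descriptors(rx_queue, 0);
}

void efx_fast_push_rx_descriptors_retry(struct efx_rx_queue *rx_queue)
{
	/* Refill-tasklet path: re-check the fill level afterwards,
	 * since RX event processing may be consuming buffers on
	 * another CPU
	 */
	__efx_fast_push_rx_descriptors(rx_queue, 1);
}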