/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
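/* A minimal standalone sketch (hypothetical, not driver code) of two
 * details the function above relies on. First, fill_level =
 * added_count - removed_count stays correct even after the 32-bit
 * counters wrap, because unsigned subtraction is modulo 2^32. Second,
 * the refill loop consumes `space` one batch per pass and stops while a
 * whole batch still fits below max_fill. All names and values here are
 * illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int added = 0xffffffd0u;	/* counters near wraparound */
	unsigned int removed = 0xffffffb0u;
	unsigned int max_fill = 96, batch_size = 16;

	added += 0x40;				/* both counters wrap past 0 */
	removed += 0x40;

	/* Still the true number of outstanding descriptors (32) */
	unsigned int fill_level = added - removed;
	int space = max_fill - fill_level;

	printf("fill_level=%u space=%d\n", fill_level, space);

	/* Same loop shape as the fast fill above: one batch per pass */
	do {
		added += batch_size;
		printf("filled a batch, level now %u\n", added - removed);
	} while ((space -= batch_size) >= (int)batch_size);

	return 0;
}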
/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @retry: Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					  int retry)
{
	struct efx_rx_buffer *rx_buf;
	unsigned fill_level, index;
	int i, space, rc = 0;

	/* Calculate current fill level. Do this outside the lock,
	 * because most of the time we'll end up not wanting to do the
	 * fill anyway.
	 */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);

	/* Don't fill if we don't need to */
	if (fill_level >= rx_queue->fast_fill_trigger)
		return 0;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Acquire RX add lock. If this lock is contended, then a fast
	 * fill must already be in progress (e.g. in the refill
	 * tasklet), so we don't need to do anything
	 */
	if (!spin_trylock_bh(&rx_queue->add_lock))
		return -1;

retry:
	/* Recalculate current fill level now that we have the lock */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out_unlock;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		for (i = 0; i < EFX_RX_BATCH; ++i) {
			index = rx_queue->added_count & EFX_RXQ_MASK;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rc = efx_init_rx_buffer(rx_queue, rx_buf);
			if (unlikely(rc))
				goto out;
			++rx_queue->added_count;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

out:
	/* Send write pointer to card. */
	efx_nic_notify_rx_desc(rx_queue);

	/* If the fast fill is running from the refill tasklet, then for
	 * SMP systems it may be running on a different CPU to RX event
	 * processing, which means that the fill level may now be out of
	 * date.
	 */
	if (unlikely(retry && (rc == 0)))
		goto retry;

out_unlock:
	spin_unlock_bh(&rx_queue->add_lock);

	return rc;
}
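/* A hedged sketch of how callers might use the retry flag above. The
 * wrapper and tasklet below are illustrative: efx_schedule_slow_fill()
 * and the exact signatures are assumptions, not confirmed driver API.
 * The fast path passes retry=0 (a stale fill level just means the next
 * event will top the queue up); the refill tasklet passes retry=1 so the
 * fill level is rechecked under the lock, as the final comment in the
 * function explains.
 */
static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); /* assumed helper */

static void example_fast_push(struct efx_rx_queue *rx_queue)
{
	/* Event-processing path: -1 (lock contended) means another fill
	 * is already running, so any nonzero rc simply defers the work. */
	if (unlikely(__efx_fast_push_rx_descriptors(rx_queue, 0)))
		efx_schedule_slow_fill(rx_queue);
}

static void example_rx_refill_tasklet(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Deferred path: memory may have been freed since the failed
	 * atomic allocation, so recheck the level (retry=1) and back
	 * off again if the refill still cannot complete. */
	if (__efx_fast_push_rx_descriptors(rx_queue, 1))
		efx_schedule_slow_fill(rx_queue);
}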