/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to immediately continue the fast fill.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	int rc;

	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
	if (unlikely(rc)) {
		/* Schedule the work item to run immediately.  The hope is
		 * that work is immediately pending to free some memory
		 * (e.g. an RX event or TX completion).
		 */
		efx_schedule_slow_fill(rx_queue, 0);
	}
}
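/* The fall-back path above hands the queue over to efx_schedule_slow_fill()
 * and the queue's delayed work item (see efx_rx_work() below); the helper
 * itself is not part of this excerpt.  A minimal sketch of what it might
 * look like, assuming a dedicated refill workqueue (the refill_workqueue
 * name is an assumption, not taken from this excerpt):
 */
#if 0	/* illustrative sketch only, not part of the driver excerpt */
static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	/* Re-run efx_rx_work() for this queue after @delay jiffies;
	 * a delay of 0 requests an immediate retry.
	 */
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
#endif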
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: whether the caller is in atomic context
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient memory available for
 * atomic allocation, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
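/* The variant above relies on its caller for serialisation, per the
 * kernel-doc note that it must run from the NAPI handler (or with NAPI
 * disabled).  A minimal sketch of how it might be driven from a NAPI poll
 * callback; struct efx_channel, the napi_str member, efx_process_channel()
 * and efx_channel_get_rx_queue() are assumed names used for illustration,
 * not taken from this excerpt:
 */
#if 0	/* illustrative sketch only, not part of the driver excerpt */
static int efx_poll_sketch(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	int spent;

	/* Handle completed RX/TX events for this channel first */
	spent = efx_process_channel(channel, budget);

	/* Refill from softirq context, so only atomic allocations */
	efx_fast_push_rx_descriptors(rx_queue, true);

	if (spent < budget)
		napi_complete_done(napi, spent);
	return spent;
}
#endif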
/**
 * efx_rx_work - retry a failed RX descriptor fast fill
 * @data: work item embedded in the RX descriptor queue
 *
 * Runs from the refill work item after a fast fill could not allocate
 * enough memory, and reschedules itself if the retry also fails.
 */
void efx_rx_work(struct work_struct *data)
{
	struct efx_rx_queue *rx_queue;
	int rc;

	rx_queue = container_of(data, struct efx_rx_queue, work.work);

	if (unlikely(!rx_queue->channel->enabled))
		return;

	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
		  "%d\n", rx_queue->queue, raw_smp_processor_id());

	++rx_queue->slow_fill_count;

	/* Push new RX descriptors, allowing at least 1 jiffy for
	 * the kernel to free some more memory.
	 */
	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
	if (rc)
		efx_schedule_slow_fill(rx_queue, 1);
}
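/* The container_of() lookup above implies that rx_queue->work is a
 * struct delayed_work bound to efx_rx_work().  A minimal sketch of that
 * wiring during queue setup and teardown; the function names are
 * illustrative, not taken from this excerpt:
 */
#if 0	/* illustrative sketch only, not part of the driver excerpt */
static void efx_rx_queue_init_work_sketch(struct efx_rx_queue *rx_queue)
{
	/* efx_rx_work() is invoked via rx_queue->work.work, matching the
	 * container_of() in the handler above. */
	INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
}

static void efx_rx_queue_fini_work_sketch(struct efx_rx_queue *rx_queue)
{
	/* Ensure no slow fill is still pending before tearing the queue down */
	cancel_delayed_work_sync(&rx_queue->work);
}
#endif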