Example #1
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	/* Flush and disable the hardware RX queue before buffers are freed */
	falcon_fini_rx(rx_queue);

	/* Release RX buffers.  NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Release any page that is part-way through being split into RX buffers */
	if (rx_queue->buf_page != NULL) {
		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
			       efx_rx_buf_size(rx_queue->efx),
			       PCI_DMA_FROMDEVICE);
		__free_pages(rx_queue->buf_page,
			     rx_queue->efx->rx_buffer_order);
		rx_queue->buf_page = NULL;
	}
}
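
The per-buffer teardown above relies on efx_fini_rx_buffer(), whose body is not shown. A minimal sketch of what such a helper has to do, assuming each buffer is backed by either an skb or a page: the helper name is hypothetical, the fields (skb, page, dma_addr, len) follow the usage in Example #2, and the per-buffer unmap length and granularity are assumptions, since the real driver may share one DMA mapping across buffers split from a page.

/* Hypothetical sketch only: unmap the buffer's DMA mapping, then free
 * whichever backing store (skb or page) it holds.  The unmap length is
 * an assumption for illustration.
 */
static void example_fini_rx_buffer(struct efx_rx_queue *rx_queue,
				   struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->skb) {
		pci_unmap_single(rx_queue->efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	} else if (rx_buf->page) {
		pci_unmap_page(rx_queue->efx->pci_dev, rx_buf->dma_addr,
			       efx_rx_buf_size(rx_queue->efx),
			       PCI_DMA_FROMDEVICE);
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}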
Example #2
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
		  rx_queue->queue, index,
		  (unsigned long long)rx_buf->dma_addr, len,
		  (checksummed ? " [SUMMED]" : ""),
		  (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			rx_queue->channel->n_skbuff_leaks++;
		else
			/* We haven't called efx_unmap_rx_buffer yet,
			 * so fini the entire rx_buffer here
			 */
			efx_fini_rx_buffer(rx_queue, rx_buf);
		return;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}
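
The rx_pkt handoff at the end of this function is a one-deep software pipeline: each event prefetches its own payload and then delivers the *previous* packet, giving the prefetch a full iteration to complete before the data is read. The same pattern in isolation, as a standalone sketch rather than driver code: process(), struct item, and consume_all() are placeholders invented for this example.

#include <stddef.h>

struct item {
	char data[256];
};

void process(struct item *it);	/* consumer; assumed to exist elsewhere */

/* One-deep pipeline: prefetch item i, then process item i - 1, so each
 * item's cache lines have a full loop iteration to arrive before use.
 */
void consume_all(struct item **items, size_t n)
{
	struct item *pending = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		__builtin_prefetch(items[i]->data);
		if (pending)
			process(pending);
		pending = items[i];
	}
	if (pending)
		process(pending);	/* drain the last pipelined item */
}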
Example #3
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(efx, drv, efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled set */
	rx_queue->enabled = false;

	/* Stop the deferred-refill timer (del_timer_sync() also waits for a
	 * concurrently running handler), then shut down the hardware RX queue.
	 */
	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		/* Per-page DMA state is kept at the start of the page itself */
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}
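
Both loops in Example #3 depend on power-of-two ring sizing: added_count and removed_count only ever increase, a physical slot is recovered with count & ptr_mask, and the fill level is simply their difference. A standalone sketch of that invariant follows; RING_SIZE, slot(), and the counter values are made up for illustration.

#include <assert.h>

#define RING_SIZE 512			/* must be a power of two */
#define PTR_MASK  (RING_SIZE - 1)

/* Counters only ever increase; masking recovers the physical slot.
 * Unsigned subtraction gives the fill level even across wrap-around,
 * because RING_SIZE divides UINT_MAX + 1.
 */
static unsigned int slot(unsigned int count)
{
	return count & PTR_MASK;
}

int main(void)
{
	unsigned int added = 1000, removed = 700;

	assert(added - removed <= RING_SIZE);	/* current fill level */
	assert(slot(added) == 1000 % RING_SIZE);
	assert(slot(removed) == 700 % RING_SIZE);
	return 0;
}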