void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

        falcon_fini_rx(rx_queue);

        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* For a page that is part-way through splitting into RX buffers */
        if (rx_queue->buf_page != NULL) {
                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
                               efx_rx_buf_size(rx_queue->efx),
                               PCI_DMA_FROMDEVICE);
                __free_pages(rx_queue->buf_page,
                             rx_queue->efx->rx_buffer_order);
                rx_queue->buf_page = NULL;
        }
}
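/* Hedged sketch: efx_rx_buffer() and efx_rx_buf_size(), used above, are not
 * shown in this excerpt.  Minimal forms consistent with how they are used
 * (a flat buffer array owned by the queue, and DMA mappings that cover the
 * whole page of order rx_buffer_order) might look like this; treat both as
 * assumptions rather than the driver's actual definitions.
 */
static inline struct efx_rx_buffer *
efx_rx_buffer(struct efx_rx_queue *rx_queue, unsigned int index)
{
        /* Assumed: buffers live in a contiguous array owned by the queue */
        return &rx_queue->buffer[index];
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        /* Assumed: each mapping spans the whole (possibly compound) page */
        return PAGE_SIZE << efx->rx_buffer_order;
}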
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one.  Return a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        void *page_addr;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = pci_map_page(efx->pci_dev, page, 0,
                                        efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                page_addr = page_address(page);
                state = page_addr;
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                page_addr += sizeof(struct efx_rx_page_state);
                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
                rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->is_page = true;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;

                if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_addr += (PAGE_SIZE >> 1);
                        page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}
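/* Hedged sketch: the batch allocator above stores per-page bookkeeping at the
 * start of each page and then splits the remainder in half.  This excerpt does
 * not define struct efx_rx_page_state or EFX_RX_HALF_PAGE; a layout consistent
 * with their use here (a reference count plus the page's original DMA mapping,
 * and half a page minus that header) would be roughly:
 */
struct efx_rx_page_state {
        unsigned refcnt;	/* RX buffers still carved from this page */
        dma_addr_t dma_addr;	/* DMA address of the whole page, for unmap */
};

#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))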
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                if (rx_buf->unmap_addr) {
                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                        rx_buf->unmap_addr = 0;
                }
        } else if (likely(rx_buf->skb)) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}
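/* Hedged sketch: efx_unmap_rx_buffer() only undoes the DMA mapping; the
 * teardown loop in efx_fini_rx_queue() also has to release the memory itself.
 * efx_fini_rx_buffer() and efx_free_rx_buffer() are not part of this excerpt;
 * a companion pair consistent with the fields used above might be:
 */
static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                /* Drop the driver's page reference; the page itself goes away
                 * once any skbs built on it have been released too. */
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        } else if (likely(rx_buf->skb)) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}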
/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                   struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        int bytes, space, offset;

        bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

        /* If there is space left in the previously allocated page,
         * then use it. Otherwise allocate a new one */
        rx_buf->page = rx_queue->buf_page;
        if (rx_buf->page == NULL) {
                dma_addr_t dma_addr;

                rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                if (unlikely(rx_buf->page == NULL))
                        return -ENOMEM;

                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);

                if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
                }

                rx_queue->buf_page = rx_buf->page;
                rx_queue->buf_dma_addr = dma_addr;
                rx_queue->buf_data = (page_address(rx_buf->page) +
                                      EFX_PAGE_IP_ALIGN);
        }

        rx_buf->len = bytes;
        rx_buf->data = rx_queue->buf_data;
        offset = efx_rx_buf_offset(rx_buf);
        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

        /* Try to pack multiple buffers per page */
        if (efx->rx_buffer_order == 0) {
                /* The next buffer starts on the next 512 byte boundary */
                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                offset += ((bytes + 0x1ff) & ~0x1ff);

                space = efx_rx_buf_size(efx) - offset;
                if (space >= bytes) {
                        /* Refs dropped on kernel releasing each skb */
                        get_page(rx_queue->buf_page);
                        goto out;
                }
        }

        /* This is the final RX buffer for this page, so mark it for
         * unmapping */
        rx_queue->buf_page = NULL;
        rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
        return 0;
}
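/* Hedged sketch: efx_rx_buf_offset() is used above but not defined in this
 * excerpt.  Because efx_init_rx_buffer_page() never lets a buffer straddle a
 * page boundary, the offset can plausibly be recovered from the low bits of
 * the buffer's virtual address; this is an assumed form, not the driver's
 * actual helper.
 */
static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *rx_buf)
{
        /* Offset within the page; valid only while a buffer is guaranteed
         * not to cross a page boundary. */
        return (unsigned long)rx_buf->data & (PAGE_SIZE - 1);
}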