/* Unmap a TX buffer's DMA mapping (if any) and free its skb once the
 * hardware has finished with it.
 */
static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
				      struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;

		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, buffer->unmap_addr,
					 buffer->unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, buffer->unmap_addr,
				       buffer->unmap_len, PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = 0;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}
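/* For context, a minimal sketch of the completion path that would call
 * efx_dequeue_buffer(): walk the descriptor ring from the queue's read
 * index up to the completion index reported by the hardware, releasing
 * each buffer in turn.  The helper name, the buffer[] array access and
 * the txd_ring_mask/stop-index arithmetic are illustrative assumptions,
 * not a copy of the driver's actual completion handler.
 */
static void efx_dequeue_buffers_sketch(struct efx_tx_queue *tx_queue,
				       unsigned int index)
{
	unsigned int mask = tx_queue->efx->type->txd_ring_mask;
	unsigned int stop_index = (index + 1) & mask;
	unsigned int read_ptr = tx_queue->read_count & mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		/* Release the DMA mapping and skb for this entry */
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & mask;
	}
}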
/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @retry: Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit.  If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					  int retry)
{
	struct efx_rx_buffer *rx_buf;
	unsigned fill_level, index;
	int i, space, rc = 0;

	/* Calculate current fill level.  Do this outside the lock,
	 * because most of the time we'll end up not wanting to do the
	 * fill anyway.
	 */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);

	/* Don't fill if we don't need to */
	if (fill_level >= rx_queue->fast_fill_trigger)
		return 0;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Acquire RX add lock.  If this lock is contended, then a fast
	 * fill must already be in progress (e.g. in the refill
	 * tasklet), so we don't need to do anything
	 */
	if (!spin_trylock_bh(&rx_queue->add_lock))
		return -1;

 retry:
	/* Recalculate current fill level now that we have the lock */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level >
			    rx_queue->efx->type->rxd_ring_mask + 1);
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out_unlock;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		for (i = 0; i < EFX_RX_BATCH; ++i) {
			index = (rx_queue->added_count &
				 rx_queue->efx->type->rxd_ring_mask);
			rx_buf = efx_rx_buffer(rx_queue, index);
			rc = efx_init_rx_buffer(rx_queue, rx_buf);
			if (unlikely(rc))
				goto out;
			++rx_queue->added_count;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Send write pointer to card. */
	falcon_notify_rx_desc(rx_queue);

	/* If the fast fill is running from inside the refill tasklet, then
	 * on SMP systems it may be running on a different CPU from RX event
	 * processing, which means that the fill level may now be out of
	 * date.
	 */
	if (unlikely(retry && (rc == 0)))
		goto retry;

 out_unlock:
	spin_unlock_bh(&rx_queue->add_lock);
	return rc;
}
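/* A hedged sketch of how __efx_fast_push_rx_descriptors() might be driven
 * from the fast path: try once without the retry recheck, and if atomic
 * allocation failed (or the add lock was contended), defer to a slower
 * refill context that can use blocking allocations.  The helper name and
 * the efx_schedule_slow_fill() call are assumptions for illustration, not
 * necessarily the driver's exact fallback mechanism.
 */
static void efx_fast_push_rx_descriptors_sketch(struct efx_rx_queue *rx_queue)
{
	if (__efx_fast_push_rx_descriptors(rx_queue, 0) != 0) {
		/* Atomic refill did not complete; schedule a work item
		 * to retry the fill outside atomic context. */
		efx_schedule_slow_fill(rx_queue, 0);
	}
}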
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	/* Overwrite the fields that legitimately differ from the sent
	 * payload, so the comparisons below only flag real corruption.
	 */
	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that the header exists */
	if (pkt_len < sizeof(received->header)) {
		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
			"test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header matches */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
			"%s loopback test\n", pkt_len, (int)sizeof(*payload),
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding match */
	if (memcmp(&received->msg, &payload->msg,
		   sizeof(received->msg)) != 0) {
		EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the iteration matches */
	if (received->iteration != payload->iteration) {
		EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
			"%s loopback test\n", ntohs(received->iteration),
			ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increment the good-RX count */
	EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
		  LOOPBACK_MODE(efx));
	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef EFX_ENABLE_DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		EFX_ERR(efx, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		EFX_ERR(efx, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}
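/* The checks above imply a payload layout along these lines: an Ethernet
 * header compared over ETH_HLEN, a full IP header (saddr/check are
 * rewritten before the memcmp), an iteration counter carried in network
 * byte order (it is printed with ntohs()), and a fixed message/padding
 * area compared with memcmp().  This struct is a reconstruction from
 * those field accesses, not a copy of the driver's definition; the field
 * order, the msg size and the packing attribute are assumptions.
 */
struct efx_loopback_payload_sketch {
	struct ethhdr header;	/* Ethernet header, checked first */
	struct iphdr ip;	/* IP header, compared whole */
	__be16 iteration;	/* which test iteration sent this packet */
	char msg[64];		/* fixed pattern plus padding */
} __packed;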