static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}
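/* A minimal usage sketch, not the driver's actual transmit path: the
 * real caller falls back to copying a short or fragmented packet into
 * the per-queue copy buffer instead of DMA-mapping it.
 * example_enqueue_short() is a hypothetical helper shown only to
 * illustrate when the copy path above applies.
 */
static int example_enqueue_short(struct efx_tx_queue *tx_queue,
				 struct sk_buff *skb)
{
	/* Frames that fit in a copy buffer avoid a DMA mapping. */
	if (skb->len <= EFX_TX_CB_SIZE)
		return efx_enqueue_skb_copy(tx_queue, skb);

	/* Larger frames would take the DMA-mapping path (omitted). */
	return -EMSGSIZE;
}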
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned int len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	EFX_WARN_ON_ONCE_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
					  tx_queue->read_count >=
					  tx_queue->efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,
							    dma_addr, len);

		/* If there's space for everything this is our last buffer. */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_WARN_ON_ONCE_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}
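/* Illustrative sketch of feeding efx_tx_queue_insert(): DMA-map the
 * linear area of an skb and push descriptors for it.  This is a
 * simplified, hypothetical variant of the driver's mapping path; in
 * the full path the final descriptor of the packet also gets its
 * continuation flag cleared, and mapping errors trigger a rollback.
 */
static int example_map_and_insert(struct efx_tx_queue *tx_queue,
				  struct sk_buff *skb)
{
	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
	struct efx_tx_buffer *final_buffer;
	unsigned int len = skb_headlen(skb);
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* May emit several descriptors if the NIC limits the length of
	 * a single descriptor; 'final_buffer' points at the last one.
	 */
	efx_tx_queue_insert(tx_queue, dma_addr, len, &final_buffer);
	final_buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
	final_buffer->unmap_len = len;
	return 0;
}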
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 * @is_first:		true if this is the first packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st,
				bool is_first)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_mask;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_mask = TCPHDR_FIN | TCPHDR_PSH;
	} else {
		st->packet_space = st->out_len;
		tcp_flags_mask = 0;
	}

	if (!is_first)
		tcp_flags_mask |= TCPHDR_CWR; /* Congestion control */

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned int ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		tcp_flag_byte(tsoh_th) &= ~tcp_flags_mask;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = tcp_flag_byte(tcp_hdr(skb)) & ~tcp_flags_mask;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			buffer->dma_offset = 0;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	return 0;
}
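/* A hedged sketch of the segmentation loop that drives
 * tso_start_new_packet(), modelled on the driver's TSO enqueue path;
 * the tso_start() and tso_fill_packet_with_fragment() calls are
 * assumed from the surrounding file, and fragment advancement plus
 * error unwinding are omitted for brevity.
 */
static int example_tso_loop(struct efx_tx_queue *tx_queue,
			    struct sk_buff *skb)
{
	struct tso_state st;
	int rc;

	rc = tso_start(&st, tx_queue->efx, tx_queue, skb);
	if (rc)
		return rc;

	/* First segment header: is_first == true, so CWR is preserved. */
	rc = tso_start_new_packet(tx_queue, skb, &st, true);
	if (rc)
		return rc;

	while (st.out_len) {
		/* Fill the current segment with payload descriptors. */
		tso_fill_packet_with_fragment(tx_queue, skb, &st);

		/* Segment full but payload remains: emit the next
		 * header with is_first == false (CWR cleared).
		 */
		if (st.packet_space == 0 && st.out_len) {
			rc = tso_start_new_packet(tx_queue, skb, &st, false);
			if (rc)
				return rc;
		}
	}
	return 0;
}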