/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to
		 * fill.
		 */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 * @is_first: true if this is the first packet
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st,
				bool is_first)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_mask;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_mask = TCPHDR_FIN | TCPHDR_PSH;
	} else {
		st->packet_space = st->out_len;
		tcp_flags_mask = 0;
	}

	if (!is_first)
		tcp_flags_mask |= TCPHDR_CWR; /* Congestion control */

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned int ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		tcp_flag_byte(tsoh_th) &= ~tcp_flags_mask;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = tcp_flag_byte(tcp_hdr(skb)) & ~tcp_flags_mask;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start(). Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			buffer->dma_offset = 0;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	return 0;
}
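For context, a minimal sketch of the segmentation loop that might sit around tso_start_new_packet() follows. It is illustrative only, not the driver's actual enqueue path: it assumes the TSO state has already been initialised and its headers DMA-mapped (e.g. by the tso_start() referenced in the comments above), and example_fill_packet_payload() is a hypothetical helper assumed to emit payload descriptors for min(st->packet_space, st->out_len) bytes and decrement both counters. Fragment handling, queue-full backpressure and error rollback are omitted.

/* Hypothetical helper (not part of the driver): emit payload descriptors
 * for min(st->packet_space, st->out_len) bytes and decrement both counters.
 */
static void example_fill_packet_payload(struct efx_tx_queue *tx_queue,
					const struct sk_buff *skb,
					struct tso_state *st);

/* Illustrative sketch of a caller: one header per segment, payload
 * descriptors in between, and a fresh header whenever the current
 * segment is full.  Assumes *st was set up beforehand.
 */
static int example_emit_tso_segments(struct efx_tx_queue *tx_queue,
				     const struct sk_buff *skb,
				     struct tso_state *st)
{
	int rc;

	/* First segment: emit its (rewritten or option-descriptor) header. */
	rc = tso_start_new_packet(tx_queue, skb, st, true);
	if (rc)
		return rc;

	for (;;) {
		/* Payload descriptors until the segment or the skb is done. */
		example_fill_packet_payload(tx_queue, skb, st);

		if (st->out_len == 0)
			break;			/* whole payload consumed */

		if (st->packet_space == 0) {
			/* Segment full: start the next one. */
			rc = tso_start_new_packet(tx_queue, skb, st, false);
			if (rc)
				return rc;
		}
	}

	return 0;
}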