int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
        if (writable_len > skb->len)
                return 0;

        /* Not exclusive use of packet? Must copy. */
        if (!skb_cloned(skb)) {
                if (writable_len <= skb_headlen(skb))
                        return 1;
        } else if (skb_clone_writable(skb, writable_len))
                return 1;

        if (writable_len <= skb_headlen(skb))
                writable_len = 0;
        else
                writable_len -= skb_headlen(skb);

        return !!__pskb_pull_tail(skb, writable_len);
}
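/*
 * A hedged usage sketch, not taken from the source above: the common
 * netfilter pattern is to call skb_make_writable() for exactly the bytes
 * that are about to be rewritten and to re-load the header pointer
 * afterwards, since the head may have been reallocated. The helper name
 * example_set_tos() and the TOS value are hypothetical; a real target
 * would also update iph->check.
 */
static unsigned int example_set_tos(struct sk_buff *skb)
{
        struct iphdr *iph;

        if (!skb_make_writable(skb, sizeof(struct iphdr)))
                return NF_DROP;

        /* Re-fetch the header: the skb head may have moved. */
        iph = ip_hdr(skb);
        iph->tos = 0x10;        /* illustrative value only */

        return NF_ACCEPT;
}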
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
        int copyflag;
        int elt;
        struct sk_buff *skb1, **skb_p;

        /* If skb is cloned or its head is paged, reallocate
         * head pulling out all the pages (pages are considered not writable
         * at the moment even if they are anonymous).
         */
        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
            __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
                return -ENOMEM;

        /* Easy case. Most of packets will go this way. */
        if (!skb_shinfo(skb)->frag_list) {
                /* A little of trouble, not enough of space for trailer.
                 * This should not happen, when stack is tuned to generate
                 * good frames. OK, on miss we reallocate and reserve even more
                 * space, 128 bytes is fair. */
                if (skb_tailroom(skb) < tailbits &&
                    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128,
                                     GFP_ATOMIC))
                        return -ENOMEM;

                /* Voila! */
                *trailer = skb;
                return 1;
        }

        /* Misery. We are in troubles, going to mincer fragments... */

        elt = 1;
        skb_p = &skb_shinfo(skb)->frag_list;
        copyflag = 0;

        while ((skb1 = *skb_p) != NULL) {
                int ntail = 0;

                /* The fragment is partially pulled by someone,
                 * this can happen on input. Copy it and everything
                 * after it. */
                if (skb_shared(skb1))
                        copyflag = 1;

                /* If the skb is the last, worry about trailer. */
                if (skb1->next == NULL && tailbits) {
                        if (skb_shinfo(skb1)->nr_frags ||
                            skb_shinfo(skb1)->frag_list ||
                            skb_tailroom(skb1) < tailbits)
                                ntail = tailbits + 128;
                }

                if (copyflag ||
                    skb_cloned(skb1) ||
                    ntail ||
                    skb_shinfo(skb1)->nr_frags ||
                    skb_shinfo(skb1)->frag_list) {
                        struct sk_buff *skb2;

                        /* F**k, we are miserable poor guys... */
                        if (ntail == 0)
                                skb2 = skb_copy(skb1, GFP_ATOMIC);
                        else
                                skb2 = skb_copy_expand(skb1,
                                                       skb_headroom(skb1),
                                                       ntail,
                                                       GFP_ATOMIC);
                        if (unlikely(skb2 == NULL))
                                return -ENOMEM;

                        if (skb1->sk)
                                skb_set_owner_w(skb2, skb1->sk);

                        /* Looking around. Are we still alive?
                         * OK, link new skb, drop old one */
                        skb2->next = skb1->next;
                        *skb_p = skb2;
                        kfree_skb(skb1);
                        skb1 = skb2;
                }
                elt++;
                *trailer = skb1;
                skb_p = &skb1->next;
        }

        return elt;
}
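/*
 * A minimal caller sketch, assuming the IPsec-style use of skb_cow_data()
 * seen in esp_output()-like code: the return value is the number of
 * segments in the buffer (or a negative errno) and *trailer points at the
 * skb that now has 'tailen' bytes of tailroom. example_reserve_trailer()
 * and 'tailen' are hypothetical names used only for illustration.
 */
static int example_reserve_trailer(struct sk_buff *skb, int tailen)
{
        struct sk_buff *trailer;
        int nfrags;
        u8 *tail;

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                return nfrags;

        /* The reserved space is at the tail of 'trailer', not of 'skb'. */
        tail = skb_tail_pointer(trailer);
        memset(tail, 0, tailen);        /* illustrative padding */
        pskb_put(skb, trailer, tailen); /* account for it on the whole chain */

        return 0;
}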
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
{
        struct net_device *netdev = priv->netdev;
        struct ftmac100_rxdes *rxdes;
        struct sk_buff *skb;
        struct page *page;
        dma_addr_t map;
        int length;

        rxdes = ftmac100_rx_locate_first_segment(priv);
        if (!rxdes)
                return false;

        if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
                ftmac100_rx_drop_packet(priv);
                return true;
        }

        /*
         * It is impossible to get multi-segment packets
         * because we always provide big enough receive buffers.
         */
        if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
                BUG();

        /* start processing */
        skb = netdev_alloc_skb_ip_align(netdev, 128);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        netdev_err(netdev, "rx skb alloc failed\n");

                ftmac100_rx_drop_packet(priv);
                return true;
        }

        if (unlikely(ftmac100_rxdes_multicast(rxdes)))
                netdev->stats.multicast++;

        map = ftmac100_rxdes_get_dma_addr(rxdes);
        dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);

        length = ftmac100_rxdes_frame_length(rxdes);
        page = ftmac100_rxdes_get_page(rxdes);
        skb_fill_page_desc(skb, 0, page, 0, length);
        skb->len += length;
        skb->data_len += length;

        if (length > 128) {
                skb->truesize += PAGE_SIZE;
                /* We pull the minimum amount into linear part */
                __pskb_pull_tail(skb, ETH_HLEN);
        } else {
                /* Small frames are copied into linear part to free one page */
                __pskb_pull_tail(skb, length);
        }

        ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);

        ftmac100_rx_pointer_advance(priv);

        skb->protocol = eth_type_trans(skb, netdev);

        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += skb->len;

        /* push packet to protocol stack */
        netif_receive_skb(skb);

        (*processed)++;
        return true;
}
static int xenvif_tx_submit(struct xenvif *vif)
{
        struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
        struct gnttab_copy *gop_copy = vif->tx_copy_ops;
        struct sk_buff *skb;
        int work_done = 0;

        while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                u16 pending_idx;
                unsigned data_len;

                pending_idx = XENVIF_TX_CB(skb)->pending_idx;
                txp = &vif->pending_tx_info[pending_idx].req;

                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
                        skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
                        continue;
                }

                data_len = skb->len;
                callback_param(vif, pending_idx).ctx = NULL;
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
                        xenvif_idx_release(vif, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                if (txp->flags & XEN_NETTXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                xenvif_fill_frags(vif, skb);

                if (unlikely(skb_has_frag_list(skb))) {
                        if (xenvif_handle_frag_list(vif, skb)) {
                                if (net_ratelimit())
                                        netdev_err(vif->dev,
                                                   "Not enough memory to consolidate frag_list!\n");
                                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                                kfree_skb(skb);
                                continue;
                        }
                }

                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }

                skb->dev = vif->dev;
                skb->protocol = eth_type_trans(skb, skb->dev);
                skb_reset_network_header(skb);

                if (checksum_setup(vif, skb)) {
                        netdev_dbg(vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
                        /* We have to set this flag to trigger the callback */
                        if (skb_shinfo(skb)->destructor_arg)
                                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                        kfree_skb(skb);
                        continue;
                }

                skb_probe_transport_header(skb, 0);

                /* If the packet is GSO then we will have just set up the
                 * transport header offset in checksum_setup so it's now
                 * straightforward to calculate gso_segs.
                 */
                if (skb_is_gso(skb)) {
                        int mss = skb_shinfo(skb)->gso_size;
                        int hdrlen = skb_transport_header(skb) -
                                skb_mac_header(skb) +
                                tcp_hdrlen(skb);

                        skb_shinfo(skb)->gso_segs =
                                DIV_ROUND_UP(skb->len - hdrlen, mss);
                }

                vif->dev->stats.rx_bytes += skb->len;
                vif->dev->stats.rx_packets++;

                work_done++;

                /* Set this flag right before netif_receive_skb, otherwise
                 * someone might think this packet already left netback, and
                 * do a skb_copy_ubufs while we are still in control of the
                 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
                 */
                if (skb_shinfo(skb)->destructor_arg) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                        vif->tx_zerocopy_sent++;
                }

                netif_receive_skb(skb);
        }

        return work_done;
}
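/*
 * The pull-to-linear step used by xenvif_tx_submit() above, shown in
 * isolation as a hedged sketch. PULL_LEN is a hypothetical header budget
 * standing in for PKT_PROT_LEN, and the helper name is illustrative.
 */
#define PULL_LEN 128    /* hypothetical stand-in for PKT_PROT_LEN */

static void example_pull_headers(struct sk_buff *skb)
{
        if (skb_is_nonlinear(skb) && skb_headlen(skb) < PULL_LEN) {
                int target = min_t(int, skb->len, PULL_LEN);

                /* __pskb_pull_tail() returns NULL on allocation failure.
                 * The caller above does not check the result, so on failure
                 * the skb simply keeps a shorter linear area; a stricter
                 * caller could drop the packet here instead. */
                if (!__pskb_pull_tail(skb, target - skb_headlen(skb)))
                        pr_warn_ratelimited("header pull failed\n");
        }
}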
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
                                        struct ieee80211_hdr *hdr,
                                        u16 len,
                                        u32 ampdu_status,
                                        struct iwl_rx_mem_buffer *rxb,
                                        struct ieee80211_rx_status *stats)
{
        struct sk_buff *skb;
        int ret = 0;
        __le16 fc = hdr->frame_control;

        /* We only process data packets if the interface is open */
        if (unlikely(!priv->is_open)) {
                IWL_DEBUG_DROP_LIMIT(priv,
                                     "Dropping packet while interface is not open.\n");
                return;
        }

        /* In case of HW accelerated crypto and bad decryption, drop */
        if (!priv->cfg->mod_params->sw_crypto &&
            iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
                return;

        skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(priv, "alloc_skb failed\n");
                return;
        }

        skb_reserve(skb, IWL_LINK_HDR_MAX);
        skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

        /* mac80211 currently doesn't support paged SKBs. Convert to a linear
         * SKB for management frames and for data frames that require software
         * decryption or software defragmentation.
         */
        if (ieee80211_is_mgmt(fc) ||
            ieee80211_has_protected(fc) ||
            ieee80211_has_morefrags(fc) ||
            le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
            (ieee80211_is_data_qos(fc) &&
             *ieee80211_get_qos_ctl(hdr) &
             IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
                ret = skb_linearize(skb);
        else
                ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
                        0 : -ENOMEM;

        if (ret) {
                kfree_skb(skb);
                goto out;
        }

        /*
         * XXX: We cannot touch the page and its virtual memory (hdr) after
         * here. It might have already been freed by the above skb change.
         */
        iwl_update_stats(priv, false, fc, len);
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

        ieee80211_rx(priv->hw, skb);

out:
        priv->alloc_rxb_page--;
        rxb->page = NULL;
}
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
{
        struct net_device *netdev = priv->netdev;
        struct ftmac100_rxdes *rxdes;
        struct sk_buff *skb;
        struct page *page;
        dma_addr_t map;
        int length;

        rxdes = ftmac100_rx_locate_first_segment(priv);
        if (!rxdes)
                return false;

        if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
                ftmac100_rx_drop_packet(priv);
                return true;
        }

        /*
         * It is impossible to get multi-segment packets
         * because we always provide big enough receive buffers.
         */
        if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
                BUG();

        /* start processing */
        skb = netdev_alloc_skb_ip_align(netdev, 128);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        netdev_err(netdev, "rx skb alloc failed\n");

                ftmac100_rx_drop_packet(priv);
                return true;
        }

        if (unlikely(ftmac100_rxdes_multicast(rxdes)))
                netdev->stats.multicast++;

        map = ftmac100_rxdes_get_dma_addr(rxdes);
        dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);

        length = ftmac100_rxdes_frame_length(rxdes);
        page = ftmac100_rxdes_get_page(rxdes);
        skb_fill_page_desc(skb, 0, page, 0, length);
        skb->len += length;
        skb->data_len += length;

        /* Pull at most 64 bytes into the linear part; frames larger than
         * that keep the page attached, so account for it in truesize. */
        if (length > 64)
                skb->truesize += PAGE_SIZE;
        __pskb_pull_tail(skb, min(length, 64));

        ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);

        ftmac100_rx_pointer_advance(priv);

        skb->protocol = eth_type_trans(skb, netdev);

        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += skb->len;

        /* push packet to protocol stack */
        netif_receive_skb(skb);

        (*processed)++;
        return true;
}
static int xenvif_tx_submit(struct xenvif *vif)
{
        struct gnttab_copy *gop = vif->tx_copy_ops;
        struct sk_buff *skb;
        int work_done = 0;

        while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                u16 pending_idx;
                unsigned data_len;

                pending_idx = *((u16 *)skb->data);
                txp = &vif->pending_tx_info[pending_idx].req;

                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
                        netdev_dbg(vif->dev, "netback grant failed.\n");
                        skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
                        continue;
                }

                data_len = skb->len;
                memcpy(skb->data,
                       (void *)(idx_to_kaddr(vif, pending_idx) | txp->offset),
                       data_len);
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
                        xenvif_idx_release(vif, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                if (txp->flags & XEN_NETTXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                xenvif_fill_frags(vif, skb);

                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }

                skb->dev = vif->dev;
                skb->protocol = eth_type_trans(skb, skb->dev);
                skb_reset_network_header(skb);

                if (checksum_setup(vif, skb)) {
                        netdev_dbg(vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
                        kfree_skb(skb);
                        continue;
                }

                skb_probe_transport_header(skb, 0);

                /* If the packet is GSO then we will have just set up the
                 * transport header offset in checksum_setup so it's now
                 * straightforward to calculate gso_segs.
                 */
                if (skb_is_gso(skb)) {
                        int mss = skb_shinfo(skb)->gso_size;
                        int hdrlen = skb_transport_header(skb) -
                                skb_mac_header(skb) +
                                tcp_hdrlen(skb);

                        skb_shinfo(skb)->gso_segs =
                                DIV_ROUND_UP(skb->len - hdrlen, mss);
                }

                vif->dev->stats.rx_bytes += skb->len;
                vif->dev->stats.rx_packets++;

                work_done++;

                netif_receive_skb(skb);
        }

        return work_done;
}