/* MAP egress handler: prepend a MAP header (plus an optional uplink
 * checksum header when RMNET_FLAGS_EGRESS_MAP_CKSUMV4 is set) to @skb
 * before it leaves through the real device.
 * Returns 0 on success, -ENOMEM if headroom or the MAP header could not
 * be obtained.
 */
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
	}

	/* GFP_ATOMIC: we are in the transmit path and must not sleep */
	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;
}
/* Guarantee that the first @write_len bytes of @skb can be modified in
 * place.  Returns 0 when already writable (or after a successful
 * unclone), otherwise the error from pskb_expand_head().
 */
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (skb_cloned(skb) && !skb_clone_writable(skb, write_len))
		return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

	return 0;
}
/* Tunnel-mode input for IPv6-in-IPv6: validate the inner header, optionally
 * propagate DSCP/ECN from the outer header, and relocate the MAC header so
 * the inner IPv6 packet becomes the network header.
 * Returns 0 on success, -EINVAL for a bad protocol or short packet, or the
 * pskb_expand_head() error code.
 */
static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;
	const unsigned char *old_mac;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	/* Unclone before the decap helpers modify the header in place */
	if (skb_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
			       ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	/* Slide the MAC header down so it stays adjacent to the inner packet */
	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	skb_reset_network_header(skb);
	err = 0;

out:
	return err;
}
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @net: the applicable net namespace
 * @skb: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 *
 * Consumes @skb in all cases (transmitted or dropped); always returns 0.
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct net_device *dev;
	int delta;
	void *tipc_ptr;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	/* Send RESET message even if bearer is detached from device */
	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
		goto drop;

	/* Make sure the skb has headroom for the device's hard header */
	delta = dev->hard_header_len - skb_headroom(skb);
	if ((delta > 0) &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		goto drop;

	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_TIPC);
	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, skb->len);
	dev_queue_xmit(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @buf: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 *
 * Transmits a clone so the caller keeps ownership of @buf.
 * Always returns 0; on any failure the clone is simply dropped.
 */
int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
		     struct tipc_media_addr *dest)
{
	struct sk_buff *clone;
	struct net_device *dev;
	int delta;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	clone = skb_clone(buf, GFP_ATOMIC);
	if (!clone)
		return 0;

	/* Ensure the clone has headroom for the device's hard header */
	delta = dev->hard_header_len - skb_headroom(buf);
	if ((delta > 0) &&
	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
		kfree_skb(clone);
		return 0;
	}

	skb_reset_network_header(clone);
	clone->dev = dev;
	clone->protocol = htons(ETH_P_TIPC);
	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, clone->len);
	dev_queue_xmit(clone);
	return 0;
}
/*
 * Transmit a packet to the base station on behalf of the network stack
 *
 *
 * Returns: NETDEV_TX_OK (always, even in case of error)
 *
 * In case of error, we just drop it. Reasons:
 *
 *  - we add a hw header to each skb, and if the network stack
 *    retries, we have no way to know if that skb has it or not.
 *
 *  - network protocols have their own drop-recovery mechanisms
 *
 *  - there is not much else we can do
 *
 * If the device is idle, we need to wake it up; that is an operation
 * that will sleep. See i2400m_net_wake_tx() for details.
 */
static netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	/* The hw header will be written in place: unclone a shared header */
	if (skb_header_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto drop;

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	/* NOTE: the drop label jumps into the error branch below so the
	 * tx_dropped counter is shared between both failure paths. */
	if (result < 0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}
/** * rmnet_map_egress_handler() - MAP egress handler * @skb: Packet being sent * @config: Physical endpoint configuration for the egress device * @ep: logical endpoint configuration of the packet originator * (e.g.. RmNet virtual network device) * @orig_dev: The originator vnd device * * Called if and only if MAP is configured in the egress device's egress data * format. Will expand skb if there is insufficient headroom for MAP protocol. * Note: headroomexpansion will incur a performance penalty. * * Return: * - 0 on success * - 1 on failure */ static int rmnet_map_egress_handler(struct sk_buff *skb, struct rmnet_phys_ep_conf_s *config, struct rmnet_logical_ep_conf_s *ep, struct net_device *orig_dev) { int required_headroom, additional_header_length, ckresult; struct rmnet_map_header_s *map_header; additional_header_length = 0; required_headroom = sizeof(struct rmnet_map_header_s); if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) || (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) { required_headroom += sizeof(struct rmnet_map_ul_checksum_header_s); additional_header_length += sizeof(struct rmnet_map_ul_checksum_header_s); } LOGD("headroom of %d bytes", required_headroom); if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) { LOGD("Failed to add headroom of %d bytes", required_headroom); return 1; } } if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) || (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) { ckresult = rmnet_map_checksum_uplink_packet (skb, orig_dev, config->egress_data_format); trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult); rmnet_stats_ul_checksum(ckresult); } map_header = rmnet_map_add_map_header(skb, additional_header_length); if (!map_header) { LOGD("%s", "Failed to add MAP header to egress packet"); return 1; } if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { if (ep->mux_id == 0xff) map_header->mux_id = 
0; else map_header->mux_id = ep->mux_id; } skb->protocol = htons(ETH_P_MAP); if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { rmnet_map_aggregate(skb, config); return RMNET_MAP_CONSUMED; } return RMNET_MAP_SUCCESS; }
/* Replace every page fragment whose pfn is foreign (per is_foreign()) with
 * a freshly allocated local copy, so the skb no longer references foreign
 * pages.  Returns 1 on success, 0 on allocation failure.
 */
static int skb_remove_foreign_references(struct sk_buff *skb)
{
	struct page *page;
	unsigned long pfn;
	int i, off;
	char *vaddr;

	BUG_ON(skb_shinfo(skb)->frag_list);

	/* Unclone first so the frag array can be rewritten in place */
	if (skb_cloned(skb) &&
	    unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
		if (!is_foreign(pfn))
			continue;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!page))
			return 0;

		/* Copy only the used region of the fragment, at its offset */
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		off = skb_shinfo(skb)->frags[i].page_offset;
		memcpy(page_address(page) + off, vaddr + off,
		       skb_shinfo(skb)->frags[i].size);
		kunmap_skb_frag(vaddr);

		/* Release the foreign page and install the local copy */
		put_page(skb_shinfo(skb)->frags[i].page);
		skb_shinfo(skb)->frags[i].page = page;
	}

	return 1;
}
/* Re-run the xfrm lookup for @skb after netfilter has mangled it, and
 * install the new route.  Returns 0 on success or a negative errno.
 */
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
/* tc 'pedit' action: apply the configured key edits (mask/xor at fixed
 * offsets, optionally indirected through a byte in the packet) to the skb.
 * NOTE(review): this definition is truncated in this chunk; the bad/done
 * labels referenced below live beyond the visible text.
 */
static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
		     struct tcf_result *res)
{
	struct tcf_pedit *p = a->priv;
	int i, munged = 0;
	unsigned int off;

	/* Packet bytes are rewritten in place: unclone first */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return p->tcf_action;

	off = skb_network_offset(skb);

	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;

	if (p->tcfp_nkeys > 0) {
		struct tc_pedit_key *tkey = p->tcfp_keys;

		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
			u32 *ptr, _data;
			int offset = tkey->off;

			/* Optional indirect offset: read a byte at 'at' and
			 * derive an additional offset from it */
			if (tkey->offmask) {
				char *d, _d;

				d = skb_header_pointer(skb, off + tkey->at,
						       1, &_d);
				if (!d)
					goto bad;
				offset += (*d & tkey->offmask) >> tkey->shift;
			}

			if (offset % 4) {
				pr_info("tc filter pedit"
					" offset must be on 32 bit boundaries\n");
				goto bad;
			}
			if (offset > 0 && offset > skb->len) {
				pr_info("tc filter pedit"
					" offset %d can't exceed pkt length %d\n",
					offset, skb->len);
				goto bad;
			}

			ptr = skb_header_pointer(skb, off + offset, 4, &_data);
			if (!ptr)
				goto bad;
			/* just do it, baby */
			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
			/* If we edited the stack copy, write it back */
			if (ptr == &_data)
				skb_store_bits(skb, off + offset, ptr, 4);
			munged++;
		}

		if (munged)
			skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
		goto done;
	} else
/* Make the first @writable_len bytes of *@pskb writable, copying the whole
 * skb (when shared or socket-owned) or just its data (when cloned) as
 * needed.  May replace *@pskb with the copy.
 * Returns 1 when writable, 0 on failure.
 */
int ip_vs_make_skb_writable(struct sk_buff **pskb, int writable_len)
{
	struct sk_buff *skb = *pskb;

	/* skb is already used, better copy skb and its payload */
	if (unlikely(skb_shared(skb) || skb->sk))
		goto copy_skb;

	/* skb data is already used, copy it */
	if (unlikely(skb_cloned(skb)))
		goto copy_data;

	return pskb_may_pull(skb, writable_len);

copy_data:
	if (unlikely(writable_len > skb->len))
		return 0;
	return !pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

copy_skb:
	if (unlikely(writable_len > skb->len))
		return 0;
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return 0;
	BUG_ON(skb_is_nonlinear(skb));

	/* Rest of kernel will get very unhappy if we pass it a
	 * suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(skb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = skb;
	return 1;
}
/* Populate the BPF datapath context from the skb: cache the packet length,
 * VLAN tag, hardware-checksum state, and (when present) the IPv4 tunnel key.
 * Returns 0 on success, -ENOMEM if headroom expansion fails.
 */
static int bpf_dp_ctx_init(struct bpf_dp_context *ctx)
{
	struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(ctx->skb)->tun_key;

	/* Reserve 64 bytes of headroom for programs that push headers.
	 * NOTE(review): 64 looks like an arbitrary floor - confirm it covers
	 * the largest header a BPF program may prepend. */
	if (skb_headroom(ctx->skb) < 64) {
		if (pskb_expand_head(ctx->skb, 64, 0, GFP_ATOMIC))
			return -ENOMEM;
	}
	ctx->context.length = ctx->skb->len;
	ctx->context.vlan_tag = vlan_tx_tag_present(ctx->skb) ?
				vlan_tx_tag_get(ctx->skb) : 0;
	ctx->context.hw_csum = (ctx->skb->ip_summed == CHECKSUM_PARTIAL);
	if (tun_key) {
		/* Only the low 32 bits of the 64-bit tunnel id are kept */
		ctx->context.tun_key.tun_id =
			be32_to_cpu(be64_get_low32(tun_key->tun_id));
		ctx->context.tun_key.src_ip = be32_to_cpu(tun_key->ipv4_src);
		ctx->context.tun_key.dst_ip = be32_to_cpu(tun_key->ipv4_dst);
		ctx->context.tun_key.tos = tun_key->ipv4_tos;
		ctx->context.tun_key.ttl = tun_key->ipv4_ttl;
	} else {
		memset(&ctx->context.tun_key, 0,
		       sizeof(struct bpf_ipv4_tun_key));
	}

	return 0;
}
/* Tunnel-mode input for IPv4-in-IPv4: validate the inner header, optionally
 * propagate DSCP/ECN from the outer header, then expose the inner packet as
 * the network header and rebuild the MAC header.
 * Returns 0 on success, -EINVAL for a bad protocol or short packet, or the
 * pskb_expand_head() error code.
 */
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	/* Unclone before the decap helpers modify the header in place */
	if (skb_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	err = 0;

out:
	return err;
}
/* MAP egress handler: prepend a MAP header to @skb, setting the mux id when
 * muxing is enabled (mux id 0xff is the wildcard and maps to 0).
 * Returns RMNET_MAP_SUCCESS on success, RMNET_MAP_CONSUMED on failure.
 */
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (skb_headroom(skb) < required_headroom) {
		/* BUGFIX: the egress handler runs in the transmit path
		 * (softirq/atomic context) and must not sleep, so use
		 * GFP_ATOMIC rather than GFP_KERNEL here. */
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return RMNET_MAP_CONSUMED;
	}

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		return RMNET_MAP_CONSUMED;

	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
		if (mux_id == 0xff)
			map_header->mux_id = 0;
		else
			map_header->mux_id = mux_id;
	}

	skb->protocol = htons(ETH_P_MAP);

	return RMNET_MAP_SUCCESS;
}
/* Re-run the IPv4 xfrm lookup for @skb after netfilter has mangled it and
 * install the new route (pre-skb_dst API).  Returns 0 on success, -1 on
 * any failure.
 */
int ip_xfrm_me_harder(struct sk_buff *skb)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;

	/* Already transformed by xfrm - nothing to redo */
	if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return 0;
	if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
		return -1;

	dst = skb->dst;
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	if (xfrm_lookup(&dst, &fl, skb->sk, 0) < 0)
		return -1;

	dst_release(skb->dst);
	skb->dst = dst;

	/* Change in oif may mean change in hh_len. */
	hh_len = skb->dst->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -1;
	return 0;
}
/* Strip @len bytes of UDP encapsulation from the front of the payload and
 * mark the packet as plain ESP (NAT-T decapsulation).
 * Returns 0 on success, -1 on any failure.
 */
static inline int ipsec_encap_rcv_esp(struct sk_buff *skb, int len)
{
	struct iphdr *iph;
	int iphlen;

	if (!pskb_may_pull(skb, sizeof(struct udphdr) + len)) {
		return -1;
	}

	/* The IP header is rewritten below: unclone first */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
		return -1;
	}

	iph = skb->nh.iph;
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	/* NOTE(review): tot_len is already shortened before this length
	 * check, so a failing packet is returned with a modified header -
	 * confirm callers drop the skb on -1. */
	if (skb->len < iphlen + len) {
		return -1;
	}

	/* Pull the encapsulation header and mark the payload as ESP */
	skb->h.raw = skb_pull(skb, len);
	iph->protocol = IPPROTO_ESP;

	return 0;
}
/**
 * send_msg - send a TIPC message out over an InfiniBand interface
 * @buf: the packet to be sent
 * @tb_ptr: the bearer through which the packet is to be sent
 * @dest: peer destination address
 *
 * Transmits a clone so the caller keeps ownership of @buf.
 * Always returns 0; failures simply drop the clone.
 */
static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
		    struct tipc_media_addr *dest)
{
	struct sk_buff *clone;
	struct net_device *dev;
	int delta;

	clone = skb_clone(buf, GFP_ATOMIC);
	if (!clone)
		return 0;

	dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;

	/* Ensure the clone has headroom for the device's hard header */
	delta = dev->hard_header_len - skb_headroom(buf);
	if ((delta > 0) &&
	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
		kfree_skb(clone);
		return 0;
	}

	skb_reset_network_header(clone);
	clone->dev = dev;
	clone->protocol = htons(ETH_P_TIPC);
	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, clone->len);
	dev_queue_xmit(clone);
	return 0;
}
/* Decompress an IPComp payload into the per-state scratch buffer, grow the
 * skb tail to the decompressed size, copy the result back and fix up the IP
 * total length.  Returns 0 on success or a negative errno.
 */
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	int err, plen, dlen;
	struct iphdr *iph;
	struct ipcomp_data *ipcd = x->data;
	u8 *start, *scratch = ipcd->scratch;

	plen = skb->len;
	dlen = IPCOMP_SCRATCH_SIZE;
	start = skb->data;

	err = crypto_comp_decompress(ipcd->tfm, start, plen, scratch, &dlen);
	if (err)
		goto out;

	/* Decompressed data must be strictly larger than the compressed
	 * payload plus an IPComp header, otherwise the packet is bogus */
	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	/* Grow the tail by the size difference before copying back */
	err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
	if (err)
		goto out;

	skb_put(skb, dlen - plen);
	memcpy(skb->data, scratch, dlen);

	iph = skb->nh.iph;
	iph->tot_len = htons(dlen + iph->ihl * 4);
out:
	return err;
}
/* Tunnel-mode input for IPv6-in-IPv6 (old skb->nh / skb->h API): validate
 * the inner header, optionally propagate DSCP/ECN, then slide the MAC
 * header down so the inner packet becomes the network header.
 * Returns 0 on success, -EINVAL for a bad protocol or short packet, or the
 * pskb_expand_head() error code.
 */
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	/* Unclone before modifying headers in place */
	if (skb_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	/* Move the MAC header so it abuts the inner packet */
	skb->mac.raw = memmove(skb->data - skb->mac_len,
			       skb->mac.raw, skb->mac_len);
	skb->nh.raw = skb->data;
	err = 0;

out:
	return err;
}
/* Recompute the checksums selected by @update_flags on an IPv4 packet:
 * the transport checksum for the matching protocol and, optionally, the
 * IPv4 header checksum.  Returns 1 on success, 0 on failure.
 */
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-first fragments carry no transport header: switch on 0 then */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		/* The IP header is rewritten in place: make it writable */
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		ip_send_check(iph);
	}

	return 1;

fail:
	return 0;
}
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue
 * Re-resolves the route for @skb after its addresses or mark may have been
 * rewritten, re-applies xfrm policy, and ensures headroom for the (possibly
 * different) output device.  Returns 0 on success, -1 on failure.
 */
int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	struct flowi4 fl4 = {};
	__be32 saddr = iph->saddr;
	__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
	unsigned int hh_len;

	if (addr_type == RTN_UNSPEC)
		addr_type = inet_addr_type(net, saddr);
	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
		flags |= FLOWI_FLAG_ANYSRC;
	else
		saddr = 0;

	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
	 */
	fl4.daddr = iph->daddr;
	fl4.saddr = saddr;
	fl4.flowi4_tos = RT_TOS(iph->tos);
	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_flags = flags;
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return -1;

	/* Drop old route. */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	if (skb_dst(skb)->error)
		return -1;

#ifdef CONFIG_XFRM
	/* Re-apply IPsec transformation for the freshly resolved route */
	if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
		struct dst_entry *dst = skb_dst(skb);
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
		if (IS_ERR(dst))
			return -1;
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -1;

	return 0;
}
/* Compatibility wrapper around dev_queue_xmit(): when the device cannot
 * transmit VLAN-tagged skbs in hardware, insert the tag in software and,
 * if GSO is then required, segment and transmit each segment here.
 * Returns the dev_queue_xmit() result, or a negative errno on failure.
 */
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		/* Push the VLAN tag into the packet data itself */
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				/* No segmentation needed after all: clear the
				 * DODGY flag (uncloning first if required) */
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0,
							      GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			/* Transmit each segment of the GSO chain in turn */
			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
/* Grow @skb so that @extra additional bytes fit, keeping the total packet
 * length within the 64K IP limit.  Returns 1 on success, 0 on failure.
 */
static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
	if (skb->len + extra > 65535 ||
	    pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
		return 0;

	return 1;
}
/* Network-stack transmit entry point for a brcmfmac interface: reject when
 * the bus is down, filter 802.11f (IAPP) frames unless enabled, and ensure
 * writable headroom for the firmware header.
 * NOTE(review): this definition is truncated in this chunk; the done label
 * and the remainder of the transmit path lie beyond the visible text.
 */
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh;
	int head_delta;

	brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		bphy_err(drvr, "xmit rejected state=%d\n",
			 drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Some recent Broadcom's firmwares disassociate STA when they receive
	 * an 802.11f ADD frame. This behavior can lead to a local DoS security
	 * issue. Attacker may trigger disassociation of any STA by sending a
	 * proper Ethernet frame to the wireless interface.
	 *
	 * Moreover this feature may break AP interfaces in some specific
	 * setups. This applies e.g. to the bridge with hairpin mode enabled
	 * and IFLA_BRPORT_MCAST_TO_UCAST set. IAPP packet generated by a
	 * firmware will get passed back to the wireless interface and cause
	 * immediate disassociation of a just-connected STA.
	 */
	if (!drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
		dev_kfree_skb(skb);
		ret = -EINVAL;
		goto done;
	}

	/* Make sure there's enough writeable headroom */
	if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
		head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);

		brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
			  brcmf_ifname(ifp), head_delta);
		atomic_inc(&drvr->bus_if->stats.pktcowed);
		ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
				       GFP_ATOMIC);
		if (ret < 0) {
			bphy_err(drvr, "%s: failed to expand headroom\n",
				 brcmf_ifname(ifp));
			atomic_inc(&drvr->bus_if->stats.pktcow_failed);
			goto done;
		}
	}
/* Older tc 'pedit' action (pre-skb_header_pointer API): edit packet words
 * in place through direct network-header pointer arithmetic.
 * NOTE(review): this definition is truncated in this chunk; the bad label
 * and the remainder of the function lie beyond the visible text.
 */
static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
		     struct tcf_result *res)
{
	struct tcf_pedit *p = a->priv;
	int i, munged = 0;
	u8 *pptr;

	if (!(skb->tc_verd & TC_OK2MUNGE)) {
		/* should we set skb->cloned? */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			return p->tcf_action;
		}
	}

	pptr = skb->nh.raw;

	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;

	if (p->tcfp_nkeys > 0) {
		struct tc_pedit_key *tkey = p->tcfp_keys;

		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
			u32 *ptr;
			int offset = tkey->off;

			/* Optional indirect offset derived from a packet byte */
			if (tkey->offmask) {
				if (skb->len > tkey->at) {
					char *j = pptr + tkey->at;

					offset += ((*j & tkey->offmask) >>
						   tkey->shift);
				} else {
					goto bad;
				}
			}

			if (offset % 4) {
				printk("offset must be on 32 bit boundaries\n");
				goto bad;
			}

			if (skb->len < 0 ||
			    (offset > 0 && offset > skb->len)) {
				printk("offset %d cant exceed pkt length %d\n",
				       offset, skb->len);
				goto bad;
			}

			ptr = (u32 *)(pptr+offset);
			/* just do it, baby */
			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
			munged++;
		}
/* Submit a buffer to the given USB pipe.  When @hdr_buf is supplied, the
 * payload of @buf is appended to it and the combined buffer is submitted.
 * Returns HIF_OK on success, HIF_ERROR otherwise.
 *
 * NOTE(review): if pskb_expand_head() fails, the payload copy is silently
 * skipped and @hdr_buf is submitted without it; @buf is also never freed on
 * the hdr_buf path - confirm the ownership contract with the callers.
 * NOTE(review): for an unknown pipe id, sendBuf is freed but the function
 * still returns status==HIF_OK - confirm this is intended.
 */
hif_status_t HIFSend(HIF_HANDLE hHIF, uint8_t PipeID,
		     struct sk_buff * hdr_buf, struct sk_buff * buf)
{
	hif_status_t status = HIF_OK;
	HIF_DEVICE_USB *macp = (HIF_DEVICE_USB *)hHIF;
	struct sk_buff * sendBuf;
	uint8_t *data = NULL;
	uint32_t len = 0;

	/* If necessary, link hdr_buf & buf */
	if (hdr_buf != NULL) {
		/* pskb_expand_head() returns 0 on success */
		if(!pskb_expand_head(hdr_buf, 0, buf->len, GFP_ATOMIC))
			memcpy(skb_put(hdr_buf, buf->len), buf->data,
			       buf->len);
		sendBuf = hdr_buf;
	} else {
		sendBuf = buf;
	}

	data = sendBuf->data;
	len = sendBuf->len;

	if ( PipeID == HIF_USB_PIPE_COMMAND ) {
		status = ((struct _NIC_DEV *)macp->os_hdl)->os_usb_submitCmdOutUrb(
				macp->os_hdl, data, len, (void*)sendBuf);
	} else if ( PipeID == HIF_USB_PIPE_TX ) {
		status = ((struct _NIC_DEV *)macp->os_hdl)->os_usb_submitTxUrb(
				macp->os_hdl, data, len, (void*)sendBuf,
				&(((struct _NIC_DEV *)macp->os_hdl)->TxPipe));
	} else if ( PipeID == HIF_USB_PIPE_HP_TX ) {
		status = ((struct _NIC_DEV *)macp->os_hdl)->os_usb_submitTxUrb(
				macp->os_hdl, data, len, (void*)sendBuf,
				&(((struct _NIC_DEV *)macp->os_hdl)->HPTxPipe));
	} else {
		printk("Unknown pipe %d\n", PipeID);
		dev_kfree_skb_any(sendBuf);
	}

	return (status ? HIF_ERROR : HIF_OK);
}
/* tc 'ipt' action: run the configured iptables target on @skb and map the
 * netfilter verdict onto a tc action code.
 * Returns a TC_ACT_* / TC_POLICE_* code, or TC_ACT_UNSPEC when the skb
 * could not be made writable.
 */
static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = a->priv;
	struct xt_action_param par;

	/* Targets may modify the packet: unclone first */
	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			return TC_ACT_UNSPEC;
	}

	spin_lock(&ipt->tcf_lock);

	ipt->tcf_tm.lastuse = jiffies;
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.in = skb->dev;
	par.out = NULL;
	par.hooknum = ipt->tcfi_hook;
	par.target = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		/* Unknown netfilter verdict: warn and treat as accept */
		if (net_ratelimit())
			pr_notice("tc filter: Bogus netfilter code"
				  " %d assume ACCEPT\n", ret);
		result = TC_POLICE_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}
/* Unusual, but possible case. */
/* Ensure @skb has @extra bytes of writable tailroom, keeping the total
 * packet length within the 64K limit.  Returns 0 on success, -1 on failure.
 */
static int dba_enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
	unsigned int tailroom = skb_tailroom(skb);

	if (skb->len + extra > 65535) {
		log_error("dba enlarge skb failed: skb len %d + extra %d >65535.\n",
			  skb->len, extra);
		return -1;
	}
	if ((extra > tailroom) || skb_cloned(skb)) {
		/* BUGFIX: when the skb is merely cloned and extra <= tailroom,
		 * (extra - tailroom) underflows as unsigned and a bogus huge
		 * ntail would be passed to pskb_expand_head(); clamp to 0 so
		 * we only unclone in that case. */
		unsigned int ntail = extra > tailroom ? extra - tailroom : 0;

		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) {
			return -1;
		}
	}
	return 0;
}
/* Open up @size bytes of space at @offset within the packet, growing the
 * skb at the head and/or tail as required (expanding the head allocation
 * when the combined head+tail room is insufficient).  The first @offset
 * bytes are kept at the front of the resulting packet.
 * Returns true on success, false if pskb_expand_head() fails.
 */
static bool skb_util_make_space(struct sk_buff *skb, const size_t size,
				const off_t offset)
{
	size_t headroom;
	size_t tailroom;
	size_t packet_len;

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	if (headroom + tailroom < size) {
		/* Not enough combined room: grow the head allocation */
		if (unlikely(pskb_expand_head(skb, size - headroom, 0,
					      GFP_ATOMIC))) {
			return false;
		}
		headroom = skb_headroom(skb);
		tailroom = skb_tailroom(skb);
	}

	packet_len = skb->len;

	if (headroom >= size) {
		/* Space fits entirely in front: push, then slide the first
		 * @offset bytes back to the new packet start */
		skb_push(skb, size);
		if (offset) {
			memmove(skb->data, &skb->data[size], offset);
		}
	} else {
		off_t move_back_len;

		if (tailroom >= size) {
			/* Space fits entirely at the tail: shift everything
			 * after @offset towards the tail */
			move_back_len = size;
			skb_put(skb, move_back_len);
			memmove(&skb->data[offset + move_back_len],
				&skb->data[offset], packet_len - offset);
		} else {
			/* headroom + tailroom >= size */
			move_back_len = tailroom;
			skb_push(skb, size - move_back_len);
			skb_put(skb, move_back_len);
			if (offset) {
				memmove(skb->data,
					&skb->data[size - move_back_len],
					offset);
			}
			memmove(&skb->data[offset + size],
				&skb->data[offset + size - move_back_len],
				packet_len - offset);
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/* Payload moved towards the tail: the checksum start
			 * offset moves with it */
			skb->csum_start += move_back_len;
		}
	}

	return true;
}
/** * tcf_csum_skb_nextlayer - Get next layer pointer * @skb: sk_buff to use * @ihl: previous summed headers length * @ipl: complete packet length * @jhl: next header length * * Check the expected next layer availability in the specified sk_buff. * Return the next layer pointer if pass, NULL otherwise. */ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, unsigned int jhl) { int ntkoff = skb_network_offset(skb); int hl = ihl + jhl; if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || (skb_cloned(skb) && !skb_clone_writable(skb, hl + ntkoff) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) return NULL; else return (void *)(skb_network_header(skb) + ihl); }