/* userspace injects packet into plum */
int bpf_dp_channel_push_on_plum(struct datapath *dp, u32 plum_id, u32 port_id,
				u32 fwd_plum_id, u32 arg1, u32 arg2, u32 arg3,
				u32 arg4, struct sk_buff *skb, u32 direction)
{
	struct plum_stack stack = {};
	struct plum_stack_frame first_frame = {};
	struct plum_stack_frame *frame;
	struct bpf_dp_context *ctx;
	u32 dest;

	frame = &first_frame;
	frame->kmem = 0;

	INIT_LIST_HEAD(&stack.list);
	ctx = &frame->ctx;
	ctx->stack = &stack;
	ctx->skb = skb;
	ctx->dp = dp;
	bpf_dp_ctx_init(ctx);

	rcu_read_lock();
	if (direction == OVS_BPF_OUT_DIR) {
		ctx->context.plum_id = plum_id;
		stack.curr_frame = frame;
		bpf_forward(&ctx->context, port_id);
		execute_plums(&stack);
		consume_skb(skb);
	} else if (direction == OVS_BPF_IN_DIR) {
		dest = MUX(plum_id, port_id);
		frame->dest = dest;
		stack.curr_frame = NULL;
		list_add(&frame->link, &stack.list);
		execute_plums(&stack);
	} else if (direction == OVS_BPF_FWD_TO_PLUM) {
		ctx->context.plum_id = plum_id;
		ctx->context.arg1 = arg1;
		ctx->context.arg2 = arg2;
		ctx->context.arg3 = arg3;
		ctx->context.arg4 = arg4;
		stack.curr_frame = frame;
		bpf_forward_to_plum(&ctx->context, fwd_plum_id);
		execute_plums(&stack);
		consume_skb(skb);
	}
	rcu_read_unlock();

	return 0;
}
static unsigned int synproxy_tg6(struct sk_buff *skb,
				 const struct xt_action_param *par)
{
	const struct xt_synproxy_info *info = par->targinfo;
	struct net *net = xt_net(par);
	struct synproxy_net *snet = synproxy_pernet(net);
	struct synproxy_options opts = {};
	struct tcphdr *th, _th;

	if (nf_ip6_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP))
		return NF_DROP;

	th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
	if (th == NULL)
		return NF_DROP;

	if (!synproxy_parse_options(skb, par->thoff, th, &opts))
		return NF_DROP;

	if (th->syn && !(th->ack || th->fin || th->rst)) {
		/* Initial SYN from client */
		this_cpu_inc(snet->stats->syn_received);

		if (th->ece && th->cwr)
			opts.options |= XT_SYNPROXY_OPT_ECN;

		opts.options &= info->options;
		if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
			synproxy_init_timestamp_cookie(info, &opts);
		else
			opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
					  XT_SYNPROXY_OPT_SACK_PERM |
					  XT_SYNPROXY_OPT_ECN);

		synproxy_send_client_synack(net, skb, th, &opts);
		consume_skb(skb);
		return NF_STOLEN;
	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
		/* ACK from client */
		if (synproxy_recv_client_ack(net, skb, th, &opts,
					     ntohl(th->seq))) {
			consume_skb(skb);
			return NF_STOLEN;
		} else {
			return NF_DROP;
		}
	}

	return XT_CONTINUE;
}
/**
 * Before the skb is finally delivered to the ETH_P_ALL taps, this registered
 * handler is called. At this point we restore pkt_type from the control
 * buffer stored in the skb.
 *
 * @param[in] pskb - double pointer to the skb, in case we need to clone.
 *
 * @returns the action to take on the skb; we may consume it.
 */
rx_handler_result_t rw_fpath_kni_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct kni_dev *kni;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	if (!skb->dev) {
		KNI_ERR("No device in the skb in rx_handler\n");
		return RX_HANDLER_PASS;
	}

	kni = netdev_priv(skb->dev);
	if (!kni) {
		KNI_ERR("no kni private data in the device in rx_handler\n");
		return RX_HANDLER_PASS;
	}

	*pskb = skb;

	switch (skb->pkt_type) {
	case PACKET_OUTGOING:
		skb->pkt_type = PACKET_OTHERHOST;
		kni->rx_treat_as_tx_filtered++;
		consume_skb(skb);
		ret = RX_HANDLER_CONSUMED;
		break;
	case PACKET_LOOPBACK:
		skb->pkt_type = skb->mark;
		if (skb->pkt_type == PACKET_OTHERHOST) {
			/* Force the packet to be accepted by the IP stack */
			skb->pkt_type = 0;
		}
		kni->rx_treat_as_tx_delivered++;
		skb->mark = 0;
		break;
	case PACKET_OTHERHOST:
		kni->rx_filtered++;
		consume_skb(skb);
		ret = RX_HANDLER_CONSUMED;
		break;
	default:
		kni->rx_delivered++;
		break;
	}

	return ret;
}
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	const struct arphdr *arp;

	/* do not tweak dropwatch on an ARP we will ignore */
	if (dev->flags & IFF_NOARP ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK)
		goto consumeskb;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out_of_mem;

	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		goto freeskb;

	arp = arp_hdr(skb);
	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
		goto freeskb;

	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));

	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);

consumeskb:
	consume_skb(skb);
	return 0;
freeskb:
	kfree_skb(skb);
out_of_mem:
	return 0;
}
/* packet arriving on vport is processed here
 * must be called with rcu_read_lock
 */
void bpf_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct plum *plum;
	u32 dest;
	struct plum_stack stack = {};
	struct plum_stack_frame first_frame = {};
	struct plum_stack_frame *frame;
	struct bpf_dp_context *ctx;

	plum = rcu_dereference(dp->plums[0]);
	dest = atomic_read(&plum->ports[p->port_no]);

	if (dest) {
		frame = &first_frame;
		INIT_LIST_HEAD(&stack.list);

		ctx = &frame->ctx;
		ctx->stack = &stack;
		ctx->context.port_id = p->port_no;
		ctx->skb = skb;
		ctx->dp = dp;
		bpf_dp_ctx_init(ctx);

		plum_update_stats(plum, p->port_no, skb, true);

		frame->dest = dest;
		stack.curr_frame = NULL;
		list_add(&frame->link, &stack.list);
		execute_plums(&stack);
	} else {
		consume_skb(skb);
	}
}
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
				   ax25_address *dest, ax25_digi *digi)
{
	struct sk_buff *skbn;
	unsigned char *bp;
	int len;

	len = digi->ndigi * AX25_ADDR_LEN;

	if (skb_headroom(skb) < len) {
		if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_dg_build_path - out of memory\n");
			return NULL;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);

		skb = skbn;
	}

	bp = skb_push(skb, len);

	ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);

	return skb;
}
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
/* called if GSO skb needs to be fragmented on forward */
static int ip_forward_finish_gso(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	features = netif_skb_dev_features(skb, dst->dev);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = dst_output(segs);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr,
			ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);

	if (!skb->dev) {
		OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
		return -EINVAL;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
		int err;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(skb, user);
		if (err)
			return err;

		ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
		struct sk_buff *reasm;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		reasm = nf_ct_frag6_gather(skb, user);
		if (!reasm)
			return -EINPROGRESS;

		if (skb == reasm) {
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Don't free 'skb' even though it is one of the original
		 * fragments, as we're going to morph it into the head.
		 */
		skb_get(skb);
		nf_ct_frag6_consume_orig(reasm);

		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
		skb_morph(skb, reasm);
		skb->next = reasm->next;
		consume_skb(reasm);
		ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_GSO_CB(skb) = ovs_cb;

	return 0;
}
static netdev_tx_t baseband_usb_netdev_start_xmit(struct sk_buff *skb,
						  struct net_device *dev)
{
	int i = 0;
	struct baseband_usb *usb = netdev_priv(dev); /* wjp for pm */
	struct urb *urb;
	unsigned char *buf;
	int err;

	pr_debug("baseband_usb_netdev_start_xmit\n");

	/* check input */
	if (!skb) {
		pr_err("no skb\n");
		return -EINVAL;
	}

	/* allocate urb */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("usb_alloc_urb() failed\n");
		return -ENOMEM;
	}
	buf = kzalloc(skb->len - 14, GFP_ATOMIC);
	if (!buf) {
		pr_err("usb buffer kzalloc() failed\n");
		usb_free_urb(urb);
		return -ENOMEM;
	}
	err = skb_copy_bits(skb, 14, buf, skb->len - 14);
	if (err < 0) {
		pr_err("skb_copy_bits() failed - %d\n", err);
		kfree(buf);
		usb_free_urb(urb);
		return err;
	}
	usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.out,
			  buf, skb->len - 14,
			  usb_net_raw_ip_tx_urb_comp, usb);
	urb->transfer_flags = URB_ZERO_PACKET;

	/* submit tx urb */
	usb->usb.tx_urb = urb;
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		pr_err("usb_submit_urb() failed - err %d\n", err);
		usb->usb.tx_urb = (struct urb *) 0;
		kfree(urb->transfer_buffer);
		usb_free_urb(urb);
		kfree_skb(skb);
		return err;
	}

	/* free skb */
	consume_skb(skb);

	return NETDEV_TX_OK;
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *stats = &dev->stats;
	int loop;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	stats->tx_packets++;
	stats->tx_bytes += cfd->len;

	/* set flag whether this packet has to be looped back */
	loop = skb->pkt_type == PACKET_LOOPBACK;

	if (!echo) {
		/* no echo handling available inside this driver */
		if (loop) {
			/*
			 * only count the packets here, because the
			 * CAN core already did the echo for us
			 */
			stats->rx_packets++;
			stats->rx_bytes += cfd->len;
		}
		consume_skb(skb);
		return NETDEV_TX_OK;
	}

	/* perform standard echo handling for CAN network interfaces */
	if (loop) {
		skb = can_create_echo_skb(skb);
		if (!skb)
			return NETDEV_TX_OK;

		/* receive with packet counting */
		vcan_rx(skb, dev);
	} else {
		/* no looped packets => no counting */
		consume_skb(skb);
	}
	return NETDEV_TX_OK;
}
static int usb_net_raw_ip_tx_urb_submit(struct baseband_usb *usb,
					struct sk_buff *skb)
{
	struct urb *urb;
	unsigned char *buf;
	int err;

	pr_debug("usb_net_raw_ip_tx_urb_submit {\n");

	/* check input */
	if (!usb) {
		pr_err("%s: !usb\n", __func__);
		return -EINVAL;
	}
	if (!skb) {
		pr_err("%s: !skb\n", __func__);
		return -EINVAL;
	}
	if (!usb->usb.interface) {
		pr_err("usb interface disconnected - not submitting tx urb\n");
		return -EINVAL;
	}

	/* allocate urb */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("usb_alloc_urb() failed\n");
		return -ENOMEM;
	}
	buf = kzalloc(skb->len - 14, GFP_ATOMIC);
	if (!buf) {
		pr_err("usb buffer kzalloc() failed\n");
		usb_free_urb(urb);
		return -ENOMEM;
	}
	err = skb_copy_bits(skb, 14, buf, skb->len - 14);
	if (err < 0) {
		pr_err("skb_copy_bits() failed - %d\n", err);
		kfree(buf);
		usb_free_urb(urb);
		return err;
	}
	usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.out,
			  buf, skb->len - 14,
			  usb_net_raw_ip_tx_urb_comp, usb);
	urb->transfer_flags = URB_ZERO_PACKET;

	/* queue tx urb work */
	usb_anchor_urb(urb, &usb->usb.tx_urb_deferred);
	queue_work(usb->usb.tx_workqueue, &usb->usb.tx_work);

	/* free skb */
	consume_skb(skb);

	pr_debug("usb_net_raw_ip_tx_urb_submit }\n");
	return 0;
}
/* When forwarding a packet, we must ensure that we've got enough headroom
 * for the encapsulation packet in the skb. This also gives us an
 * opportunity to figure out what the payload_len, dsfield, ttl, and df
 * values should be, so that we won't need to look at the old ip header
 * again
 */
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
			   unsigned int max_headroom, __u8 *next_protocol,
			   __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
			   __be16 *df)
{
	struct sk_buff *new_skb = NULL;
	struct iphdr *old_iph = NULL;
#ifdef CONFIG_IP_VS_IPV6
	struct ipv6hdr *old_ipv6h = NULL;
#endif

	ip_vs_drop_early_demux_sk(skb);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto error;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

#ifdef CONFIG_IP_VS_IPV6
	if (skb_af == AF_INET6) {
		old_ipv6h = ipv6_hdr(skb);
		*next_protocol = IPPROTO_IPV6;
		if (payload_len)
			*payload_len = ntohs(old_ipv6h->payload_len) +
				       sizeof(*old_ipv6h);
		*dsfield = ipv6_get_dsfield(old_ipv6h);
		*ttl = old_ipv6h->hop_limit;
		if (df)
			*df = 0;
	} else
#endif
	{
		old_iph = ip_hdr(skb);
		/* Copy DF, reset fragment offset and MF */
		if (df)
			*df = (old_iph->frag_off & htons(IP_DF));
		*next_protocol = IPPROTO_IPIP;

		/* fix old IP header checksum */
		ip_send_check(old_iph);

		*dsfield = ipv4_get_dsfield(old_iph);
		*ttl = old_iph->ttl;
		if (payload_len)
			*payload_len = ntohs(old_iph->tot_len);
	}

	return skb;
error:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0,
							      GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
/* called by execute_plums() to execute BPF program
 * or send it out of vport if destination plum_id is zero
 * It's called with rcu_read_lock.
 */
static void __bpf_forward(struct bpf_dp_context *ctx, u32 dest)
{
	struct datapath *dp = ctx->dp;
	u32 plum_id = dest >> 16;
	u32 port_id = dest & 0xffff;
	struct plum *plum;
	struct vport *vport;
	struct ovs_key_ipv4_tunnel tun_key;

	plum = rcu_dereference(dp->plums[plum_id]);
	if (unlikely(!plum)) {
		kfree_skb(ctx->skb);
		return;
	}
	if (plum_id == 0) {
		if (ctx->context.tun_key.dst_ip) {
			tun_key.tun_id = cpu_to_be64(ctx->context.tun_key.tun_id);
			tun_key.ipv4_src = cpu_to_be32(ctx->context.tun_key.src_ip);
			tun_key.ipv4_dst = cpu_to_be32(ctx->context.tun_key.dst_ip);
			tun_key.ipv4_tos = ctx->context.tun_key.tos;
			tun_key.ipv4_ttl = ctx->context.tun_key.ttl;
			tun_key.tun_flags = TUNNEL_KEY;
			OVS_CB(ctx->skb)->tun_key = &tun_key;
		} else {
			OVS_CB(ctx->skb)->tun_key = NULL;
		}

		plum_update_stats(plum, port_id, ctx->skb, false);

		vport = ovs_vport_rcu(dp, port_id);
		if (unlikely(!vport)) {
			kfree_skb(ctx->skb);
			return;
		}

		/** begin_fixme **/
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
		offload_send(vport, ctx->skb);
#else
		ovs_vport_send(vport, ctx->skb);
#endif
		/** end_fixme **/
	} else {
		ctx->context.port_id = port_id;
		ctx->context.plum_id = plum_id;
		BUG_ON(plum->run == NULL);
		plum_update_stats(plum, port_id, ctx->skb, true);
		/* execute BPF program */
		plum->run(ctx);
		consume_skb(ctx->skb);
	}
}
static void svc_release_udp_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		consume_skb(skb);
	}
}
void ts27010_destroy_network(struct dlci_struct *dlci)
{
	struct ts27010_mux_net *mux_net;

	mux_print(MSG_INFO, "destroy network interface[%d]\n", dlci->line_no);

	if (dlci->net) {
		mux_net = (struct ts27010_mux_net *)netdev_priv(dlci->net);

		if (mux_net->net_wq != NULL) {
			int retry_count = 0;
			int skb_list_count = 0;

			/* wait for the workqueue to send all pending TX */
			mux_print(MSG_LIGHT, "[WQ] cancel_work_sync\n");
			cancel_delayed_work_sync(&mux_net->net_work);

			mux_print(MSG_LIGHT, "[WQ] destroy_workqueue, wq=[0x%x]\n",
				  mux_net->net_wq);
			destroy_workqueue(mux_net->net_wq);
			mux_net->net_wq = NULL;

			for (retry_count = 0; retry_count < 20; retry_count++) {
				mutex_lock(&mux_net->net_wq_lock);
				skb_list_count = skb_queue_len(&mux_net->txhead);
				mutex_unlock(&mux_net->net_wq_lock);

				if (skb_list_count > 0) {
					mux_print(MSG_ERROR, "skb_list_count=[%d]\n",
						  skb_list_count);
					mdelay(100);
				} else {
					break;
				}
			}

			if (skb_list_count > 0) {
				mutex_lock(&mux_net->net_wq_lock);
				mux_print(MSG_ERROR, "clear skb\n");
				while (skb_queue_len(&mux_net->txhead) > 0) {
					struct sk_buff *temp =
						skb_dequeue(&mux_net->txhead);
					if (temp != NULL)
						consume_skb(temp);
					temp = NULL;
				}
				mutex_unlock(&mux_net->net_wq_lock);
			} else {
				mutex_destroy(&mux_net->net_wq_lock);
			}
		}

		mux_print(MSG_LIGHT, "unregister_netdev\n");
		unregister_netdev(dlci->net);
		mux_print(MSG_LIGHT, "dlci_net_free\n");
		dlci_net_free(dlci);
#ifdef ENABLE_MUX_NET_KREF_FEATURE
		kref_put(&mux_net->ref, net_free);
#endif
	}
}
static int ip_local_deliver_finish(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);

	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot != NULL) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				protocol = -ret;
				goto resubmit;
			}
			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
out:
	rcu_read_unlock();

	return 0;
}
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path)
{
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel. */
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_len = 0;

		memcpy(ip_hdr(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);

		skb->protocol = proto;
		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	int error;

	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		int key_len;

		/* Extract flow from 'skb' into 'key'. */
		error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		/* Look up flow. */
		flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
					   &key, key_len);
		if (unlikely(!flow)) {
			struct dp_upcall_info upcall;

			upcall.cmd = OVS_PACKET_CMD_MISS;
			upcall.key = &key;
			upcall.userdata = NULL;
			upcall.pid = p->upcall_pid;
			ovs_dp_upcall(dp, skb, &upcall);
			consume_skb(skb);
			stats_counter = &stats->n_missed;
			goto out;
		}

		OVS_CB(skb)->flow = flow;
	}

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}
/**
 * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}
static void rmnet_map_ingress_handler(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}
/**
 * batadv_forw_packet_free() - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);

	kfree(forw_packet);
}
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
		int err;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
		struct sk_buff *reasm;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		reasm = nf_ct_frag6_gather(net, skb, user);
		if (!reasm)
			return -EINPROGRESS;

		if (skb == reasm)
			return -EINVAL;

		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
		skb_morph(skb, reasm);
		consume_skb(reasm);
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#else
		return -EPFNOSUPPORT;
#endif
	} else {
		return -EPFNOSUPPORT;
	}

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}
static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));

	return err;
}