/*
 * Receive an ARP packet from the network layer (packet_type handler for
 * ETH_P_ARP).  Validates the header, then hands the packet to the
 * NF_ARP_IN netfilter hook with arp_process as the okfn.
 *
 * Returns 0 in all cases; the skb is either consumed by NF_HOOK,
 * freed here, or (on clone failure) already freed by skb_share_check().
 */
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct arphdr *arp;

	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		goto freeskb;

	arp = arp_hdr(skb);
	/* Drop if: hardware length mismatch, device does not do ARP,
	 * packet not addressed to us, loopback, or protocol address is
	 * not IPv4 (ar_pln != 4). */
	if (arp->ar_hln != dev->addr_len ||
	    dev->flags & IFF_NOARP ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_pln != 4)
		goto freeskb;

	/* Unshare before modifying; on clone failure the original skb has
	 * already been freed by skb_share_check(), so skip kfree_skb(). */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out_of_mem;

	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));

	return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);

freeskb:
	kfree_skb(skb);
out_of_mem:
	return 0;
}
/*
 * Receive an ARP packet (older kernel API: three-argument packet_type
 * handler, header reached through the skb->nh union).  Validates the
 * header and passes the packet to the NF_ARP_IN hook with arp_process.
 *
 * Returns 0 in all cases; the skb is consumed, freed here, or already
 * freed by a failed skb_share_check().
 */
int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct arphdr *arp;

	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * dev->addr_len) +
				 (2 * sizeof(u32)))))
		goto freeskb;

	arp = skb->nh.arph;
	/* Sanity checks mirroring the modern arp_rcv(): hardware length,
	 * NOARP device, foreign/loopback packet, non-IPv4 protocol. */
	if (arp->ar_hln != dev->addr_len ||
	    dev->flags & IFF_NOARP ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_pln != 4)
		goto freeskb;

	/* On clone failure skb_share_check() frees the original, so fall
	 * through to out_of_mem without another kfree_skb(). */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out_of_mem;

	return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);

freeskb:
	kfree_skb(skb);
out_of_mem:
	return 0;
}
/*
 * Inspect an skb and, if it is an IPv4 fragment, feed it to ip_defrag().
 *
 * Returns the (possibly trimmed/unshared) skb when it should continue up
 * the stack, or NULL when ip_defrag() absorbed it into a reassembly queue.
 * Non-IPv4 and malformed packets are passed through untouched.
 */
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	/* BUG FIX: skb_copy_bits() returns 0 on success and a negative
	 * errno on failure.  The original tested !skb_copy_bits(...),
	 * which bailed out on SUCCESS — so no packet ever reached the
	 * defrag path.  Only bail out on a real copy error. */
	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		/* Unshare before ip_defrag() mutates the skb. */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;	/* queued for reassembly */
			/* Reassembled packet: old rxhash is stale. */
			skb->rxhash = 0;
		}
	}
	return skb;
}
/* Must be called with rcu_read_lock. */
/*
 * Hand a packet received on an Open vSwitch netdev vport to the
 * datapath.  Consumes the skb on every path: it is either passed to
 * ovs_vport_receive(), freed on error, or already freed by a failed
 * skb_share_check().
 */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	/* LRO-aggregated skbs cannot be forwarded; warn and drop. */
	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Restore the Ethernet header that the rx path pulled, and keep
	 * the checksum consistent with the pushed bytes. */
	skb_push(skb, ETH_HLEN);
	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}
/* Must be called with rcu_read_lock. */
/*
 * Older Open vSwitch variant: the vport is supplied by the caller.
 * Consumes the skb on every path (delivered, freed, or freed inside a
 * failed skb_share_check()).
 */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return;
	}

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Restore the Ethernet header pulled by the rx path. */
	skb_push(skb, ETH_HLEN);

	/* Normalize checksum state for the OVS datapath; drop on failure. */
	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}
	vlan_copy_skb_tci(skb);

	ovs_vport_receive(vport, skb);
}
/*
 * IPVS direct-routing transmitter, IPv6 flavor.  Routes the packet to
 * the real server unchanged (DR mode rewrites only L2).
 *
 * Returns NF_STOLEN on every visible path; note that the IP_VS_XMIT()
 * macro performs a non-local return/NF_HOOK itself, so the local-route
 * branch does not fall through — NOTE(review): this relies on the
 * IP_VS_XMIT macro's internal control flow; confirm against the macro
 * definition in ip_vs_xmit.c.
 */
int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;	/* Route to the other host */
	int mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
					 0, 1|2)))
		goto tx_error_icmp;
	if (__ip_vs_is_local_route6(rt)) {
		/* Destination is local: release the route and deliver
		 * locally via the IP_VS_XMIT macro. */
		dst_release(&rt->dst);
		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if (skb->len > mtu) {
		if (!skb->dev) {
			/* icmpv6_send() needs a device; borrow loopback. */
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		dst_release(&rt->dst);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(skb == NULL)) {
		/* Original skb already freed by skb_share_check(). */
		dst_release(&rt->dst);
		return NF_STOLEN;
	}

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
/*
 * Strip an in-band 802.1Q tag from an skb and record the TCI in the
 * skb's hardware-accel tag field, as if the NIC had done VLAN offload.
 *
 * Returns the untagged skb, or NULL if the packet was dropped (freed).
 * kfree_skb(NULL) is a no-op, so the err_free path is safe even when
 * skb_share_check() already freed the original and returned NULL.
 */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	/* Need the full VLAN header in the linear area before reading it. */
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* Remove the tag bytes and fix the encapsulated protocol. */
	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
/*
 * Steer a VLAN-tagged skb to its VLAN device.
 *
 * Returns true when a VLAN device claimed the packet (*skbp updated to
 * point at the possibly-unshared skb, stats accounted); false when no
 * VLAN device matches or the skb was lost to an allocation failure.
 * Only the last registered handler may demote pkt_type to OTHERHOST.
 */
bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	/* Per-CPU stats under a u64 seqcount for 32-bit safety. */
	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
/* Must be called with rcu_read_lock. */
/*
 * Open vSwitch variant taking the vport as an argument; delivers the
 * packet to the datapath.  Consumes the skb on every path (delivered,
 * freed on error, or freed inside a failed skb_share_check()).
 */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport))
		goto error;

	/* LRO-aggregated skbs cannot be forwarded; warn and drop. */
	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Restore the Ethernet header and keep the checksum consistent. */
	skb_push(skb, ETH_HLEN);
	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	ovs_vport_receive(vport, skb, NULL);
	return;
error:
	kfree_skb(skb);
}
/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 */
/*
 * IPv4 IPVS direct-routing transmit path.  Routes the packet toward the
 * real server and re-injects it; the packet itself is not rewritten at
 * L3 (DR mode).  Returns NF_STOLEN on every visible path; IP_VS_XMIT()
 * handles delivery/return internally — NOTE(review): relies on the
 * IP_VS_XMIT macro's non-local control flow; confirm against its
 * definition.
 */
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;	/* Route to the other host */
	struct iphdr *iph = ip_hdr(skb);
	int mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(iph->tos),
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL)))
		goto tx_error_icmp;
	if (rt->rt_flags & RTCF_LOCAL) {
		/* Local destination: deliver via IP_VS_XMIT with local=1. */
		ip_rt_put(rt);
		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
	    !skb_is_gso(skb)) {
		/* DF set and would not fit: tell the sender. */
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		/* Original skb already freed by skb_share_check(). */
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
/*
 * Deliver an incoming frame to a SOCK_PACKET socket (legacy API).
 * Fills in a sockaddr_pkt in the skb control buffer and queues the skb
 * on the socket's receive queue.  Always returns 0; the skb is queued,
 * freed here, or already freed by a failed skb_share_check().
 */
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb->mac.raw
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	spkt = (struct sockaddr_pkt*)skb->cb;

	skb_push(skb, skb->data-skb->mac.raw);

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk,skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { struct ipv6hdr *hdr; u32 pkt_len; if (skb->pkt_type == PACKET_OTHERHOST) goto drop; IP6_INC_STATS_BH(InReceives); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { IP6_INC_STATS_BH(InDiscards); goto out; } /* Store incoming device index. When the packet will be queued, we cannot refer to skb->dev anymore. */ IP6CB(skb)->iif = dev->ifindex; if (skb->len < sizeof(struct ipv6hdr)) goto err; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(InHdrErrors); goto drop; } hdr = skb->nh.ipv6h; if (hdr->version != 6) goto err; pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) goto truncated; if (pkt_len + sizeof(struct ipv6hdr) < skb->len) { if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr))){ IP6_INC_STATS_BH(InHdrErrors); goto drop; } hdr = skb->nh.ipv6h; if (skb->ip_summed == CHECKSUM_HW) skb->ip_summed = CHECKSUM_NONE; } } if (hdr->nexthdr == NEXTHDR_HOP) { skb->h.raw = (u8*)(hdr+1); if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) { IP6_INC_STATS_BH(InHdrErrors); return 0; } hdr = skb->nh.ipv6h; }
/*
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
/*
 * Bridge rx_handler entry: decide whether a frame received on a bridge
 * port is consumed by the bridge (returns NULL) or handed back to the
 * normal rx path (returns the skb).
 */
struct sk_buff *br_handle_frame(struct sk_buff *skb)
{
	struct net_bridge_port *p;
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	int (*rhook)(struct sk_buff *skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		return skb;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* rx_handler is only installed while the port exists, so this is
	 * valid under the caller's rcu_read_lock. */
	p = br_port_get_rcu(skb->dev);

	if (unlikely(is_link_local(dest))) {
		/* Pause frames shouldn't be passed up by driver anyway */
		if (skb->protocol == htons(ETH_P_PAUSE))
			goto drop;

		/* If STP is turned off, then forward */
		if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
			goto forward;

		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
			    NULL, br_handle_local_finish))
			return NULL;	/* frame consumed by filter */
		else
			return skb;	/* continue processing */
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook != NULL) {
			if (rhook(skb))
				return skb;
			/* Hook may have altered the header; refresh dest. */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (!compare_ether_addr(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return NULL;
}
/*
 * Trim @skb down to @mtu bytes when it is too long.
 *
 * Returns the original skb untouched when it already fits, the
 * (unshared) trimmed skb otherwise, or NULL when unsharing failed
 * (skb_share_check() frees the original in that case).
 */
struct sk_buff *xip_trim_packet_if_needed(struct sk_buff *skb, u32 mtu)
{
	struct sk_buff *owned;

	/* Common case: the packet already fits. */
	if (likely(skb->len <= mtu))
		return skb;

	BUG_ON(mtu < XIP_MIN_MTU);

	/* Get a private copy before mutating the length. */
	owned = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!owned))
		return NULL;

	__skb_trim(owned, mtu);
	return owned;
}
/*
 * Virtual CAN transmit: account the frame and, when loopback echo is
 * requested, re-inject it via vcan_rx().  Always returns NETDEV_TX_OK;
 * the skb is consumed on every path.
 */
static int vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *cf = (struct can_frame *)skb->data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
	struct net_device_stats *stats = &dev->stats;
#else
	/* Pre-2.6.23 kernels keep stats in the private area. */
	struct net_device_stats *stats = netdev_priv(dev);
#endif
	int loop;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	stats->tx_packets++;
	stats->tx_bytes += cf->can_dlc;

	/* set flag whether this packet has to be looped back */
	loop = skb->pkt_type == PACKET_LOOPBACK;

	if (!echo) {
		/* no echo handling available inside this driver */

		if (loop) {
			/*
			 * only count the packets here, because the
			 * CAN core already did the echo for us
			 */
			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
		}
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* perform standard echo handling for CAN network interfaces */

	if (loop) {
		struct sock *srcsk = skb->sk;

		/* Unshare before re-injecting; preserve the owning socket
		 * so the CAN core can match the echo to its sender. */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* receive with packet counting */
		skb->sk = srcsk;
		vcan_rx(skb, dev);
	} else {
		/* no looped packets => no counting */
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}
/**
 *	llc_rcv - 802.2 entry point from net lower layers
 *	@skb: received pdu
 *	@dev: device that receive pdu
 *	@pt: packet type
 *
 *	When the system receives a 802.2 frame this function is called. It
 *	checks SAP and connection of received pdu and passes frame to
 *	llc_{station,sap,conn}_rcv for sending to proper state machine. If
 *	the frame is related to a busy connection (a connection is sending
 *	data now), it queues this frame in the connection's backlog.
 *
 *	Returns 0 in all cases; the skb is consumed on every path.
 */
int llc_rcv(struct sk_buff *skb, struct net_device *dev,
	    struct packet_type *pt)
{
	struct llc_sap *sap;
	struct llc_pdu_sn *pdu;
	int dest;

	/*
	 * When the interface is in promisc. mode, drop all the crap that it
	 * receives, do not try to analyse it.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		dprintk("%s: PACKET_OTHERHOST\n", __FUNCTION__);
		goto drop;
	}
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	if (unlikely(!llc_fixup_skb(skb)))
		goto drop;
	pdu = llc_pdu_sn_hdr(skb);
	if (unlikely(!pdu->dsap)) /* NULL DSAP, refer to station */
		goto handle_station;
	sap = llc_sap_find(pdu->dsap);
	if (unlikely(!sap)) {/* unknown SAP */
		dprintk("%s: llc_sap_find(%02X) failed!\n", __FUNCTION__,
			pdu->dsap);
		goto drop;
	}
	/*
	 * First the upper layer protocols that don't need the full
	 * LLC functionality
	 */
	if (sap->rcv_func) {
		sap->rcv_func(skb, dev, pt);
		goto out;
	}
	/* Dispatch by PDU type (U/I/S) through the handler table. */
	dest = llc_pdu_type(skb);
	if (unlikely(!dest || !llc_type_handlers[dest - 1]))
		goto drop;
	llc_type_handlers[dest - 1](sap, skb);
out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
handle_station:
	if (!llc_station_handler)
		goto drop;
	llc_station_handler(skb);
	goto out;
}
/*
 * Called via br_handle_frame_hook.
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock (preempt_disabled)
 */
struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	int (*rhook)(struct sk_buff *skb);

	/* Invalid source MAC address: drop the packet. */
	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	if (unlikely(is_link_local(dest))) {
		/* Pause frames shouldn't be passed up by driver anyway */
		if (skb->protocol == htons(ETH_P_PAUSE))
			goto drop;

		/* Process STP BPDU's through normal netif_receive_skb() path */
		if (p->br->stp_enabled != BR_NO_STP) {
			if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
				    NULL, br_handle_local_finish))
				return NULL;
			else
				return skb;
		}
	}

	switch (p->state) {
	case BR_STATE_FORWARDING:
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook != NULL) {
			if (rhook(skb))
				return skb;
			/* Hook may have altered the header; refresh dest. */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		/* In learning/forwarding state: update the forwarding
		 * database via the PRE_ROUTING hook path. */
		if (!compare_ether_addr(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return NULL;
}
/*
 * (1) len doesn't include the header by default.  I want this.
 */
/*
 * ATA-over-Ethernet receive handler.  Validates the AoE header and
 * dispatches responses to the ATA/CFG command handlers.  Always returns
 * 0; the skb is freed at exit (or was already freed by a failed
 * skb_share_check()).
 */
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	u32 n;

	if (dev_net(ifp) != &init_net)
		goto exit;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (skb_linearize(skb))
		goto exit;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* (1) */

	h = (struct aoe_hdr *) skb_mac_header(skb);
	n = get_unaligned_be32(&h->tag);
	/* Ignore requests and locally-generated tags (top bit set). */
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;
		if (net_ratelimit())
			printk(KERN_ERR "%s%d.%d@%s; ecode=%d '%s'\n",
				"aoe: error packet from ",
				get_unaligned_be16(&h->major), h->minor,
				skb->dev->name, h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		printk(KERN_INFO "aoe: unknown cmd %d\n", h->cmd);
	}
exit:
	dev_kfree_skb(skb);
	return 0;
}
/*
 * Decide the fate of a frame received on bridge port @p: consumed by
 * the bridge (returns NULL) or handed back to the caller (returns skb).
 * Called under rcu_read_lock by the bridge hook.
 */
struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	int (*rhook)(struct sk_buff *skb);

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	if (unlikely(is_link_local(dest))) {
		/* Pause frames are handled by the driver, never bridged. */
		if (skb->protocol == htons(ETH_P_PAUSE))
			goto drop;

		/* With STP off, forward STP-address frames like data. */
		if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
			goto forward;

		if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
			    NULL, br_handle_local_finish))
			return NULL;	/* consumed by filter */
		else
			return skb;	/* continue normal processing */
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook != NULL) {
			if (rhook(skb))
				return skb;
			/* Hook may have altered the header; refresh dest. */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (!compare_ether_addr(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return NULL;
}
/*
 * When the vlan device has the reorder-header flag (bit 0) set, rewrite
 * the frame so the Ethernet addresses sit directly before the payload,
 * hiding the VLAN tag from upper layers.
 *
 * Returns the (possibly unshared) skb, or NULL if skb_share_check()
 * failed (it frees the original in that case).
 */
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
	struct sk_buff *priv;

	/* Nothing to do unless the reorder flag is set on the device. */
	if (!(VLAN_DEV_INFO(skb->dev)->flags & 1))
		return skb;

	priv = skb_share_check(skb, GFP_ATOMIC);
	if (priv) {
		/* Lifted from Gleb's VLAN code... */
		/* Slide the 12 address bytes up over the 4-byte tag. */
		memmove(priv->data - ETH_HLEN,
			priv->data - VLAN_ETH_HLEN, 12);
		priv->mac.raw += VLAN_HLEN;
	}
	return priv;
}
/** * Before the last deliver skb to ETH_P_ALL is called, this registered handler will * be called. During this time, we will revert the pkt_type from control buf in skb * * @param[in] skb - double pointer to the skb in case we need to clone.. * * @returns action that needs to be taken on the skb. we can consume it. */ rx_handler_result_t rw_fpath_kni_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = (struct sk_buff *)*pskb; struct kni_dev *kni; rx_handler_result_t ret = RX_HANDLER_PASS; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return RX_HANDLER_CONSUMED; if (!skb->dev){ KNI_ERR("No device in the skb in rx_handler\n"); return RX_HANDLER_PASS; } kni = netdev_priv(skb->dev); if (!kni){ KNI_ERR("no kni private data in the device in rx_handler\n"); return RX_HANDLER_PASS; } *pskb = skb; switch (skb->pkt_type){ case PACKET_OUTGOING: skb->pkt_type = PACKET_OTHERHOST; kni->rx_treat_as_tx_filtered++; consume_skb(skb); ret = RX_HANDLER_CONSUMED; break; case PACKET_LOOPBACK: skb->pkt_type = skb->mark; if (skb->pkt_type == PACKET_OTHERHOST){ /*Force the packet to be accepted by the IP stack*/ skb->pkt_type = 0; } kni->rx_treat_as_tx_delivered++; skb->mark = 0; break; case PACKET_OTHERHOST: kni->rx_filtered++; consume_skb(skb); ret = RX_HANDLER_CONSUMED; break; default: kni->rx_delivered++; break; } return ret; }
/*
 * Feed a received frame into the LAPB layer for X.25.
 *
 * Returns NET_RX_SUCCESS when LAPB accepted the frame; NET_RX_DROP when
 * unsharing failed (counted as rx_dropped) or LAPB rejected it (counted
 * as rx_errors, skb freed here).
 */
static int x25_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *frame;

	/* Get a private copy before LAPB consumes it. */
	frame = skb_share_check(skb, GFP_ATOMIC);
	if (!frame) {
		dev->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (lapb_data_received(dev, frame) != LAPB_OK) {
		/* LAPB did not take ownership: account and free. */
		dev->stats.rx_errors++;
		dev_kfree_skb_any(frame);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
/*
 * Steer an skb carrying a hardware-accel VLAN tag to its VLAN device.
 *
 * Returns true when a matching VLAN device claimed the packet (*skbp
 * updated, stats accounted); false when no device matches (tagged
 * packets are demoted to PACKET_OTHERHOST) or unsharing failed.
 */
bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	/* Per-CPU stats under a u64 seqcount for 32-bit safety. */
	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
/*
 * (1) len doesn't include the header by default.  I want this.
 */
/*
 * ATA-over-Ethernet receive handler (older kernel: two-argument
 * skb_linearize, skb->mac.raw).  Validates the AoE header and
 * dispatches responses.  Always returns 0; the skb is freed at exit
 * (or was already freed by a failed skb_share_check()).
 */
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	u32 n;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (skb_is_nonlinear(skb))
		if (skb_linearize(skb, GFP_ATOMIC) < 0)
			goto exit;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* (1) */

	h = (struct aoe_hdr *) skb->mac.raw;
	n = be32_to_cpu(h->tag);
	/* Ignore requests and locally-generated tags (top bit set). */
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;
		if (net_ratelimit())
			printk(KERN_ERR "aoe: aoenet_rcv: error packet from %d.%d; "
			       "ecode=%d '%s'\n",
			       be16_to_cpu(h->major), h->minor,
			       h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		printk(KERN_INFO "aoe: aoenet_rcv: unknown cmd %d\n", h->cmd);
	}
exit:
	dev_kfree_skb(skb);
	return 0;
}
/*
 * Virtual CAN transmit: account the frame and, when loopback echo is
 * enabled (module parameter 'echo'), re-inject it via vcan_rx().
 * Always returns NETDEV_TX_OK; the skb is consumed on every path.
 */
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	int loop;

	stats->tx_packets++;
	stats->tx_bytes += skb->len;

	/* set flag whether this packet has to be looped back */
	loop = skb->pkt_type == PACKET_LOOPBACK;

	if (!echo) {
		/* no echo handling available inside this driver */

		if (loop) {
			/*
			 * only count the packets here, because the
			 * CAN core already did the echo for us
			 */
			stats->rx_packets++;
			stats->rx_bytes += skb->len;
		}
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* perform standard echo handling for CAN network interfaces */

	if (loop) {
		struct sock *srcsk = skb->sk;

		/* Unshare before re-injecting; preserve the owning socket
		 * so the CAN core can match the echo to its sender. */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* receive with packet counting */
		skb->sk = srcsk;
		vcan_rx(skb, dev);
	} else {
		/* no looped packets => no counting */
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}
/*
 * ARP receive handler with a vendor (ZebOS) extension: packets whose
 * destination MAC is not ours may still be accepted when the address
 * appears in the device's MAC alias list.  Returns 0 in all cases.
 */
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct arphdr *arp;

	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		goto freeskb;

	arp = arp_hdr(skb);
	if (arp->ar_hln != dev->addr_len ||
	    dev->flags & IFF_NOARP ||
	    /* add by Zebos 2015-06-01*/
	    /* skb->pkt_type == PACKET_OTHERHOST || */
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_pln != 4)
		goto freeskb;

	/* Accept packets if dest MAC is on the device MAC alias list. */
	/* NOTE(review): the polarity here treats a zero return from
	 * dev_ma_list_lookup() as "accept"; confirm against its
	 * definition, since the comment reads the other way around. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		extern int dev_ma_list_lookup(struct net_device *dev,
					      unsigned char *addr, int alen);

		if (! dev_ma_list_lookup(dev, eth_hdr(skb)->h_dest,
					 dev->addr_len)) {
			skb->pkt_type = PACKET_HOST;
		} else {
			goto freeskb;
		}
	}

	/* On clone failure skb_share_check() frees the original. */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out_of_mem;

	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));

	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);

freeskb:
	kfree_skb(skb);
out_of_mem:
	return 0;
}
/* Must be called with rcu_read_lock. */
/*
 * Earliest Open vSwitch variant: no vport/NULL checks here — the caller
 * guarantees a valid vport.  Consumes the skb (delivered to
 * vport_receive(), or freed inside a failed skb_share_check()).
 */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Warn (but do not drop) on LRO-aggregated skbs. */
	skb_warn_if_lro(skb);

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; int loop; stats->tx_packets++; stats->tx_bytes += skb->len; loop = skb->pkt_type == PACKET_LOOPBACK; if (!echo) { if (loop) { stats->rx_packets++; stats->rx_bytes += skb->len; } kfree_skb(skb); return NETDEV_TX_OK; } if (loop) { struct sock *srcsk = skb->sk; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return NETDEV_TX_OK; skb->sk = srcsk; vcan_rx(skb, dev); } else { kfree_skb(skb); } return NETDEV_TX_OK; }
A_BOOL preIP_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { #ifdef PRE_IP_DEBUG printk("preIP_rcv:come in\n"); #endif if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out_of_mem; #ifdef PRE_IP_DEBUG printk("PreIP_Receive: above\n"); #endif /*return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, );*/ return PreIP_Receive(skb); out_of_mem: return 0; }
/* * this handler comes into play only for a brief moment: when the agent resets * vrouter, a process in which all the vifs are removed, this rx handler is * installed on the physical device with argument (rx_handler_data) set to the * vhost device */ rx_handler_result_t vhost_rx_handler(struct sk_buff **pskb) { struct net_device *vdev; struct sk_buff *skb = *pskb; vdev = rcu_dereference(skb->dev->rx_handler_data); if (!vdev) { kfree_skb(*pskb); return RX_HANDLER_CONSUMED; } if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return RX_HANDLER_PASS; skb->dev = vdev; (void)__sync_fetch_and_add(&vdev->stats.rx_bytes, skb->len); (void)__sync_fetch_and_add(&vdev->stats.rx_packets, 1); return RX_HANDLER_ANOTHER; }