/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	uint8_t *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (uint8_t *)&local_claim_dest);
	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n",
			   mac, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n",
			   mac, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG)
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
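/* The HW DST above encodes the claim itself: the first three bytes are the
 * fixed BLA magic FF:43:05, the fourth byte carries the claim type and the
 * last two the group id. A minimal sketch of how such a destination could be
 * assembled by hand, assuming the upstream batman-adv layout of
 * struct batadv_bla_claim_dst (u8 magic[3]; u8 type; __be16 group); the
 * helper name is illustrative only:
 */
static void bla_build_claim_dst_example(struct batadv_bla_claim_dst *dst,
					int claimtype, __be16 group)
{
	dst->magic[0] = 0xff;	/* fixed BLA magic FF:43:05 */
	dst->magic[1] = 0x43;
	dst->magic[2] = 0x05;
	dst->type = claimtype;	/* XX: CLAIM, UNCLAIM, ANNOUNCE, ... */
	dst->group = group;	/* YY:YY: group id of the mesh */
}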
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev,
					 struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0>
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4095 => unsupported, drop
		 */
		vlan_get_tag(skb, &tci);

		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			/* verify that datagram is IPv4 or IPv6 */
			skb_reset_mac_header(skb);
			switch (eth_hdr(skb)->h_proto) {
			case htons(ETH_P_IP):
			case htons(ETH_P_IPV6):
				break;
			default:
				goto error;
			}
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
		skb_pull(skb, ETH_HLEN);
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);

	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check based hdr len */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have key field,
	 * Use ERSPAN 10-bit session ID as key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags, iph->saddr, iph->daddr,
				  tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)skb->data;
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb, len, htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = rpl_ip_tun_rx_dst(skb, flags, tun_id,
						    sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ovs_ip_tunnel_rcv(tunnel->dev, skb, tun_dst);
		kfree(tun_dst);
		return PACKET_RCVD;
	}
drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (skb_vlan_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);

				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {
			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip,
			       sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip,
			       sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}

	return 0;
}
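/* For ICMP the flow key reuses the transport-port slots: tp.src holds the
 * type and tp.dst the code, both widened to 16-bit network byte order. A
 * hypothetical pair of accessors that recover them, assuming the sw_flow_key
 * layout used above:
 */
static inline u8 flow_key_icmp_type_example(const struct sw_flow_key *key)
{
	return ntohs(key->tp.src);	/* e.g. 8 for an echo request */
}

static inline u8 flow_key_icmp_code_example(const struct sw_flow_key *key)
{
	return ntohs(key->tp.dst);
}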
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_TCP_FIXEDID |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP |
				  SKB_GSO_SIT |
				  SKB_GSO_PARTIAL)))
		goto out;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
			 int hdr_size, struct batadv_orig_node *orig_node)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
	short vid __maybe_unused = -1;
	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
	bool is_bcast;

	is_bcast = (batadv_header->packet_type == BATADV_BCAST);

	/* check if enough space is available for pulling, and pull */
	if (!pskb_may_pull(skb, hdr_size))
		goto dropped;

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		vhdr = (struct vlan_ethhdr *)skb->data;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;

		if (vhdr->h_vlan_encapsulated_proto != ethertype)
			break;

		/* fall through */
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;
	skb->protocol = eth_type_trans(skb, soft_iface);

	/* should not be necessary anymore as we use skb_pull_rcsum()
	 * TODO: please verify this and remove this TODO
	 * -- Dec 21st 2009, Simon Wunderlich
	 */
	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	soft_iface->last_rx = jiffies;

	/* Let the bridge loop avoidance check the packet. If it will not
	 * handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source);

	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
		goto dropped;

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev,
					 struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	bool is_ip;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* Some applications using e.g. packet sockets will
		 * bypass the VLAN acceleration and create tagged
		 * ethernet frames directly.  We primarily look for
		 * the accelerated out-of-band tag, but fall back if
		 * required
		 */
		skb_reset_mac_header(skb);
		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
		    __vlan_get_tag(skb, &tci) == 0) {
			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
			skb_pull(skb, VLAN_ETH_HLEN);
		} else {
			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
			skb_pull(skb, ETH_HLEN);
		}

		/* Is IP session <0> tagged too? */
		if (info->flags & FLAG_IPS0_VLAN) {
			/* drop all untagged packets */
			if (!tci)
				goto error;

			/* map MBIM_IPS0_VID to IPS<0> */
			if (tci == MBIM_IPS0_VID)
				tci = 0;
		}

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4093 => unsupported, drop
		 *   4094       => IPS session <0> if FLAG_IPS0_VLAN
		 */
		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			if (!is_ip)
				goto error;
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			if (is_ip)
				goto error;
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);

	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
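/* Both versions of the tx_fixup above fold the session number into the last
 * byte of the 32-bit NDP16 signature ("IPS"/"DSS" plus session id). A minimal
 * stand-alone sketch of that mapping; the helper name is hypothetical, the
 * signature constants are the ones used above:
 */
static __le32 mbim_tci_to_ndp16_sign_example(u16 tci)
{
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u8 *c = (u8 *)&sign;

	/* DSS sessions live in the 256-511 VLAN range */
	if ((tci & 0x0f00) == 0x0100)
		sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);

	c[3] = tci;	/* the low byte carries the session id */
	return sign;
}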
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev,
				 int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no need to continue. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
void *packet_init(struct sk_buff *skb, const struct net_device *out)
{
	struct sk_buff *newskb = NULL;
	struct ethhdr *ethh = NULL;
	struct tcphdr *tcph = NULL;
	struct iphdr *iph = NULL;
	unsigned char *pdata = NULL;
	struct tcphdr *old_tcph = NULL;
	struct iphdr *old_iph = NULL;
	struct ethhdr *old_ethh = NULL;
	struct net_device *dev = NULL;
	unsigned short old_data_len = 0;
	unsigned char dest[6] = {0x08, 0x00, 0x27, 0xc4, 0xe6, 0x3b};
	unsigned char src[6] = {0x52, 0x54, 0x00, 0x12, 0x35, 0x02};
	char pkt302[] =
		"HTTP/1.1 302 Found\r\n"
		"Location: http://www.126.com/\r\n"
		"Content-Length: 0\r\n"
		"Connection: close\r\n\r\n";
	//char pkt301[] =
	//	"HTTP/1.1 301 Moved Permanently\r\n"
	//	"Location: http://www.jd.com\r\n"
	//	"Content-Type: text/html; charset=iso-8859-1\r\n"
	//	"Content-length: 0\r\n"
	//	"Cache-control: no-cache\r\n"
	//	"\r\n";

	/* malloc skb space, fill l4, l3 and l2 headers, return newskb */
	dev = dev_get_by_name(&init_net, "eth0");

	{
		// old skb info
		old_tcph = (struct tcphdr *)skb_transport_header(skb);
		old_iph = (struct iphdr *)skb_network_header(skb);
		old_ethh = (struct ethhdr *)skb_mac_header(skb);
	}

	newskb = alloc_skb(strlen(pkt302) + sizeof(struct tcphdr) +
			   sizeof(struct iphdr) + ETH_HLEN + 2, GFP_ATOMIC);
	if (newskb == NULL) {
		if (dev)
			dev_put(dev);	/* drop the reference taken above */
		return NULL;
	}

	/* skb padding; must be reserved on the new skb, not the old one */
	skb_reserve(newskb, 2);

	newskb->dev = (struct net_device *)out;
	//newskb->dev = dev;
	newskb->pkt_type = PACKET_HOST;
	newskb->protocol = __constant_htons(ETH_P_IP);
	newskb->ip_summed = CHECKSUM_NONE;
	newskb->priority = 0;

	skb_put(newskb, sizeof(struct ethhdr));
	skb_reset_mac_header(newskb);
	skb_put(newskb, sizeof(struct iphdr));
	skb_set_network_header(newskb, sizeof(struct ethhdr));
	skb_put(newskb, sizeof(struct tcphdr));
	skb_set_transport_header(newskb, sizeof(struct iphdr) + sizeof(struct ethhdr));
	//skb_put(newskb, sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr));

	pdata = skb_put(newskb, strlen(pkt302));
	if (pdata != NULL)
		memcpy(pdata, pkt302, strlen(pkt302));

	{
		//fill l4
		tcph = (struct tcphdr *)skb_transport_header(newskb);
		memset(tcph, 0, sizeof(struct tcphdr));
		tcph->source = old_tcph->dest;
		tcph->dest = old_tcph->source;
		//tcph->seq = old_tcph->seq;
		//tcph->ack_seq = old_tcph->ack_seq;
		old_data_len = ntohs(old_iph->tot_len) -
			       old_iph->ihl * 4 - old_tcph->doff * 4;
		printk("---------old seq : %08x\r\n", old_tcph->seq);
		printk("---------old ack : %08x\r\n", old_tcph->ack_seq);
		printk("---------old data_len : %d\r\n", old_data_len);
		tcph->seq = old_tcph->ack_seq;
		//tcph->ack_seq = htonl(ntohl(old_tcph->seq) + strlen(pkt302));
		tcph->ack_seq = htonl(ntohl(old_tcph->seq) + old_data_len);
		tcph->doff = 5;
		tcph->psh = 1;
		tcph->ack = 1;
		tcph->window = old_tcph->window;
		newskb->csum = 0;
		tcph->check = 0;
		tcph->urg_ptr = 0;
	}

	{
		//fill l3
		iph = (struct iphdr *)skb_network_header(newskb);
		memset(iph, 0, sizeof(struct iphdr));
		iph->version = 4;
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->frag_off = __constant_htons(0x4000);
		iph->protocol = IPPROTO_TCP;
		iph->tos = 0;
		iph->daddr = old_iph->saddr;
		iph->saddr = old_iph->daddr;
		iph->ttl = 0x40;
		iph->tot_len = __constant_htons(strlen(pkt302) +
						sizeof(struct tcphdr) +
						sizeof(struct iphdr));
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	}

	newskb->csum = skb_checksum(newskb, ETH_HLEN + iph->ihl * 4,
				    strlen(pkt302) + sizeof(struct tcphdr), 0);
	tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
					strlen(pkt302) + sizeof(struct tcphdr),
					IPPROTO_TCP, newskb->csum);

	{
		ethh = (struct ethhdr *)skb_mac_header(newskb);
		//fill l2
		if (skb->mac_len > 0) {
			memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN);
			memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN);
		} else {
			//memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN);
			//memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN);
			//memset(ethh->h_dest, 0, ETH_ALEN);
			//memset(ethh->h_source, 0, ETH_ALEN);
			memcpy(ethh->h_dest, dest, ETH_ALEN);
			memcpy(ethh->h_source, src, ETH_ALEN);
		}
		ethh->h_proto = __constant_htons(ETH_P_IP);
	}

	if (dev)
		dev_put(dev);	/* the "eth0" reference would otherwise leak */

	//skb_pull(newskb, ETH_HLEN);
	return newskb;
}
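/* The sequence numbers above follow the usual pattern for forging a reply on
 * an existing TCP stream: the reply's SEQ is the peer's last ACK, and the
 * reply's ACK acknowledges everything the peer just sent. A stand-alone
 * restatement of that arithmetic (helper name hypothetical):
 */
static void tcp_reply_seq_example(const struct iphdr *iph,
				  const struct tcphdr *tcph,
				  __be32 *seq, __be32 *ack_seq)
{
	u16 data_len = ntohs(iph->tot_len) - iph->ihl * 4 - tcph->doff * 4;

	*seq = tcph->ack_seq;	/* continue where the peer expects us */
	*ack_seq = htonl(ntohl(tcph->seq) + data_len);	/* ack the peer's payload */
}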
/*----------------------------------------------------------------
 * p80211netdev_rx_bh
 *
 * Deferred processing of all received frames.
 *
 * Arguments:
 *	wlandev		WLAN network device structure
 *	skb		skbuff containing a full 802.11 frame.
 * Returns:
 *	nothing
 * Side effects:
 *
 *----------------------------------------------------------------*/
static void p80211netdev_rx_bh(unsigned long arg)
{
	wlandevice_t *wlandev = (wlandevice_t *) arg;
	struct sk_buff *skb = NULL;
	netdevice_t *dev = wlandev->netdev;
	p80211_hdr_a3_t *hdr;
	u16 fc;

	/* Let's empty our queue */
	while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
		if (wlandev->state == WLAN_DEVICE_OPEN) {

			if (dev->type != ARPHRD_ETHER) {
				/* RAW frame; we shouldn't convert it */
				/* XXX Append the Prism Header here instead. */

				/* set up various data fields */
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->ip_summed = CHECKSUM_NONE;
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_80211_RAW);
				dev->last_rx = jiffies;

				wlandev->linux_stats.rx_packets++;
				wlandev->linux_stats.rx_bytes += skb->len;
				netif_rx_ni(skb);
				continue;
			} else {
				hdr = (p80211_hdr_a3_t *) skb->data;
				fc = le16_to_cpu(hdr->fc);
				if (p80211_rx_typedrop(wlandev, fc)) {
					dev_kfree_skb(skb);
					continue;
				}

				/* perform mcast filtering */
				if (wlandev->netdev->flags & IFF_ALLMULTI) {
					/* allow my local address through */
					if (memcmp(hdr->a1,
						   wlandev->netdev->dev_addr,
						   ETH_ALEN) != 0) {
						/* but reject anything else
						 * that isn't multicast */
						if (!(hdr->a1[0] & 0x01)) {
							dev_kfree_skb(skb);
							continue;
						}
					}
				}

				if (skb_p80211_to_ether(wlandev,
							wlandev->ethconv,
							skb) == 0) {
					skb->dev->last_rx = jiffies;
					wlandev->linux_stats.rx_packets++;
					wlandev->linux_stats.rx_bytes +=
						skb->len;
					netif_rx_ni(skb);
					continue;
				}
				pr_debug("p80211_to_ether failed.\n");
			}
		}
		dev_kfree_skb(skb);
	}
}
/*
 * Function async_bump (buf, len, stats)
 *
 *    Got a frame, make a copy of it, and pass it up the stack! We can try
 *    to inline it since it's only called from state_inside_frame
 */
static inline void
async_bump(struct net_device *dev,
	   struct net_device_stats *stats,
	   iobuff_t *rx_buff)
{
	struct sk_buff *newskb;
	struct sk_buff *dataskb;
	int docopy;

	/* Check if we need to copy the data to a new skb or not.
	 * If the driver doesn't use ZeroCopy Rx, we have to do it.
	 * With ZeroCopy Rx, the rx_buff already points to a valid
	 * skb. But, if the frame is small, it is more efficient to
	 * copy it to save memory (copy will be fast anyway - that's
	 * called Rx-copy-break). Jean II */
	docopy = ((rx_buff->skb == NULL) ||
		  (rx_buff->len < IRDA_RX_COPY_THRESHOLD));

	/* Allocate a new skb */
	newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
	if (!newskb) {
		stats->rx_dropped++;
		/* We could deliver the current skb if doing ZeroCopy Rx,
		 * but this would stall the Rx path. Better drop the
		 * packet... Jean II */
		return;
	}

	/* Align IP header to 20 bytes (i.e. increase skb->data)
	 * Note this is only useful with IrLAN, as PPP has a variable
	 * header size (2 or 1 bytes) - Jean II */
	skb_reserve(newskb, 1);

	if (docopy) {
		/* Copy data without CRC (length already checked) */
		skb_copy_to_linear_data(newskb, rx_buff->data,
					rx_buff->len - 2);
		/* Deliver this skb */
		dataskb = newskb;
	} else {
		/* We are using ZeroCopy. Deliver old skb */
		dataskb = rx_buff->skb;
		/* And hook the new skb to the rx_buff */
		rx_buff->skb = newskb;
		rx_buff->head = newskb->data;	/* NOT newskb->head */
		//printk(KERN_DEBUG "ZeroCopy : len = %d, dataskb = %p, newskb = %p\n", rx_buff->len, dataskb, newskb);
	}

	/* Set proper length on skb (without CRC) */
	skb_put(dataskb, rx_buff->len - 2);

	/* Feed it to IrLAP layer */
	dataskb->dev = dev;
	skb_reset_mac_header(dataskb);
	dataskb->protocol = htons(ETH_P_IRDA);

	netif_rx(dataskb);
	stats->rx_packets++;
	stats->rx_bytes += rx_buff->len;

	/* Clean up rx_buff (redundant with async_unwrap_bof() ???) */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}
int cp_dev_xmit_tcp(char *eth, u_char *smac, u_char *dmac,
		    u_char *pkt, int pkt_len,
		    u_long sip, u_long dip,
		    u_short sport, u_short dport,
		    u_long seq, u_long ack_seq,
		    u_char psh, u_char fin)
{
	struct sk_buff *skb = NULL;
	struct net_device *dev = NULL;
	struct ethhdr *ethdr = NULL;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	u_char *pdata = NULL;
	int nret = 1;

	if (NULL == smac || NULL == dmac)
		goto out;

	//dev = dev_get_by_name(eth);
	dev = dev_get_by_name(&init_net, eth);
	if (NULL == dev)
		goto out;
	printk("dev name: %s\n", dev->name);

	//skb = alloc_skb(ETH_HLEN + pkt_len + sizeof(struct iphdr) + sizeof(struct tcphdr) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	skb = alloc_skb(pkt_len + sizeof(struct iphdr) + sizeof(struct tcphdr) +
			ETH_HLEN, GFP_ATOMIC);
	if (NULL == skb)
		goto out;

	//skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reserve(skb, 2);
	skb->dev = dev;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = __constant_htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->priority = 0;

	//skb->nh.iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
	//skb->h.th = (struct tcphdr *)skb_put(skb, sizeof(struct tcphdr));
	skb_put(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);
	skb_put(skb, sizeof(struct iphdr));
	//skb_reset_network_header(skb);
	skb_set_network_header(skb, sizeof(struct ethhdr));
	skb_put(skb, sizeof(struct tcphdr));
	//skb_reset_transport_header(skb);
	skb_set_transport_header(skb, sizeof(struct iphdr) + sizeof(struct ethhdr));

	pdata = skb_put(skb, pkt_len);
	if (NULL != pkt)
		memcpy(pdata, pkt, pkt_len);

	{
		//tcph = (struct tcphdr *)skb->h.th;
		tcph = (struct tcphdr *)skb_transport_header(skb);
		memset(tcph, 0, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->dest = dport;
		tcph->seq = seq;
		tcph->ack_seq = ack_seq;
		tcph->doff = 5;
		tcph->psh = psh;
		tcph->fin = fin;
		tcph->syn = 1;
		tcph->ack = 0;
		tcph->window = __constant_htons(5840);
		skb->csum = 0;
		tcph->check = 0;
	}

	{
		//iph = (struct iphdr *)skb->nh.iph;
		iph = (struct iphdr *)skb_network_header(skb);
		memset(iph, 0, sizeof(struct iphdr));
		iph->version = 4;
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->frag_off = 0;
		iph->protocol = IPPROTO_TCP;
		iph->tos = 0;
		iph->daddr = dip;
		iph->saddr = sip;
		iph->ttl = 0x40;
		/* IP total length excludes the Ethernet header */
		iph->tot_len = htons(skb->len - ETH_HLEN);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	}

	{
		int i = 0;

		printk("len0: %02x\n\n", skb->len);
		for (; i < skb->len; i++) {
			if (i != 0 && i % 16 == 0)
				printk("\n");
			//printk("%02x ", ((unsigned char *)ethdr)[i]);
			printk("%02x ", skb->data[i]);
		}
		printk("\n");
	}

	//skb->csum = skb_checksum(skb, ETH_HLEN + iph->ihl * 4, skb->len - iph->ihl * 4, 0);
	//tcph->check = csum_tcpudp_magic(sip, dip, skb->len - iph->ihl * 4, IPPROTO_TCP, skb->csum);
	skb->csum = skb_checksum(skb, ETH_HLEN + iph->ihl * 4,
				 pkt_len + sizeof(struct tcphdr), 0);
	tcph->check = csum_tcpudp_magic(sip, dip,
					pkt_len + sizeof(struct tcphdr),
					IPPROTO_TCP, skb->csum);

	{
		int i = 0;

		printk("len1: %02x\n\n", skb->len);
		for (; i < skb->len; i++) {
			if (i != 0 && i % 16 == 0)
				printk("\n");
			printk("%02x ", skb->data[i]);
		}
		printk("\n");
	}

	//skb->mac.raw = skb_push(skb, 14);
	//skb_push(skb, 14);
	//skb_reset_mac_header(skb);
	{
		//ethdr = (struct ethhdr *)skb->mac.raw;
		ethdr = (struct ethhdr *)skb_mac_header(skb);
		memcpy(ethdr->h_dest, dmac, ETH_ALEN);
		memcpy(ethdr->h_source, smac, ETH_ALEN);
		ethdr->h_proto = __constant_htons(ETH_P_IP);
	}

	{
		int i = 0;

		printk("len2: %02x\n\n", skb->len);
		for (; i < skb->len; i++) {
			if (i != 0 && i % 16 == 0)
				printk("\n");
			printk("%02x ", skb->data[i]);
		}
		printk("\n");
	}

	/* dev_queue_xmit() consumes the skb even when it fails, so the skb
	 * must not be freed again on the error path below. */
	if (0 > dev_queue_xmit(skb)) {
		skb = NULL;
		goto out;
	}
	printk("aaaaaaaaaa1\n");

	nret = 0;
out:
	if (NULL != dev)
		dev_put(dev);	/* release the dev_get_by_name() reference */
	if (0 != nret && NULL != skb)
		kfree_skb(skb);
	printk("aaaaaaaaaaaa2\n");
	return nret;
}
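/* A hypothetical caller, to show the expected argument encoding: MACs as raw
 * byte arrays, addresses and ports already in network byte order. All values
 * below are illustrative only:
 */
static int cp_dev_xmit_tcp_example(void)
{
	u_char smac[ETH_ALEN] = {0x52, 0x54, 0x00, 0x12, 0x35, 0x02};
	u_char dmac[ETH_ALEN] = {0x08, 0x00, 0x27, 0xc4, 0xe6, 0x3b};
	u_char payload[] = "hello";

	return cp_dev_xmit_tcp("eth0", smac, dmac,
			       payload, sizeof(payload) - 1,
			       in_aton("192.168.1.2"), in_aton("192.168.1.3"),
			       htons(12345), htons(80),
			       htonl(1), htonl(0),
			       0 /* psh */, 0 /* fin */);
}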
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl = GRE_HEADER_SECTION;
	struct gre_base_hdr *greh;
	int mac_len = skb->mac_len;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	if (greh->flags & GRE_KEY)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_SEQ)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_CSUM) {
		ghl += GRE_HEADER_SECTION;
		csum = true;
	} else
		csum = false;

	/* setup inner skb. */
	if (greh->protocol == htons(ETH_P_TEB)) {
		struct ethhdr *eth = eth_hdr(skb);
		skb->protocol = eth->h_proto;
	} else {
		skb->protocol = greh->protocol;
	}

	skb->encapsulation = 0;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs))
		goto out;

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
	} while ((skb = skb->next));
out:
	return segs;
}
/*
 * Context: softIRQ (tasklet)
 */
void
ieee80211_input_monitor(struct ieee80211com *ic, struct sk_buff *skb,
	const struct ath_buf *bf, int tx, u_int64_t mactime,
	struct ath_softc *sc)
{
	struct ieee80211vap *vap, *next;
	struct ath_desc *ds = bf->bf_desc;
	int noise = 0, antenna = 0, ieeerate = 0;
	u_int32_t rssi = 0;
	u_int8_t pkttype = 0;
	unsigned int mon_hdrspace = A_MAX(sizeof(struct ath_tx_radiotap_header),
					  (A_MAX(sizeof(struct wlan_ng_prism2_header),
						 ATHDESC_HEADER_SIZE)));

	if ((skb_headroom(skb) < mon_hdrspace) &&
	    pskb_expand_head(skb, mon_hdrspace, 0, GFP_ATOMIC)) {
		printk("No headroom for monitor header - %s:%d %s\n",
		       __FILE__, __LINE__, __func__);
		return;
	}

	if (tx) {
		rssi = bf->bf_dsstatus.ds_txstat.ts_rssi;
		antenna = bf->bf_dsstatus.ds_txstat.ts_antenna;
		ieeerate = sc->sc_hwmap[bf->bf_dsstatus.ds_txstat.ts_rate].ieeerate;
	} else {
		rssi = bf->bf_dsstatus.ds_rxstat.rs_rssi;
		antenna = bf->bf_dsstatus.ds_rxstat.rs_antenna;
		ieeerate = sc->sc_hwmap[bf->bf_dsstatus.ds_rxstat.rs_rate].ieeerate;
	}
	noise = bf->bf_channoise;

	/* XXX locking */
	for (vap = TAILQ_FIRST(&ic->ic_vaps); vap != NULL; vap = next) {
		struct sk_buff *skb1;
		struct net_device *dev = vap->iv_dev;
		struct ieee80211_frame *wh = (struct ieee80211_frame *)skb->data;
		u_int8_t dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

		next = TAILQ_NEXT(vap, iv_next);

		/* If we have rx'd an error frame... */
		if (!tx && bf->bf_dsstatus.ds_rxstat.rs_status != 0) {
			/* Discard PHY errors if necessary */
			if (bf->bf_dsstatus.ds_rxstat.rs_status & HAL_RXERR_PHY) {
				if (vap->iv_monitor_phy_errors == 0)
					continue;
			}

			/* Discard CRC errors if necessary */
			if (bf->bf_dsstatus.ds_rxstat.rs_status & HAL_RXERR_CRC) {
				if (vap->iv_monitor_crc_errors == 0)
					continue;
			}

			/* Accept PHY, CRC and decrypt errors. Discard the rest. */
			if (bf->bf_dsstatus.ds_rxstat.rs_status &~
			    (HAL_RXERR_DECRYPT | HAL_RXERR_MIC |
			     HAL_RXERR_PHY | HAL_RXERR_CRC))
				continue;

			/* We can't use addr1 to determine direction at this point */
			pkttype = PACKET_HOST;
		} else {
			/*
			 * The frame passed its CRC, so we can rely
			 * on the contents of the frame to set pkttype.
			 */
			if (tx)
				pkttype = PACKET_OUTGOING;
			else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
				if (IEEE80211_ADDR_EQ(wh->i_addr1, dev->broadcast))
					pkttype = PACKET_BROADCAST;
				else
					pkttype = PACKET_MULTICAST;
			} else
				pkttype = PACKET_HOST;
		}

		if (vap->iv_opmode != IEEE80211_M_MONITOR ||
		    vap->iv_state != IEEE80211_S_RUN)
			continue;
		if (vap->iv_monitor_nods_only &&
		    dir != IEEE80211_FC1_DIR_NODS) {
			/* don't rx fromds, tods, or dstods packets */
			continue;
		}

		skb1 = skb_copy(skb, GFP_ATOMIC);
		if (skb1 == NULL) {
			/* XXX stat+msg */
			continue;
		}
		ieee80211_skb_copy_noderef(skb, skb1);

		if (vap->iv_monitor_txf_len && tx) {
			/* truncate transmit feedback packets */
			skb_trim(skb1, vap->iv_monitor_txf_len);
			skb_reset_network_header(skb1);
		}

		switch (vap->iv_dev->type) {
		case ARPHRD_IEEE80211:
			break;
		case ARPHRD_IEEE80211_PRISM: {
			struct wlan_ng_prism2_header *ph;

			if (skb_headroom(skb1) < sizeof(struct wlan_ng_prism2_header)) {
				ieee80211_dev_kfree_skb(&skb1);
				break;
			}

			ph = (struct wlan_ng_prism2_header *)
				skb_push(skb1, sizeof(struct wlan_ng_prism2_header));
			memset(ph, 0, sizeof(struct wlan_ng_prism2_header));

			ph->msgcode = DIDmsg_lnxind_wlansniffrm;
			ph->msglen = sizeof(struct wlan_ng_prism2_header);
			strncpy(ph->devname, dev->name, sizeof(ph->devname));

			ph->hosttime.did = DIDmsg_lnxind_wlansniffrm_hosttime;
			ph->hosttime.status = 0;
			ph->hosttime.len = 4;
			ph->hosttime.data = jiffies;

			/* Pass up tsf clock in mactime */
			/* NB: the prism mactime field is 32 bit, so we lose
			 * TSF precision here */
			ph->mactime.did = DIDmsg_lnxind_wlansniffrm_mactime;
			ph->mactime.status = 0;
			ph->mactime.len = 4;
			ph->mactime.data = mactime;

			ph->istx.did = DIDmsg_lnxind_wlansniffrm_istx;
			ph->istx.status = 0;
			ph->istx.len = 4;
			ph->istx.data = tx ? P80211ENUM_truth_true :
					     P80211ENUM_truth_false;

			ph->frmlen.did = DIDmsg_lnxind_wlansniffrm_frmlen;
			ph->frmlen.status = 0;
			ph->frmlen.len = 4;
			ph->frmlen.data = skb->len;

			ph->channel.did = DIDmsg_lnxind_wlansniffrm_channel;
			ph->channel.status = 0;
			ph->channel.len = 4;
			ph->channel.data =
				ieee80211_mhz2ieee(ic->ic_curchan->ic_freq,
						   ic->ic_curchan->ic_flags);

			ph->rssi.did = DIDmsg_lnxind_wlansniffrm_rssi;
			ph->rssi.status = 0;
			ph->rssi.len = 4;
			ph->rssi.data = rssi;

			ph->noise.did = DIDmsg_lnxind_wlansniffrm_noise;
			ph->noise.status = 0;
			ph->noise.len = 4;
			ph->noise.data = noise;

			ph->signal.did = DIDmsg_lnxind_wlansniffrm_signal;
			ph->signal.status = 0;
			ph->signal.len = 4;
			ph->signal.data = rssi + noise;

			ph->rate.did = DIDmsg_lnxind_wlansniffrm_rate;
			ph->rate.status = 0;
			ph->rate.len = 4;
			ph->rate.data = ieeerate;
			break;
		}
		case ARPHRD_IEEE80211_RADIOTAP: {
			if (tx) {
				struct ath_tx_radiotap_header *th;

				if (skb_headroom(skb1) < sizeof(struct ath_tx_radiotap_header)) {
					printk("%s:%d %s\n",
					       __FILE__, __LINE__, __func__);
					ieee80211_dev_kfree_skb(&skb1);
					break;
				}

				th = (struct ath_tx_radiotap_header *)
					skb_push(skb1, sizeof(struct ath_tx_radiotap_header));
				memset(th, 0, sizeof(struct ath_tx_radiotap_header));

				th->wt_ihdr.it_version = 0;
				th->wt_ihdr.it_len =
					cpu_to_le16(sizeof(struct ath_tx_radiotap_header));
				th->wt_ihdr.it_present =
					cpu_to_le32(ATH_TX_RADIOTAP_PRESENT);

				/* radiotap's TSF field is the full 64 bits, so
				 * we don't lose any TSF precision when using
				 * radiotap */
				th->wt_tsft = cpu_to_le64(mactime);
				th->wt_flags = 0;
				th->wt_rate = ieeerate;
				th->wt_antenna = antenna;
				th->wt_pad = 0;
				if (bf->bf_dsstatus.ds_txstat.ts_status & HAL_TXERR_XRETRY)
					th->wt_txflags |=
						cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
				th->wt_dataretries =
					bf->bf_dsstatus.ds_txstat.ts_shortretry +
					bf->bf_dsstatus.ds_txstat.ts_longretry;
			} else {
				struct ath_rx_radiotap_header *th;

				if (skb_headroom(skb1) < sizeof(struct ath_rx_radiotap_header)) {
					printk("%s:%d %s\n",
					       __FILE__, __LINE__, __func__);
					ieee80211_dev_kfree_skb(&skb1);
					break;
				}

				th = (struct ath_rx_radiotap_header *)
					skb_push(skb1, sizeof(struct ath_rx_radiotap_header));
				memset(th, 0, sizeof(struct ath_rx_radiotap_header));

				th->wr_ihdr.it_version = 0;
				th->wr_ihdr.it_len =
					cpu_to_le16(sizeof(struct ath_rx_radiotap_header));
				th->wr_ihdr.it_present =
					cpu_to_le32(ATH_RX_RADIOTAP_PRESENT);

				if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
					th->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
				if (bf->bf_dsstatus.ds_rxstat.rs_status & HAL_RXERR_CRC)
					th->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
				if (skb->len >= IEEE80211_CRC_LEN)
					th->wr_flags |= IEEE80211_RADIOTAP_F_FCS;

				th->wr_rate = ieeerate;
				th->wr_chan_freq = cpu_to_le16(ic->ic_curchan->ic_freq);

				/* Define the channel flags for radiotap */
				switch (sc->sc_curmode) {
				case IEEE80211_MODE_11A:
					th->wr_chan_flags = cpu_to_le16(IEEE80211_CHAN_A);
					break;
				case IEEE80211_MODE_TURBO_A:
					th->wr_chan_flags = cpu_to_le16(IEEE80211_CHAN_TA);
					break;
				case IEEE80211_MODE_11B:
					th->wr_chan_flags = cpu_to_le16(IEEE80211_CHAN_B);
					break;
				case IEEE80211_MODE_11G:
					th->wr_chan_flags = cpu_to_le16(IEEE80211_CHAN_G);
					break;
				case IEEE80211_MODE_TURBO_G:
					th->wr_chan_flags = cpu_to_le16(IEEE80211_CHAN_TG);
					break;
				default:
					th->wr_chan_flags = 0;	/* unknown */
					break;
				}

				th->wr_dbm_antnoise = (int8_t)noise;
				th->wr_dbm_antsignal = th->wr_dbm_antnoise + rssi;
				th->wr_antenna = antenna;
				th->wr_antsignal = rssi;

				th->wr_tsft = cpu_to_le64(mactime);
			}
			break;
		}
		case ARPHRD_IEEE80211_ATHDESC: {
			if (skb_headroom(skb1) < ATHDESC_HEADER_SIZE) {
				printk("%s:%d %s\n",
				       __FILE__, __LINE__, __func__);
				ieee80211_dev_kfree_skb(&skb1);
				break;
			}
			memcpy(skb_push(skb1, ATHDESC_HEADER_SIZE), ds,
			       ATHDESC_HEADER_SIZE);
			break;
		}
		default:
			break;
		}

		if (skb1 != NULL) {
			if (!tx && (skb1->len >= IEEE80211_CRC_LEN) &&
			    (vap->iv_dev->type != ARPHRD_IEEE80211_RADIOTAP)) {
				/* Remove FCS from end of RX frames when
				 * delivering to non-Radiotap VAPs. */
				skb_trim(skb1, skb1->len - IEEE80211_CRC_LEN);
			}
			skb1->dev = dev;	/* NB: deliver to wlanX */
			skb_reset_mac_header(skb1);

			skb1->ip_summed = CHECKSUM_NONE;
			skb1->pkt_type = pkttype;
			skb1->protocol = __constant_htons(0x0019);	/* ETH_P_80211_RAW */

			if (netif_rx(skb1) == NET_RX_DROP) {
				/* If netif_rx dropped the packet because
				 * device was too busy, reclaim the ref. in
				 * the skb. */
				if (SKB_CB(skb1)->ni != NULL)
					ieee80211_unref_node(&SKB_CB(skb1)->ni);
				vap->iv_devstats.rx_dropped++;
			}
			vap->iv_devstats.rx_packets++;
			vap->iv_devstats.rx_bytes += skb1->len;
		}
	}
}
/*
 *	We have a good packet(s), get it/them out of the buffers.
 */
static void cops_rx(struct net_device *dev)
{
	int pkt_len = 0;
	int rsp_type = 0;
	struct sk_buff *skb = NULL;
	struct cops_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int boguscount = 0;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->board == DAYNA) {
		outb(0, ioaddr);		/* Send out Zero length. */
		outb(0, ioaddr);
		outb(DATA_READ, ioaddr);	/* Send read command out. */

		/* Wait for DMA to turn around. */
		while (++boguscount < 1000000) {
			barrier();
			if ((inb(ioaddr + DAYNA_CARD_STATUS) & 0x03) == DAYNA_RX_READY)
				break;
		}

		if (boguscount == 1000000) {
			printk(KERN_WARNING "%s: DMA timed out.\n", dev->name);
			spin_unlock_irqrestore(&lp->lock, flags);
			return;
		}
	}

	/* Get response length. */
	if (lp->board == DAYNA)
		pkt_len = inb(ioaddr) & 0xFF;
	else
		pkt_len = inb(ioaddr) & 0x00FF;
	pkt_len |= (inb(ioaddr) << 8);
	/* Input IO code. */
	rsp_type = inb(ioaddr);

	/* Malloc up new buffer. */
	skb = dev_alloc_skb(pkt_len);
	if (skb == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
		       dev->name);
		dev->stats.rx_dropped++;
		while (pkt_len--)	/* Discard packet */
			inb(ioaddr);
		spin_unlock_irqrestore(&lp->lock, flags);
		return;
	}
	skb->dev = dev;
	skb_put(skb, pkt_len);
	skb->protocol = htons(ETH_P_LOCALTALK);

	insb(ioaddr, skb->data, pkt_len);	/* Eat the Data */

	if (lp->board == DAYNA)
		outb(1, ioaddr + DAYNA_INT_CARD);	/* Interrupt the card */

	spin_unlock_irqrestore(&lp->lock, flags);	/* Restore interrupts. */

	/* Check for bad response length */
	if (pkt_len < 0 || pkt_len > MAX_LLAP_SIZE) {
		printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
		       dev->name, pkt_len);
		dev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return;
	}

	/* Set nodeid and then get out. */
	if (rsp_type == LAP_INIT_RSP) {
		/* Nodeid taken from received packet. */
		lp->node_acquire = skb->data[0];
		dev_kfree_skb_any(skb);
		return;
	}

	/* One last check to make sure we have a good packet. */
	if (rsp_type != LAP_RESPONSE) {
		printk(KERN_WARNING "%s: Bad packet type %d.\n",
		       dev->name, rsp_type);
		dev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_reset_mac_header(skb);	/* Point to entire packet. */
	skb_pull(skb, 3);
	skb_reset_transport_header(skb);	/* Point to data (Skip header). */

	/* Update the counters. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* Send packet to a higher place. */
	netif_rx(skb);
}
static int create_skb(int (*l3_hdr_fn)(void *, u16, u8, struct tuple *, bool, bool, u16, u8),
		      int l3_hdr_type, int l3_hdr_len, bool df, bool mf,
		      u16 frag_offset, u8 ttl,
		      int (*l4_hdr_fn)(void *, int, u16, struct tuple *),
		      int l4_hdr_type, int l4_hdr_len, int l4_total_len,
		      int (*payload_fn)(void *, u16), u16 payload_len,
		      int (*l4_post_fn)(void *, u16, struct tuple *),
		      struct sk_buff **result, struct tuple *tuple)
{
	struct sk_buff *skb;
	int datagram_len = l4_hdr_len + payload_len;
	int error;

	skb = alloc_skb(LL_MAX_HEADER + l3_hdr_len + datagram_len, GFP_ATOMIC);
	if (!skb) {
		log_err("New packet allocation failed.");
		return -ENOMEM;
	}
	skb->protocol = htons(l3_hdr_type);

	skb_reserve(skb, LL_MAX_HEADER);
	skb_put(skb, l3_hdr_len + l4_hdr_len + payload_len);

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, l3_hdr_len);

	error = l3_hdr_fn(skb_network_header(skb), datagram_len, l4_hdr_type,
			  tuple, df, mf, frag_offset, ttl);
	if (error)
		goto failure;
	error = l4_hdr_fn(skb_transport_header(skb), l3_hdr_type,
			  l4_total_len, tuple);
	if (error)
		goto failure;
	error = payload_fn(skb_transport_header(skb) + l4_hdr_len, payload_len);
	if (error)
		goto failure;
	error = l4_post_fn(skb_transport_header(skb), datagram_len, tuple);
	if (error)
		goto failure;

	switch (l3_hdr_type) {
	case ETH_P_IP:
		error = skb_init_cb_ipv4(skb);
		break;
	case ETH_P_IPV6:
		error = skb_init_cb_ipv6(skb);
		break;
	default:
		error = -EINVAL;
	}
	if (error)
		goto failure;

	*result = skb;
	return 0;

failure:
	kfree_skb(skb);
	return error;
}
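/* create_skb() delegates all header and payload construction to callbacks. A
 * sketch of a caller wiring in hypothetical IPv4/UDP builders; the
 * init_ipv4_hdr/init_udp_hdr/init_payload/post_udp names are illustrative
 * placeholders, not part of the function above:
 */
static int create_skb_example(struct tuple *tuple4, struct sk_buff **skb)
{
	u16 payload_len = 16;

	return create_skb(init_ipv4_hdr, ETH_P_IP, sizeof(struct iphdr),
			  true /* df */, false /* mf */, 0 /* frag offset */,
			  64 /* ttl */,
			  init_udp_hdr, IPPROTO_UDP, sizeof(struct udphdr),
			  sizeof(struct udphdr) + payload_len,
			  init_payload, payload_len,
			  post_udp, skb, tuple4);
}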
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr_4addr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return 0;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return 0;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS are
		 * using incompatible format: FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed, that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_ctl = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return 0;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return 0;
}
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
		/* TODO: Add check on valid buffer address */

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push received packet up the stack. */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
static void send_hsr_supervision_frame(struct hsr_port *master,
				       u8 type, u8 hsrVer)
{
	struct sk_buff *skb;
	int hlen, tlen;
	struct hsr_tag *hsr_tag;
	struct hsr_sup_tag *hsr_stag;
	struct hsr_sup_payload *hsr_sp;
	unsigned long irqflags;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	skb = dev_alloc_skb(sizeof(struct hsr_tag) +
			    sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (skb == NULL)
		return;

	skb_reserve(skb, hlen);

	skb->dev = master->dev;
	skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP);
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP),
			    master->hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;
	skb_reset_mac_header(skb);

	if (hsrVer > 0) {
		hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
		hsr_tag->encap_proto = htons(ETH_P_PRP);
		set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
	}

	hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
	set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	if (hsrVer > 0) {
		hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
		hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sup_sequence_nr++;
		master->hsr->sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sequence_nr++;
	}
	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_stag->HSR_TLV_Type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA */
	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);

	skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);

	hsr_forward_skb(skb, master);

	return;

out:
	WARN_ONCE(1, "HSR: Could not send supervision frame\n");
	kfree_skb(skb);
}
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 *  ch		The channel where this skb has been received.
 *  pskb	The received skb.
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
				/*
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet type 0x%04x"
					" - dropping",
					CTCM_FUNTAIL, dev->name, header->type);
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if ((header->length <= LL_HEADER_LENGTH) ||
		    (len <= LL_HEADER_LENGTH)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet size %d(%d,%d)"
					"- dropping",
					CTCM_FUNTAIL, dev->name,
					header->length, dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Packet size %d (overrun)"
					" - dropping",
					CTCM_FUNTAIL, dev->name,
					header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): MEMORY allocation error",
					CTCM_FUNTAIL, dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;
		/*
		 * reset logflags
		 */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				CTCM_DBF_DEV_NAME(TRACE, dev,
					"Overrun in ctcm_unpack_skb");
				ch->logflags |= LOG_FLAG_OVERRUN;
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
/* * Prepends an ISI header and sends a datagram. */ static int pn_send(struct sk_buff *skb, struct net_device *dev, u16 dst, u16 src, u8 res, u8 irq) { struct phonethdr *ph; int err, i; if (skb->len + 2 > 0xffff /* Phonet length field limit */ || skb->len + sizeof(struct phonethdr) > dev->mtu) { err = -EMSGSIZE; goto drop; } /* Broadcast sending is not implemented */ if (pn_addr(dst) == PNADDR_BROADCAST) { err = -EOPNOTSUPP; goto drop; } skb_reset_transport_header(skb); WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */ skb_push(skb, sizeof(struct phonethdr)); skb_reset_network_header(skb); ph = pn_hdr(skb); ph->pn_rdev = pn_dev(dst); ph->pn_sdev = pn_dev(src); ph->pn_res = res; ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph)); ph->pn_robj = pn_obj(dst); ph->pn_sobj = pn_obj(src); skb->protocol = htons(ETH_P_PHONET); skb->priority = 0; skb->dev = dev; PN_PRINTK("pn_send rdev %x sdev %x res %x robj %x sobj %x netdev=%s\n", ph->pn_rdev, ph->pn_sdev, ph->pn_res, ph->pn_robj, ph->pn_sobj, dev->name); PN_DATA_PRINTK("PHONET : skb data = %d\nPHONET :", skb->len); for (i = 1; i <= skb->len; i++) { PN_DATA_PRINTK(" %02x", skb->data[i-1]); if ((i%8) == 0) PN_DATA_PRINTK("\n"); } if (skb->pkt_type == PACKET_LOOPBACK) { skb_reset_mac_header(skb); skb_orphan(skb); err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0; } else { err = dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len); if (err < 0) { err = -EHOSTUNREACH; goto drop; } err = dev_queue_xmit(skb); if (unlikely(err > 0)) err = net_xmit_errno(err); } return err; drop: printk(KERN_DEBUG "pn_send DROP\n"); kfree_skb(skb); return err; }
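/* pn_send() prepends its protocol header with skb_push() and then either
 * loops the buffer back into the stack or queues it on the device. A minimal
 * sketch of the same prepend-and-send pattern for a made-up 4-byte header
 * (struct toyhdr and the use of ETH_P_802_EX1 are illustrative assumptions):
 */
struct toyhdr {
	u8 dst, src;
	__be16 len;
};

static int example_prepend_and_send(struct sk_buff *skb,
				    struct net_device *dev, u8 dst, u8 src)
{
	struct toyhdr *th;

	if (skb_headroom(skb) < sizeof(*th))
		return -ENOBUFS;	/* caller must have reserved headroom */

	th = (struct toyhdr *)skb_push(skb, sizeof(*th));
	skb_reset_network_header(skb);
	th->dst = dst;
	th->src = src;
	th->len = htons(skb->len - sizeof(*th));

	skb->protocol = htons(ETH_P_802_EX1);	/* local experimental ethertype */
	skb->dev = dev;
	return dev_queue_xmit(skb);	/* consumes the skb */
}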
/**
 * Generate a packet from TCP data.
 * Builds a reply packet from the given TCP data and fills in the
 * MAC/IP/TCP header fields.
 * @param skb        address of the original sk_buff
 * @param names      address of the first NIC-name structure
 * @param num        number of NICs
 * @param tcpdata    address of the TCP data
 * @param tcpdatalen length of the TCP data
 * @return the new packet on success, NULL on failure.
 */
struct sk_buff *pkg_skbuff_generate(struct sk_buff *skb, struct client_nicname *names, int num, char *tcpdata, int tcpdatalen)
{
	struct sk_buff *new_skb = NULL;
	struct net_device *dev = NULL;
	struct iphdr *iph = NULL, *new_iph = NULL;
	struct tcphdr *tcph = NULL, *new_tcph = NULL;
	struct ethhdr *ethdr = NULL;
	char *newpdata = NULL;
	unsigned char *mac_header_addr = NULL;
	int i = 0;

	if (!skb || !names)
		goto out;

	iph = ip_hdr(skb);
	if (iph == NULL)
		goto out;

	tcph = (struct tcphdr *)((char *)iph + iph->ihl*4);
	if (tcph == NULL)
		goto out;

	ethdr = eth_hdr(skb);
	if (ethdr == NULL)
		goto out;

	for (i = 0; names[i].index != -1; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) /* not sure whether the cutoff should be an even earlier version */
		dev = dev_get_by_name(names[i].name);
#else
		dev = dev_get_by_name(&init_net, names[i].name);
#endif
		if (dev != NULL)
			break;
	}
	if (dev == NULL)
		goto out;

	new_skb = alloc_skb(tcpdatalen + iph->ihl*4 + tcph->doff*4 + 14, GFP_ATOMIC);
	if (new_skb == NULL)
		goto out;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
	new_skb->mac_header = new_skb->data;
	skb_reserve(new_skb, 14);
	new_skb->transport_header = new_skb->data;
	new_skb->network_header = new_skb->data;
	/* get_route_mac(iph->saddr, iph->daddr); */
	memcpy(&new_skb->mac_header[0], ethdr->h_source, 6);
	memcpy(&new_skb->mac_header[6], ethdr->h_dest, 6);
	new_skb->mac_header[12] = 0x08;
	new_skb->mac_header[13] = 0x00;
#else
	skb_reset_mac_header(new_skb);
	skb_reserve(new_skb, 14);
	skb_reset_transport_header(new_skb);
	skb_reset_network_header(new_skb);
	mac_header_addr = skb_mac_header(new_skb);
	if (mac_header_addr == NULL) {
		printk("Can't get header address!\n");
		goto out;
	}
	/* get_route_mac(iph->saddr, iph->daddr); */
	memcpy(mac_header_addr, ethdr->h_source, 6);
	memcpy(mac_header_addr + 6, ethdr->h_dest, 6);
	mac_header_addr[12] = 0x08;
	mac_header_addr[13] = 0x00;
#endif

	skb_put(new_skb, iph->ihl*4 + tcph->doff*4);
	new_skb->mac_len = 14;
	new_skb->dev = dev;
	new_skb->pkt_type = PACKET_OTHERHOST;
	new_skb->protocol = __constant_htons(ETH_P_IP);
	new_skb->ip_summed = CHECKSUM_NONE;
	new_skb->priority = 0;

	/*
	 * IP set
	 */
	new_iph = (struct iphdr *)new_skb->data;
	memset((char *)new_iph, 0, iph->ihl*4);
	new_iph->version = iph->version;
	new_iph->ihl = iph->ihl;
	new_iph->tos = iph->tos;
	new_iph->id = iph->id;
	new_iph->ttl = iph->ttl;
	new_iph->frag_off = iph->frag_off;
	new_iph->protocol = IPPROTO_TCP;
	/* reply packet: swap source and destination */
	new_iph->saddr = iph->daddr;
	new_iph->daddr = iph->saddr;
	new_iph->tot_len = htons(tcpdatalen + iph->ihl*4 + tcph->doff*4);
	new_iph->check = 0;

	/*
	 * TCP set
	 */
	new_tcph = (struct tcphdr *)(new_skb->data + iph->ihl*4);
	memset((char *)new_tcph, 0, tcph->doff*4);
	new_tcph->source = tcph->dest;
	new_tcph->dest = tcph->source;
	new_tcph->seq = tcph->ack_seq;
	new_tcph->ack_seq = htonl(ntohl(tcph->seq) + (ntohs(iph->tot_len) - iph->ihl*4 - tcph->doff*4));
	new_tcph->doff = tcph->doff;
	new_tcph->fin = tcph->fin;
	new_tcph->ack = tcph->ack;
	new_tcph->psh = tcph->psh;
	new_tcph->window = tcph->window;
	new_tcph->check = 0;

	if (tcpdatalen > 0) {
		newpdata = skb_put(new_skb, tcpdatalen);
		if (newpdata != NULL) {
			if (tcpdata != NULL)
				memcpy(newpdata, tcpdata, tcpdatalen);
		}
	}

	refresh_skb_checksum(new_skb);
	return new_skb;

out:
	/* dev may be NULL if we failed before (or during) the device lookup */
	if (dev != NULL)
		dev_put(dev);
	if (skb != NULL)
		kfree_skb(skb);
	return NULL;
}
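/* The TCP reply built above swaps the endpoints and derives its sequence
 * numbers from the request: the reply's seq is the request's ack_seq, and
 * its ack_seq acknowledges the request's payload. A standalone sketch of
 * that arithmetic (pure calculation, no kernel state assumed; note that a
 * SYN or FIN in the request would consume one extra sequence number, which
 * the code above does not account for):
 */
static void example_reply_seqs(const struct iphdr *iph,
			       const struct tcphdr *tcph,
			       __be32 *reply_seq, __be32 *reply_ack)
{
	u32 payload = ntohs(iph->tot_len) - iph->ihl * 4 - tcph->doff * 4;

	*reply_seq = tcph->ack_seq;	/* continue where the peer expects us */
	*reply_ack = htonl(ntohl(tcph->seq) + payload);	/* ack their payload */
}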
static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); netdev_features_t enc_features; int ghl; struct gre_base_hdr *greh; u16 mac_offset = skb->mac_header; int mac_len = skb->mac_len; __be16 protocol = skb->protocol; int tnl_hlen; bool csum; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP))) goto out; if (!skb->encapsulation) goto out; if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) goto out; greh = (struct gre_base_hdr *)skb_transport_header(skb); ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); if (unlikely(ghl < sizeof(*greh))) goto out; csum = !!(greh->flags & GRE_CSUM); if (csum) skb->encap_hdr_csum = 1; /* setup inner skb. */ skb->protocol = greh->protocol; skb->encapsulation = 0; if (unlikely(!pskb_may_pull(skb, ghl))) goto out; __skb_pull(skb, ghl); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); /* segment inner packet. */ enc_features = skb->dev->hw_enc_features & features; segs = skb_mac_gso_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); goto out; } skb = segs; tnl_hlen = skb_tnl_header_len(skb); do { __skb_push(skb, ghl); if (csum) { __be32 *pcsum; if (skb_has_shared_frag(skb)) { int err; err = __skb_linearize(skb); if (err) { kfree_skb_list(segs); segs = ERR_PTR(err); goto out; } } skb_reset_transport_header(skb); greh = (struct gre_base_hdr *) skb_transport_header(skb); pcsum = (__be32 *)(greh + 1); *pcsum = 0; *(__sum16 *)pcsum = gso_make_checksum(skb, 0); } __skb_push(skb, tnl_hlen - ghl); skb_reset_inner_headers(skb); skb->encapsulation = 1; skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb->mac_len = mac_len; skb->protocol = protocol; } while ((skb = skb->next)); out: return segs; }
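/* gre_gso_segment() follows the generic tunnel-GSO shape: strip the outer
 * headers, segment the inner packet, then re-push the outer headers onto
 * every resulting segment. A condensed sketch of that control flow under
 * the assumption of a single opaque outer header of outer_hlen bytes
 * (checksum fixup and encapsulation flags omitted; not a drop-in
 * implementation):
 */
static struct sk_buff *example_tunnel_gso(struct sk_buff *skb,
					  netdev_features_t features,
					  unsigned int outer_hlen)
{
	__be16 protocol = skb->protocol;
	int mac_len = skb->mac_len;
	struct sk_buff *segs, *seg;

	__skb_pull(skb, outer_hlen);		/* expose the inner frame */
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	segs = skb_mac_gso_segment(skb, skb->dev->hw_enc_features & features);
	if (IS_ERR_OR_NULL(segs))
		return segs;

	for (seg = segs; seg; seg = seg->next) {
		__skb_push(seg, outer_hlen);	/* restore the outer headers */
		skb_reset_mac_header(seg);
		skb_set_network_header(seg, mac_len);
		seg->mac_len = mac_len;
		seg->protocol = protocol;
	}
	return segs;
}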
bool device_receive_frame( PSDevice pDevice, PSRxDesc pCurrRD ) { PDEVICE_RD_INFO pRDInfo = pCurrRD->pRDInfo; struct net_device_stats *pStats = &pDevice->stats; struct sk_buff *skb; PSMgmtObject pMgmt = pDevice->pMgmt; PSRxMgmtPacket pRxPacket = &(pDevice->pMgmt->sRxPacket); PS802_11Header p802_11Header; unsigned char *pbyRsr; unsigned char *pbyNewRsr; unsigned char *pbyRSSI; PQWORD pqwTSFTime; unsigned short *pwFrameSize; unsigned char *pbyFrame; bool bDeFragRx = false; bool bIsWEP = false; unsigned int cbHeaderOffset; unsigned int FrameSize; unsigned short wEtherType = 0; int iSANodeIndex = -1; int iDANodeIndex = -1; unsigned int ii; unsigned int cbIVOffset; bool bExtIV = false; unsigned char *pbyRxSts; unsigned char *pbyRxRate; unsigned char *pbySQ; unsigned int cbHeaderSize; PSKeyItem pKey = NULL; unsigned short wRxTSC15_0 = 0; unsigned long dwRxTSC47_16 = 0; SKeyItem STempKey; // 802.11h RPI unsigned long dwDuration = 0; long ldBm = 0; long ldBmThreshold = 0; PS802_11Header pMACHeader; bool bRxeapol_key = false; skb = pRDInfo->skb; //PLICE_DEBUG-> pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma, pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE); //PLICE_DEBUG<- pwFrameSize = (unsigned short *)(skb->data + 2); FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount); // Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR // Min (ACK): 10HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR if ((FrameSize > 2364) || (FrameSize <= 32)) { // Frame Size error drop this packet. DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 1\n"); return false; } pbyRxSts = (unsigned char *)(skb->data); pbyRxRate = (unsigned char *)(skb->data + 1); pbyRsr = (unsigned char *)(skb->data + FrameSize - 1); pbyRSSI = (unsigned char *)(skb->data + FrameSize - 2); pbyNewRsr = (unsigned char *)(skb->data + FrameSize - 3); pbySQ = (unsigned char *)(skb->data + FrameSize - 4); pqwTSFTime = (PQWORD)(skb->data + FrameSize - 12); pbyFrame = (unsigned char *)(skb->data + 4); // get packet size FrameSize = cpu_to_le16(*pwFrameSize); if ((FrameSize > 2346)|(FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC // Min: 14 bytes ACK DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n"); return false; } //PLICE_DEBUG-> // update receive statistic counter STAvUpdateRDStatCounter(&pDevice->scStatistic, *pbyRsr, *pbyNewRsr, *pbyRxRate, pbyFrame, FrameSize); pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8); //PLICE_DEBUG<- if (pDevice->bMeasureInProgress) { if ((*pbyRsr & RSR_CRCOK) != 0) pDevice->byBasicMap |= 0x01; dwDuration = (FrameSize << 4); dwDuration /= acbyRxRate[*pbyRxRate%MAX_RATE]; if (*pbyRxRate <= RATE_11M) { if (*pbyRxSts & 0x01) { // long preamble dwDuration += 192; } else { // short preamble dwDuration += 96; } } else { dwDuration += 16; } RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm); ldBmThreshold = -57; for (ii = 7; ii > 0;) { if (ldBm > ldBmThreshold) break; ldBmThreshold -= 5; ii--; } pDevice->dwRPIs[ii] += dwDuration; return false; } if (!is_multicast_ether_addr(pbyFrame)) { if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header)(skb->data + 4))) { pDevice->s802_11Counter.FrameDuplicateCount++; return false; } } // Use for TKIP MIC s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader); // filter packet send from myself if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr, pDevice->abyCurrentNetAddr)) return false; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) { if (IS_CTL_PSPOLL(pbyFrame) || 
!IS_TYPE_CONTROL(pbyFrame)) { p802_11Header = (PS802_11Header)(pbyFrame); // get SA NodeIndex if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) { pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies; pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0; } } } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex)) return false; } if (IS_FC_WEP(pbyFrame)) { bool bRxDecryOK = false; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx WEP pkt\n"); bIsWEP = true; if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) { pKey = &STempKey; pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite; pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex; pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength; pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16; pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0; memcpy(pKey->abyKey, &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0], pKey->uKeyLength ); bRxDecryOK = s_bHostWepRxEncryption(pDevice, pbyFrame, FrameSize, pbyRsr, pMgmt->sNodeDBTable[iSANodeIndex].bOnFly, pKey, pbyNewRsr, &bExtIV, &wRxTSC15_0, &dwRxTSC47_16); } else { bRxDecryOK = s_bHandleRxEncryption(pDevice, pbyFrame, FrameSize, pbyRsr, pbyNewRsr, &pKey, &bExtIV, &wRxTSC15_0, &dwRxTSC47_16); } if (bRxDecryOK) { if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ICV Fail\n"); if ((pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) pDevice->s802_11Counter.TKIPICVErrors++; else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) pDevice->s802_11Counter.CCMPDecryptErrors++; } return false; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP Func Fail\n"); return false; } if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) FrameSize -= 8; // Message Integrity Code else FrameSize -= 4; // 4 is ICV } // // RX OK // //remove the CRC length FrameSize -= ETH_FCS_LEN; if ((!(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address (IS_FRAGMENT_PKT((skb->data+4))) ) { // defragment bDeFragRx = WCTLbHandleFragment(pDevice, (PS802_11Header)(skb->data+4), FrameSize, bIsWEP, bExtIV); pDevice->s802_11Counter.ReceivedFragmentCount++; if (bDeFragRx) { // defrag complete skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb; FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength; } else { return false; } } // Management & Control frame Handle if ((IS_TYPE_DATA((skb->data+4))) == false) { // Handle Control & Manage Frame if (IS_TYPE_MGMT((skb->data+4))) { unsigned char *pbyData1; unsigned char *pbyData2; pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4); pRxPacket->cbMPDULen = FrameSize; pRxPacket->uRSSI = *pbyRSSI; pRxPacket->bySQ = *pbySQ; HIDWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(HIDWORD(*pqwTSFTime)); LODWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(LODWORD(*pqwTSFTime)); if (bIsWEP) { // strip IV pbyData1 = WLAN_HDR_A3_DATA_PTR(skb->data+4); pbyData2 = WLAN_HDR_A3_DATA_PTR(skb->data+4) + 4; for (ii = 0; ii < (FrameSize - 4); ii++) { *pbyData1 = *pbyData2; pbyData1++; pbyData2++; } } pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate); pRxPacket->byRxChannel = (*pbyRxSts) >> 2; vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket); 
/* let the hostapd daemon handle 802.11 management frames */ if (pDevice->bEnableHostapd) { skb->dev = pDevice->apdev; skb->data += 4; skb->tail += 4; skb_put(skb, FrameSize); skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); return true; } } return false; } else {
static int niit_xmit(struct sk_buff *skb, struct net_device *dev) { struct niit_tunnel *tunnel = (struct niit_tunnel *) netdev_priv(tunnel4_dev); struct ethhdr *ethhead; struct iphdr *iph4; struct ipv6hdr *iph6; struct net_device_stats *stats; struct rt6_info *rt6; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ __u8 nexthdr; /* IPv6 next header */ u32 delta; /* calc space inside skb */ unsigned int max_headroom; /* The extra header space needed */ struct in6_addr s6addr; struct in6_addr d6addr; /* * all IPv4 (including ICMP) will be encapsulated. * IPv6 ICMP for IPv4-encapsulated data should be translated * */ if (skb->protocol == htons(ETH_P_IP)) { stats = &tunnel4_dev->stats; PDEBUG("niit: skb->proto = iph4 \n"); iph4 = ip_hdr(skb); s6addr.in6_u.u6_addr32[0] = tunnel->ipv6prefix_1; s6addr.in6_u.u6_addr32[1] = tunnel->ipv6prefix_2; s6addr.in6_u.u6_addr32[2] = tunnel->ipv6prefix_3; s6addr.in6_u.u6_addr32[3] = iph4->saddr; d6addr.in6_u.u6_addr32[0] = tunnel->ipv6prefix_1; d6addr.in6_u.u6_addr32[1] = tunnel->ipv6prefix_2; d6addr.in6_u.u6_addr32[2] = tunnel->ipv6prefix_3; d6addr.in6_u.u6_addr32[3] = iph4->daddr; PDEBUG("niit: ipv4: saddr: %x%x%x%x \n niit: ipv4: daddr %x%x%x%x \n", s6addr.in6_u.u6_addr32[0], s6addr.in6_u.u6_addr32[1], s6addr.in6_u.u6_addr32[2], s6addr.in6_u.u6_addr32[3], d6addr.in6_u.u6_addr32[0], d6addr.in6_u.u6_addr32[1], d6addr.in6_u.u6_addr32[2], d6addr.in6_u.u6_addr32[3]); if ((rt6 = rt6_lookup(dev_net(tunnel4_dev), &d6addr, &s6addr, (tunnel4_dev)->iflink, 0)) == NULL) { stats->tx_carrier_errors++; goto tx_error_icmp; } tdev = rt6->dst.dev; dst_release(&rt6->dst); if (tdev == dev) { PDEBUG("niit: recursion detected tdev == dev \n"); stats->collisions++; goto tx_error; } /* old MTU check */ /* * Resize the buffer so we can push our IPv6 header in */ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { stats->tx_dropped++; dev_kfree_skb(skb); tunnel->recursion--; return 0; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; iph4 = ip_hdr(skb); } delta = skb_network_header(skb) - skb->data; /* make our skb space best fit */ if (delta < sizeof(struct ipv6hdr)) { iph6 = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr) - delta); PDEBUG("niit: iph6 < 0 skb->len %x \n", skb->len); } else if (delta > sizeof(struct ipv6hdr)) { iph6 = (struct ipv6hdr*) skb_pull(skb, delta - sizeof(struct ipv6hdr)); PDEBUG("niit: iph6 > 0 skb->len %x \n", skb->len); } else { iph6 = (struct ipv6hdr*) skb->data; PDEBUG("niit: iph6 = 0 skb->len %x \n", skb->len); } /* what the packet should look like: * skb->network_header = iph6 * skb->transport_header = iph4; */ skb->transport_header = skb->network_header; /* we say skb->transport_header = iph4; */ skb_reset_network_header(skb); /* now we reset the network header to skb->data, which is our IPv6 packet */ skb_reset_mac_header(skb); skb->mac_header = skb->network_header - sizeof(struct ethhdr); skb->mac_len = sizeof(struct ethhdr); /* add a dummy ethhdr so the correct interface link type is used */ ethhead = eth_hdr(skb); memcpy(ethhead->h_dest, tunnel4_dev->dev_addr, ETH_ALEN); memcpy(ethhead->h_source, tunnel4_dev->dev_addr, ETH_ALEN); ethhead->h_proto = htons(ETH_P_IPV6); /* prepare to send it again */ IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST;
skb->dev = tunnel4_dev; skb_dst_drop(skb); /* install v6 header */ memset(iph6, 0, sizeof(struct ipv6hdr)); iph6->version = 6; iph6->payload_len = iph4->tot_len; iph6->hop_limit = iph4->ttl; iph6->nexthdr = IPPROTO_IPIP; memcpy(&(iph6->saddr), &s6addr, sizeof(struct in6_addr)); memcpy(&(iph6->daddr), &d6addr, sizeof(struct in6_addr)); nf_reset(skb); netif_rx(skb); tunnel->recursion--; } else if (skb->protocol == htons(ETH_P_IPV6)) { /* got an IPv6 packet and need to translate it back to IPv4 */ __be32 s4addr; __be32 d4addr; __u8 hoplimit; stats = &tunnel6_dev->stats; PDEBUG("niit: skb->proto = iph6 \n"); iph6 = ipv6_hdr(skb); if (!iph6) { PDEBUG("niit: can't find iph6 \n"); goto tx_error; } /* IPv6 to IPv4 */ hoplimit = iph6->hop_limit; /* check against our prefix, which all packets must have */ if (iph6->daddr.s6_addr32[0] != tunnel->ipv6prefix_1 || iph6->daddr.s6_addr32[1] != tunnel->ipv6prefix_2 || iph6->daddr.s6_addr32[2] != tunnel->ipv6prefix_3) { PDEBUG("niit: xmit ipv6(): dst addr doesn't have our prefix: %x%x%x%x, packet dropped.\n", iph6->daddr.s6_addr32[0], iph6->daddr.s6_addr32[1], iph6->daddr.s6_addr32[2], iph6->daddr.s6_addr32[3]); goto tx_error; } s4addr = iph6->saddr.s6_addr32[3]; d4addr = iph6->daddr.s6_addr32[3]; nexthdr = iph6->nexthdr; /* TODO: handle nexthdr */ /* while(nexthdr != IPPROTO_IPIP) { } */ if(nexthdr != IPPROTO_IPIP) { PDEBUG("niit: can't handle hdrtype: %x.\n", nexthdr); goto tx_error; } iph4 = ipip_hdr(skb); /* TODO: fix the check for a valid route */ /* { struct flowi fl = { .nl_u = { .ip4_u = { .daddr = d4addr, .saddr = s4addr, .tos = RT_TOS(iph4->tos) } }, .oif = tunnel_dev->iflink, .proto = iph4->protocol }; if (ip_route_output_key(dev_net(dev), &rt, &fl)) { PDEBUG("niit : ip route not found \n"); stats->tx_carrier_errors++; goto tx_error_icmp; } } tdev = rt->u.dst.dev; if (tdev == tunnel_dev) { PDEBUG("niit : tdev == tunnel_dev \n"); ip_rt_put(rt); stats->collisions++; goto tx_error; } if (iph4->frag_off) mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); else mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; if (mtu < 68) { PDEBUG("niit : mtu < 68 \n"); stats->collisions++; ip_rt_put(rt); goto tx_error; } if (iph4->daddr && skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); */ /* if (skb->len > mtu) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); ip_rt_put(rt); goto tx_error; } */ /* * check if we can reuse our sk_buff */ if (skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, skb_headroom(skb)); if (!new_skb) { stats->tx_dropped++; dev_kfree_skb(skb); tunnel->recursion--; return 0; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; iph6 = ipv6_hdr(skb); iph4 = ipip_hdr(skb); } delta = skb_transport_header(skb) - skb->data; skb_pull(skb, delta); /* our packet comes with ...
*/ /* skb->network_header iph6; */ /* skb->transport_header iph4; */ skb->network_header = skb->transport_header; /* we say skb->network_header = iph4; */ skb_set_transport_header(skb, sizeof(struct iphdr)); skb->mac_header = skb->network_header - sizeof(struct ethhdr); skb->mac_len = sizeof(struct ethhdr); /* add a dummy ethhdr to use correct interface linktype */ ethhead = eth_hdr(skb); memcpy(ethhead->h_dest, tunnel6_dev->dev_addr, ETH_ALEN); memcpy(ethhead->h_source, tunnel6_dev->dev_addr, ETH_ALEN); ethhead->h_proto = htons(ETH_P_IP); /* prepare to send it again */ IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; skb->dev = tunnel6_dev; skb_dst_drop(skb); /* TODO: set iph4->ttl = hoplimit and recalc the checksum ! */ /* sending */ nf_reset(skb); netif_rx(skb); tunnel->recursion--; } else { stats = &tunnel6_dev->stats; PDEBUG("niit: unknown direction %x \n", skb->protocol); goto tx_error; /* drop */ } return 0; tx_error_icmp: dst_link_failure(skb); PDEBUG("niit: tx_error_icmp\n"); tx_error: PDEBUG("niit: tx_error\n"); stats->tx_errors++; dev_kfree_skb(skb); tunnel->recursion--; return 0; }
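/* The translator above maps IPv4 addresses into IPv6 by appending the 32-bit
 * IPv4 address to a fixed 96-bit prefix, and recovers it from s6_addr32[3]
 * on the way back. A self-contained sketch of that mapping (the prefix words
 * mirror the tunnel->ipv6prefix_* fields used above; helper names are
 * illustrative):
 */
static void example_map_v4_to_v6(struct in6_addr *v6, __be32 v4,
				 __be32 p1, __be32 p2, __be32 p3)
{
	v6->s6_addr32[0] = p1;	/* 96-bit translation prefix */
	v6->s6_addr32[1] = p2;
	v6->s6_addr32[2] = p3;
	v6->s6_addr32[3] = v4;	/* embedded IPv4 address */
}

static __be32 example_map_v6_to_v4(const struct in6_addr *v6)
{
	return v6->s6_addr32[3];	/* the embedded IPv4 address */
}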
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) { struct sk_buff *skb = si->rxskb; dma_addr_t dma_addr; unsigned int len, stat, data; if (!skb) { printk(KERN_ERR "sa1100_ir: SKB is NULL!\n"); return; } /* * Get the current data position. */ dma_addr = sa1100_get_dma_pos(si->rxdma); len = dma_addr - si->rxbuf_dma; if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE); do { /* * Read Status, and then Data. */ stat = Ser2HSSR1; rmb(); data = Ser2HSDR; if (stat & (HSSR1_CRE | HSSR1_ROR)) { dev->stats.rx_errors++; if (stat & HSSR1_CRE) dev->stats.rx_crc_errors++; if (stat & HSSR1_ROR) dev->stats.rx_frame_errors++; } else skb->data[len++] = data; /* * If we hit the end of frame, there's * no point in continuing. */ if (stat & HSSR1_EOF) break; } while (Ser2HSSR0 & HSSR0_EIF); if (stat & HSSR1_EOF) { si->rxskb = NULL; skb_put(skb, len); skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); dev->stats.rx_packets++; dev->stats.rx_bytes += len; /* * Before we pass the buffer up, allocate a new one. */ sa1100_irda_rx_alloc(si); netif_rx(skb); } else { /* * Remap the buffer. */ si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); } }
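/* On end-of-frame the driver above detaches the filled skb, allocates a
 * replacement receive buffer first, and only then passes the old buffer up
 * the stack. A minimal sketch of that replenish-then-deliver pattern
 * (rx_alloc() is a stand-in for the driver's buffer allocator, an
 * assumption for illustration):
 */
static void example_deliver_rx(struct net_device *dev, struct sk_buff **rxskb,
			       unsigned int len, void (*rx_alloc)(void))
{
	struct sk_buff *skb = *rxskb;

	*rxskb = NULL;			/* detach before giving it away */
	skb_put(skb, len);
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	rx_alloc();			/* refill the ring before delivery */
	netif_rx(skb);
}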
static void rx_complete(struct urb *req) { struct net_device *dev = req->context; struct usbpn_dev *pnd = netdev_priv(dev); struct page *page = virt_to_page(req->transfer_buffer); struct sk_buff *skb; unsigned long flags; switch (req->status) { case 0: spin_lock_irqsave(&pnd->rx_lock, flags); skb = pnd->rx_skb; if (!skb) { skb = pnd->rx_skb = netdev_alloc_skb(dev, 12); if (likely(skb)) { /* Can't use pskb_pull() on page in IRQ */ memcpy(skb_put(skb, 1), page_address(page), 1); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 1, req->actual_length); page = NULL; } } else { skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, req->actual_length); page = NULL; } if (req->actual_length < PAGE_SIZE) pnd->rx_skb = NULL; /* Last fragment */ else skb = NULL; spin_unlock_irqrestore(&pnd->rx_lock, flags); if (skb) { skb->protocol = htons(ETH_P_PHONET); skb_reset_mac_header(skb); __skb_pull(skb, 1); skb->dev = dev; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; netif_rx(skb); } goto resubmit; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: req = NULL; break; case -EOVERFLOW: dev->stats.rx_over_errors++; dev_dbg(&dev->dev, "RX overflow\n"); break; case -EILSEQ: dev->stats.rx_crc_errors++; break; } dev->stats.rx_errors++; resubmit: if (page) netdev_free_page(dev, page); if (req) rx_submit(pnd, req, GFP_ATOMIC); }
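/* rx_complete() accumulates one Phonet packet from several URB-sized page
 * fragments: the first byte is copied into the linear area so the header can
 * be read directly, and each page is attached as a paged frag. A reduced
 * sketch of that accumulation step, assuming the same pre-truesize
 * skb_add_rx_frag() signature the code above uses (error handling and
 * last-fragment detection omitted):
 */
static struct sk_buff *example_add_fragment(struct net_device *dev,
					    struct sk_buff *rx_skb,
					    struct page *page, size_t len)
{
	if (!rx_skb) {
		rx_skb = netdev_alloc_skb(dev, 1);
		if (!rx_skb)
			return NULL;
		/* keep one header byte linear; the rest stays in the page */
		memcpy(skb_put(rx_skb, 1), page_address(page), 1);
		skb_add_rx_frag(rx_skb, skb_shinfo(rx_skb)->nr_frags,
				page, 1, len - 1);
	} else {
		skb_add_rx_frag(rx_skb, skb_shinfo(rx_skb)->nr_frags,
				page, 0, len);
	}
	return rx_skb;
}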
/* Handle linearized sk_buff post_routing */ static unsigned int post_routing_process(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out) { char* ip_pkt; char* nip_pkt; int ip_pkt_len, nip_pkt_len; int eth_vlan_hdr_len, full_pkt_len, ip_hdr_len; int err; struct sk_buff *nskb; struct ethhdr *eth_hdr; struct flow_keys flow_key; struct iphdr *ip_hdr; struct iphdr *nip_hdr; struct stack stk; void *saddr; void *daddr; __be16 proto; u32 hash; __wsum nskb_csum = 0; unsigned char dst[] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; /* if(skb_is_nonlinear(skb)){ pr_debug("Non-linear skb.. linearizing...\n"); if(skb_linearize(skb)){ pr_debug("Failed to linearize!\n"); } } */ if (skb_is_nonlinear(skb)){ pr_debug("process_pkt: Still non-linear skb.\n"); return NF_ACCEPT; } proto = ntohs(skb->protocol); pr_debug("Proto: %04x\n", proto); switch (proto) { case ETH_P_IP: eth_hdr = (struct ethhdr *) skb_mac_header(skb); /* need to set */ ip_hdr = (struct iphdr *) skb_network_header(skb); ip_hdr_len = ip_hdrlen(skb); ip_pkt = (char *) ip_hdr; ip_pkt_len = ntohs(ip_hdr->tot_len); pr_debug("IP pkt_len = %d IP hdr len = %d \n", ip_pkt_len, ip_hdr_len); if(skb_flow_dissect(skb, &flow_key)){ print_ip(flow_key.src); print_ip(flow_key.dst); pr_debug("Ports %d %d\n", ntohs(flow_key.port16[0]), ntohs(flow_key.port16[1])); hash = flow_keys_hash(flow_key); pr_debug("Hash: %x\n", hash); } else{ pr_debug("Failed to dissect flow\n"); return NF_ACCEPT; } /* stats_entry_inc(be32_to_cpu(flow_key.dst), ip_pkt_len); */ pr_debug("Proto: IP pkt\nGetting stk from flow_table for %u\n", be32_to_cpu(flow_key.dst)); stk = flow_table_get(flow_table, flow_key, routing_table, be32_to_cpu(flow_key.dst)); if(stk.num_tags == -1) { /* no_stack */ pr_debug("flow_table miss!
consulting rt_table\n"); stk = get_random_stack_for_dst(be32_to_cpu(flow_key.dst), routing_table); flow_table_set(flow_table, flow_key, stk); } if(stk.num_tags < 0) { stk.num_tags=0; } eth_vlan_hdr_len = ETH_HLEN + stk.num_tags * sizeof(vlan_label); full_pkt_len = eth_vlan_hdr_len + ip_pkt_len; pr_debug("Full length: %d", full_pkt_len); // Allocate new skb nskb = alloc_skb(full_pkt_len, GFP_ATOMIC); if (nskb == NULL) { return NF_ACCEPT; } if (skb->sk != NULL) { skb_set_owner_w(nskb, skb->sk); } else { kfree_skb(nskb); return NF_ACCEPT; } pr_debug("mod_vlan: nskb - Reserving header\n"); // Reserve space for eth and vlan headers skb_reserve(nskb, eth_vlan_hdr_len); // Copy IP packet pr_debug("mod_vlan: copying IP pkt.\n"); if (!(nip_pkt = skb_put(nskb, ip_pkt_len))) { pr_debug("skb_put failed!\n"); kfree_skb(nskb); return NF_ACCEPT; } skb_reset_network_header(nskb); memcpy(nip_pkt, ip_pkt, ip_pkt_len); nip_hdr = (struct iphdr *) nip_pkt; nip_pkt_len = ntohs(nip_hdr->tot_len); nskb_csum = fix_csum(nip_hdr); // Set VLAN stack if (set_vlan_stack(nskb, &stk)) { proto = ETH_P_8021Q; } // Get outgoing interface nskb->dev = dev_get_by_name(&init_net, out->name); if (!nskb->dev) { pr_debug("mod_vlan dev_get_by_name (%s) FAILED.", out->name); kfree_skb(nskb); return NF_ACCEPT; } /* // Reduce MTU, if needed if (nskb->dev->mtu > 1500 - (4 * stk.num_tags)) { pr_debug("Setting MTU: (%s) %u", out->name, 1500 - (4 * stk.num_tags)); nskb->dev->mtu = 1500 - (4 * stk.num_tags); } pr_debug("mod_vlan dev_get_by_name success, nskb->dev->name='%s'", nskb->dev->name); */ saddr = nskb->dev->dev_addr; daddr = dst; // ARP Lookup if (get_dst_haddr(daddr, flow_key.dst, nskb->dev) != 0){ pr_debug("ARP lookup - FAILED!\n"); kfree_skb(nskb); return NF_ACCEPT; } // Set DL header print_mac(saddr); print_mac(daddr); pr_debug("calling dev_hard_header\n"); if (!dev_hard_header(nskb, nskb->dev, proto, daddr, saddr, nskb->dev->addr_len)) { pr_debug("mod_vlan dev_hard_header FAILED.\n"); kfree_skb(nskb); return NF_ACCEPT; } skb_reset_mac_header(nskb); // Set skb checksum nskb->csum = nskb_csum; // Send out packet - dev_queue_xmit will consume nskb pr_debug("mod_vlan: sending nskb....\n"); if ((err = dev_queue_xmit(nskb)) != NET_XMIT_SUCCESS) { pr_debug("mod_vlan dev_queue_xmit failed. %d\n", err); return NF_ACCEPT; } // Consume original skb consume_skb(skb); pr_debug("------ success - returning ------\n"); return NF_STOLEN; break; default: pr_debug("Proto: Non-IP pkt\n"); break; } // default: if we didn't send a new skb, then accept the original return NF_ACCEPT; }
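/* The hook above rebuilds the frame in a fresh skb and returns NF_STOLEN
 * once its copy is queued, so the stack stops processing the original. A
 * bare-bones sketch of that alloc/fill/transmit shape (VLAN tagging and
 * checksum handling left out; daddr/saddr are caller-supplied and the
 * helper name is an illustrative assumption):
 */
static int example_clone_and_xmit(struct net_device *dev,
				  const void *payload, unsigned int len,
				  const u8 *daddr, const u8 *saddr)
{
	struct sk_buff *nskb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

	if (!nskb)
		return -ENOMEM;

	skb_reserve(nskb, ETH_HLEN);		/* headroom for Ethernet */
	memcpy(skb_put(nskb, len), payload, len);
	skb_reset_network_header(nskb);

	nskb->dev = dev;
	if (dev_hard_header(nskb, dev, ETH_P_IP, daddr, saddr, len) < 0) {
		kfree_skb(nskb);
		return -EINVAL;
	}
	skb_reset_mac_header(nskb);

	return dev_queue_xmit(nskb);		/* consumes nskb */
}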
/** * ovs_flow_extract - extracts a flow key from an Ethernet frame. * @skb: sk_buff that contains the frame, with skb->data pointing to the * Ethernet header * @in_port: port number on which @skb was received. * @key: output flow key * * The caller must ensure that skb->len >= ETH_HLEN. * * Returns 0 if successful, otherwise a negative errno value. * * Initializes @skb header pointers as follows: * * - skb->mac_header: the Ethernet header. * * - skb->network_header: just past the Ethernet header, or just past the * VLAN header, to the first byte of the Ethernet payload. * * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 * on output, then just past the IP header, if one is present and * of a correct length, otherwise the same as skb->network_header. * For other key->eth.type values it is left untouched. */ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) { int error; struct ethhdr *eth; memset(key, 0, sizeof(*key)); key->phy.priority = skb->priority; if (OVS_CB(skb)->tun_key) memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); key->phy.in_port = in_port; key->phy.skb_mark = skb->mark; skb_reset_mac_header(skb); /* Link layer. We are guaranteed to have at least the 14 byte Ethernet * header in the linear data area. */ eth = eth_hdr(skb); memcpy(key->eth.src, eth->h_source, ETH_ALEN); memcpy(key->eth.dst, eth->h_dest, ETH_ALEN); __skb_pull(skb, 2 * ETH_ALEN); /* We are going to push all headers that we pull, so no need to * update skb->csum here. */ if (vlan_tx_tag_present(skb)) key->eth.tci = htons(skb->vlan_tci); else if (eth->h_proto == htons(ETH_P_8021Q)) if (unlikely(parse_vlan(skb, key))) return -ENOMEM; key->eth.type = parse_ethertype(skb); if (unlikely(key->eth.type == htons(0))) return -ENOMEM; skb_reset_network_header(skb); __skb_push(skb, skb->data - skb_mac_header(skb)); /* Network layer. */ if (key->eth.type == htons(ETH_P_IP)) { struct iphdr *nh; __be16 offset; error = check_iphdr(skb); if (unlikely(error)) { if (error == -EINVAL) { skb->transport_header = skb->network_header; error = 0; } return error; } nh = ip_hdr(skb); key->ipv4.addr.src = nh->saddr; key->ipv4.addr.dst = nh->daddr; key->ip.proto = nh->protocol; key->ip.tos = nh->tos; key->ip.ttl = nh->ttl; offset = nh->frag_off & htons(IP_OFFSET); if (offset) { key->ip.frag = OVS_FRAG_TYPE_LATER; return 0; } if (nh->frag_off & htons(IP_MF) || skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; /* Transport layer. */ if (key->ip.proto == IPPROTO_TCP) { if (tcphdr_ok(skb)) { struct tcphdr *tcp = tcp_hdr(skb); key->ipv4.tp.src = tcp->source; key->ipv4.tp.dst = tcp->dest; key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp); } } else if (key->ip.proto == IPPROTO_UDP) { if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->ipv4.tp.src = udp->source; key->ipv4.tp.dst = udp->dest; } } else if (key->ip.proto == IPPROTO_SCTP) { if (sctphdr_ok(skb)) { struct sctphdr *sctp = sctp_hdr(skb); key->ipv4.tp.src = sctp->source; key->ipv4.tp.dst = sctp->dest; } } else if (key->ip.proto == IPPROTO_ICMP) { if (icmphdr_ok(skb)) { struct icmphdr *icmp = icmp_hdr(skb); /* The ICMP type and code fields use the 16-bit * transport port fields, so we need to store * them in 16-bit network byte order. 
*/ key->ipv4.tp.src = htons(icmp->type); key->ipv4.tp.dst = htons(icmp->code); } } } else if ((key->eth.type == htons(ETH_P_ARP) || key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); if (arp->ar_hrd == htons(ARPHRD_ETHER) && arp->ar_pro == htons(ETH_P_IP) && arp->ar_hln == ETH_ALEN && arp->ar_pln == 4) { /* We only match on the lower 8 bits of the opcode. */ if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ nh_len = parse_ipv6hdr(skb, key); if (unlikely(nh_len < 0)) { if (nh_len == -EINVAL) { skb->transport_header = skb->network_header; error = 0; } else { error = nh_len; } return error; } if (key->ip.frag == OVS_FRAG_TYPE_LATER) return 0; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; /* Transport layer. */ if (key->ip.proto == NEXTHDR_TCP) { if (tcphdr_ok(skb)) { struct tcphdr *tcp = tcp_hdr(skb); key->ipv6.tp.src = tcp->source; key->ipv6.tp.dst = tcp->dest; key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp); } } else if (key->ip.proto == NEXTHDR_UDP) { if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->ipv6.tp.src = udp->source; key->ipv6.tp.dst = udp->dest; } } else if (key->ip.proto == NEXTHDR_SCTP) { if (sctphdr_ok(skb)) { struct sctphdr *sctp = sctp_hdr(skb); key->ipv6.tp.src = sctp->source; key->ipv6.tp.dst = sctp->dest; } } else if (key->ip.proto == NEXTHDR_ICMP) { if (icmp6hdr_ok(skb)) { error = parse_icmpv6(skb, key, nh_len); if (error) return error; } } } return 0; }
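/* As the comment above notes, the ICMP type and code are stored in the
 * 16-bit transport-port slots of the flow key, in network byte order. A
 * tiny sketch of that encoding (the tp_src/tp_dst naming follows the key
 * layout used above; the helper itself is illustrative):
 */
static void example_icmp_key(const struct icmphdr *icmp,
			     __be16 *tp_src, __be16 *tp_dst)
{
	*tp_src = htons(icmp->type);	/* type in the "source port" slot */
	*tp_dst = htons(icmp->code);	/* code in the "destination port" slot */
}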
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) { struct sk_buff *skb = NULL; struct mlx5ehdr *mlxh; struct ethhdr *ethh; struct udphdr *udph; struct iphdr *iph; int datalen, iplen; datalen = MLX5E_TEST_PKT_SIZE - (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph)); skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); if (!skb) { netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n"); return NULL; } prefetchw(skb->data); skb_reserve(skb, NET_IP_ALIGN); /* Reserve for ethernet and IP header */ ethh = skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); skb_set_network_header(skb, skb->len); iph = skb_put(skb, sizeof(struct iphdr)); skb_set_transport_header(skb, skb->len); udph = skb_put(skb, sizeof(struct udphdr)); /* Fill ETH header */ ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr); eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_IP); /* Fill UDP header */ udph->source = htons(9); udph->dest = htons(9); /* Discard Protocol */ udph->len = htons(datalen + sizeof(struct udphdr)); udph->check = 0; /* Fill IP header */ iph->ihl = 5; iph->ttl = 32; iph->version = 4; iph->protocol = IPPROTO_UDP; iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; iph->tot_len = htons(iplen); iph->frag_off = 0; iph->saddr = 0; iph->daddr = 0; iph->tos = 0; iph->id = 0; ip_send_check(iph); /* Fill test header and data */ mlxh = skb_put(skb, sizeof(*mlxh)); mlxh->version = 0; mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text)); datalen -= sizeof(*mlxh); skb_put_zero(skb, datalen); skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; udp4_hwcsum(skb, iph->saddr, iph->daddr); skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; skb->dev = priv->netdev; return skb; }
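/* The self-test packet above requests hardware checksumming by setting
 * CHECKSUM_PARTIAL and letting udp4_hwcsum() prime the pseudo-header sum.
 * For comparison, a sketch of computing the full UDP checksum in software
 * instead, assuming a linear skb where udph is followed contiguously by
 * the payload (not the driver's method, just the software equivalent):
 */
static void example_udp_sw_csum(struct sk_buff *skb, struct iphdr *iph,
				struct udphdr *udph)
{
	int ulen = ntohs(udph->len);
	__wsum csum;

	udph->check = 0;
	csum = csum_partial(udph, ulen, 0);	/* sum UDP header + payload */
	udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ulen,
					IPPROTO_UDP, csum);
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
	skb->ip_summed = CHECKSUM_NONE;		/* already checksummed in software */
}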