/*
 * tarpit_tcp4 - answer an IPv4 TCP segment with a crafted "tarpit" reply.
 *
 * Builds the reply by fully copying @oldskb, swapping addresses and ports,
 * stripping TCP options and payload, and letting tarpit_generic() choose
 * the TCP flags for the requested @mode (tarpit, honeypot or reset).  The
 * reply is re-routed and injected through NF_INET_LOCAL_OUT.  All failures
 * are silent: the function simply sends nothing.
 *
 * @net:    network namespace to route/transmit in
 * @oldskb: the offending packet being answered
 * @hook:   netfilter hook we were invoked from (affects routing/csum check)
 * @mode:   one of the XTTARPIT_* modes
 */
static void tarpit_tcp4(struct net *net, struct sk_buff *oldskb,
                        unsigned int hook, unsigned int mode)
{
	struct tcphdr _otcph, *tcph;
	const struct tcphdr *oth;
	unsigned int addr_type = RTN_UNSPEC;
	struct sk_buff *nskb;
	const struct iphdr *oldhdr;
	struct iphdr *niph;
	uint16_t tmp, payload;

	/* A truncated TCP header is not going to be useful */
	if (oldskb->len < ip_hdrlen(oldskb) + sizeof(struct tcphdr))
		return;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
	                         sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* Check checksum. */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return;

	/*
	 * Copy skb (even if skb is about to be dropped, we cannot just
	 * clone it because there may be other things, such as tcpdump,
	 * interested in it)
	 */
	nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
	                       GFP_ATOMIC);
	if (nskb == NULL)
		return;

	/* This packet will not be the same as the other: clear nf fields */
	nf_reset(nskb);
	skb_nfmark(nskb) = 0;
	skb_init_secmark(nskb);
	skb_shinfo(nskb)->gso_size = 0;
	skb_shinfo(nskb)->gso_segs = 0;
	skb_shinfo(nskb)->gso_type = 0;
	oldhdr = ip_hdr(oldskb);
	tcph = (struct tcphdr *)(skb_network_header(nskb) + ip_hdrlen(nskb));

	/* Swap source and dest */
	niph = ip_hdr(nskb);
	niph->daddr = xchg(&niph->saddr, niph->daddr);
	tmp = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp;

	/* Payload length of the original segment, needed by tarpit_generic()
	 * for the seq/ack arithmetic.  Must be computed before the trim below.
	 */
	payload = nskb->len - ip_hdrlen(nskb) - sizeof(struct tcphdr);

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr) / 4;
	skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr));
	niph->tot_len = htons(nskb->len);
	tcph->urg_ptr = 0;

	/* Reset flags: byte 13 of the TCP header holds all the flag bits. */
	((u_int8_t *)tcph)[13] = 0;

	if (!tarpit_generic(tcph, oth, payload, mode))
		goto free_nskb;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
	                           niph->daddr,
	                           csum_partial((char *)tcph,
	                                        sizeof(struct tcphdr), 0));

	/* Set DF, id = 0 */
	niph->frag_off = htons(IP_DF);
	if (mode == XTTARPIT_TARPIT || mode == XTTARPIT_RESET)
		niph->id = 0;
	else if (mode == XTTARPIT_HONEYPOT)
		/* NOTE(review): honeypot mode derives the IP id from the
		 * original packet's id rather than zeroing it — presumably
		 * to look less like a generated reply; confirm intent. */
		niph->id = ~oldhdr->id + 1;

#ifdef CONFIG_BRIDGE_NETFILTER
	if (hook != NF_INET_FORWARD || (nskb->nf_bridge != NULL &&
	    nskb->nf_bridge->physoutdev != NULL))
#else
	if (hook != NF_INET_FORWARD)
#endif
		addr_type = RTN_LOCAL;

	if (ip_route_me_harder(net, nskb, addr_type))
		goto free_nskb;
	else
		/* re-routing may have reallocated the header: reload */
		niph = ip_hdr(nskb);

	nskb->ip_summed = CHECKSUM_NONE;

	/* Adjust IP TTL */
	if (mode == XTTARPIT_HONEYPOT)
		niph->ttl = 128;	/* fixed TTL for honeypot mode */
	else
		niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));

	/* Adjust IP checksum */
	niph->check = 0;
	niph->check = ip_fast_csum(skb_network_header(nskb), niph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, nskb->sk, nskb, NULL,
	        skb_dst(nskb)->dev, dst_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}
void rxrpc_UDP_error_report(struct sock *sk) { struct sock_exterr_skb *serr; struct rxrpc_transport *trans; struct rxrpc_local *local = sk->sk_user_data; struct rxrpc_peer *peer; struct sk_buff *skb; __be32 addr; __be16 port; _enter("%p{%d}", sk, local->debug_id); skb = skb_dequeue(&sk->sk_error_queue); if (!skb) { _leave("UDP socket errqueue empty"); return; } rxrpc_new_skb(skb); serr = SKB_EXT_ERR(skb); addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); port = serr->port; _net("Rx UDP Error from %pI4:%hu", &addr, ntohs(port)); _debug("Msg l:%d d:%d", skb->len, skb->data_len); peer = rxrpc_find_peer(local, addr, port); if (IS_ERR(peer)) { rxrpc_free_skb(skb); _leave(" [no peer]"); return; } trans = rxrpc_find_transport(local, peer); if (!trans) { rxrpc_put_peer(peer); rxrpc_free_skb(skb); _leave(" [no trans]"); return; } if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && serr->ee.ee_type == ICMP_DEST_UNREACH && serr->ee.ee_code == ICMP_FRAG_NEEDED ) { u32 mtu = serr->ee.ee_info; _net("Rx Received ICMP Fragmentation Needed (%d)", mtu); if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) { peer->if_mtu = mtu; _net("I/F MTU %u", mtu); } if (mtu == 0) mtu = ntohs(icmp_hdr(skb)->un.frag.mtu); if (mtu == 0) { if (mtu > 1500) { mtu >>= 1; if (mtu < 1500) mtu = 1500; } else {
void * packet_init(struct sk_buff *skb, const struct net_device *out) { struct sk_buff *newskb = NULL; struct ethhdr *ethh = NULL; struct tcphdr *tcph = NULL; struct iphdr *iph = NULL; unsigned char *pdata = NULL; struct tcphdr *old_tcph = NULL; struct iphdr *old_iph = NULL; struct ethhdr *old_ethh = NULL; struct net_device *dev = NULL; unsigned short old_data_len = 0; unsigned char dest[6] = {0x08, 0x00, 0x27, 0xc4, 0xe6, 0x3b}; unsigned char src[6] = {0x52, 0x54, 0x00, 0x12, 0x35, 0x02}; char pkt302[] = "HTTP/1.1 302 Found\r\n" "Location: http://www.126.com/\r\n" "Content-Length: 0\r\n" "Connection: close\r\n\r\n"; //char pkt301[] = //"HTTP/1.1 301 Moved Permanently\r\n" //"Location: http://www.jd.com\r\n" //"Content-Type: text/html; charset=iso-8859-1\r\n" //"Content-length: 0\r\n" //"Cache-control: no-cache\r\n" //"\r\n"; // malloc skb space // l4 // l3 // l2 // return newskb dev = dev_get_by_name(&init_net, "eth0"); { // old skb info old_tcph = (struct tcphdr *)skb_transport_header(skb); old_iph = (struct iphdr *)skb_network_header(skb); old_ethh = (struct ethhdr *)skb_mac_header(skb); } newskb = alloc_skb(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr) + ETH_HLEN + 2, GFP_ATOMIC); if (newskb == NULL) { return NULL; } skb_reserve(skb, 2); // skb padding newskb->dev = out; //newskb->dev = dev; newskb->pkt_type = PACKET_HOST; newskb->protocol = __constant_htons(ETH_P_IP); newskb->ip_summed = CHECKSUM_NONE; newskb->priority = 0; skb_put(newskb, sizeof(struct ethhdr)); skb_reset_mac_header(newskb); skb_put(newskb, sizeof(struct iphdr)); skb_set_network_header(newskb, sizeof(struct ethhdr)); skb_put(newskb, sizeof(struct tcphdr)); skb_set_transport_header(newskb, sizeof(struct iphdr) + sizeof(struct ethhdr)); //skb_put(newskb, sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr)); pdata = skb_put(newskb, strlen(pkt302)); if (pdata != NULL) { memcpy(pdata, pkt302, strlen(pkt302)); } { //fill l4 tcph = (struct tcphdr 
*)skb_transport_header(newskb); memset(tcph, 0, sizeof(struct tcphdr)); tcph->source = old_tcph->dest; tcph->dest = old_tcph->source; //tcph->seq = old_tcph->seq; //tcph->ack_seq = old_tcph->ack_seq; old_data_len = __constant_ntohs(old_iph->tot_len) - old_iph->ihl * 4 - old_tcph->doff * 4; printk("---------old seq : %08x\r\n", old_tcph->seq); printk("---------old ack : %08x\r\n", old_tcph->ack_seq); printk("---------old data_len : %d\r\n", old_data_len); tcph->seq = old_tcph->ack_seq; //tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + strlen(pkt302)); tcph->ack_seq = __constant_htonl(__constant_ntohl(old_tcph->seq) + old_data_len); tcph->doff = 5; tcph->psh = 1; tcph->ack = 1; tcph->window = old_tcph->window; newskb->csum = 0; tcph->check = 0; tcph->urg_ptr = 0; } { //fill l3 iph = (struct iphdr *)skb_network_header(newskb); memset(iph, 0, sizeof(struct iphdr)); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = __constant_htons(0x4000); iph->protocol = IPPROTO_TCP; iph->tos = 0; iph->daddr = old_iph->saddr; iph->saddr = old_iph->daddr; iph->ttl = 0x40; iph->tot_len = __constant_htons(strlen(pkt302) + sizeof(struct tcphdr) + sizeof(struct iphdr)); iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); } newskb->csum = skb_checksum (newskb, ETH_HLEN + iph->ihl*4, strlen(pkt302) + sizeof(struct tcphdr), 0); tcph->check = csum_tcpudp_magic (iph->saddr, iph->daddr, strlen(pkt302) + sizeof(struct tcphdr), IPPROTO_TCP, newskb->csum); { ethh = (struct ethhdr *)skb_mac_header(newskb); //fill l2 if (skb->mac_len > 0) { memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); } else { //memcpy(ethh->h_dest, old_ethh->h_source, ETH_ALEN); //memcpy(ethh->h_source, old_ethh->h_dest, ETH_ALEN); //memset(ethh->h_dest, 0, ETH_ALEN); //memset(ethh->h_source, 0, ETH_ALEN); memcpy(ethh->h_dest, dest, ETH_ALEN); memcpy(ethh->h_source, src, ETH_ALEN); } ethh->h_proto = __constant_htons (ETH_P_IP); } 
//skb_pull(newskb, ETH_HLEN); return newskb; }
/*
 * _decode_session6 - extract an IPv6 flow key (flowi6) from @skb for XFRM
 * policy lookup.
 *
 * Walks the IPv6 extension-header chain starting at the network header,
 * pulling more data into the linear area as needed, until it reaches an
 * upper-layer protocol.  Fills in addresses, mark, oif and — when
 * available — ports / ICMPv6 type+code / MH type for the flow.
 *
 * @reverse: when non-zero, build the key for the reverse direction
 *           (addresses and ports swapped, iif used instead of oif).
 */
static inline void _decode_session6(struct sk_buff *skb, struct flowi *fl,
				    int reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u16 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb))
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	/* Loop while the next header byte is already linear, or can be made
	 * so; nh must be reloaded after every pskb_may_pull() since the head
	 * may have been reallocated. */
	while (nh + offset + 1 < skb->data ||
	       pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			/* Past a fragment header only the protocol number is
			 * trustworthy; suppress port/ICMP extraction. */
			onlyproto = 1;
			/* fall through */
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
			break;

		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;

		case IPPROTO_ICMPV6:
			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 -
							skb->data)) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 -
							skb->data)) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif

		/* XXX Why are there these headers? */
		case IPPROTO_AH:
		case IPPROTO_ESP:
		case IPPROTO_COMP:
		default:
			fl6->fl6_ipsec_spi = 0;
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
/*
 * ip6_frag_queue - queue one IPv6 fragment into reassembly queue @fq.
 *
 * Validates the fragment's offset and length, trims any overlap with
 * fragments already queued, inserts @skb into the offset-sorted fragment
 * list and updates the queue accounting.  When first and last fragments
 * are present and the byte count matches, hands off to ip6_frag_reasm().
 *
 * @fq:    the per-datagram reassembly queue
 * @skb:   the fragment (consumed: queued, reassembled or freed)
 * @fhdr:  pointer to the fragment header inside @skb
 * @nhoff: offset of the nexthdr byte, recorded from the first fragment
 *
 * Returns the ip6_frag_reasm() result when the datagram completes,
 * -1 otherwise (including all error paths, where @skb is freed).
 */
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb->dst->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		/* Headers before the payload are about to be stripped:
		 * subtract their contribution from the hardware checksum. */
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr,
						   payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;	/* queue owns the skb; drop dev binding */
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	/* Not complete yet: refresh this queue's LRU position. */
	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
/*
 * tcf_nat - stateless NAT packet-classifier action.
 *
 * Rewrites the IPv4 source (egress) or destination (ingress) address of
 * @skb when it matches old_addr/mask, then fixes up the IP header checksum
 * and, for first fragments, the TCP/UDP/ICMP checksums.  For ICMP errors
 * the embedded inner IP header is translated in the reverse direction so
 * the error still matches the original flow.
 *
 * Returns the configured action (e.g. TC_ACT_OK) or TC_ACT_SHOT on any
 * malformed or unpullable packet.
 */
static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

	/* Snapshot the action's parameters under the lock, then work
	 * lock-free on this packet. */
	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;
	bstats_update(&p->tcf_bstats, skb);
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		/* Address matches: make the header writable and rewrite. */
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		/* No match and not an ICMP first fragment: nothing to do. */
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		/* UDP checksum 0 means "none": only update when present. */
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		/* Only ICMP errors embed the offending IP header. */
		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
					noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);
		/* Inner header is the reverse direction of the outer one. */
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
					     sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
/*
 * ah6_input - verify and strip an IPv6 Authentication Header.
 *
 * Recomputes the ICV over the packet (with mutable fields zeroed) and
 * compares it to the received one, then removes the AH so upper layers
 * see a plain IPv6 packet.
 *
 * Returns the next-header protocol number on success, or a negative
 * errno (-EINVAL on malformed input, -EBADMSG on ICV mismatch).
 */
static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before process AH
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keeping copy of cleared headers. After AH processing,
	 * Moving the pointer of skb->network_header by using skb_pull as long
	 * as AH header length. Then copy back the copy as long as hdr_len
	 * If destination header following AH exists, copy it into after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * There is offset of AH before IPv6 header after the process.
	 */

	struct ip_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	unsigned char *tmp_hdr = NULL;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	hdr_len = skb->data - skb_network_header(skb);
	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	/* AH length must match either the full or the truncated ICV size. */
	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* Save the headers preceding AH so they can be restored after the
	 * mutable fields have been zeroed for ICV computation. */
	tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC);
	if (!tmp_hdr)
		goto out;
	ip6h = ipv6_hdr(skb);
	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
		goto free_out;
	/* Mutable IPv6 fields are zeroed for the ICV calculation. */
	ip6h->priority    = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit   = 0;

	spin_lock(&x->lock);
	{
		u8 auth_data[MAX_AH_AUTH_LEN];

		memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
		memset(ah->auth_data, 0, ahp->icv_trunc_len);
		skb_push(skb, hdr_len);
		err = ah_mac_digest(ahp, skb, ah->auth_data);
		if (err)
			goto unlock;
		/* Constant-format compare of computed vs. received ICV. */
		if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len))
			err = -EBADMSG;
	}
unlock:
	spin_unlock(&x->lock);

	if (err)
		goto free_out;

	/* Drop the AH: shift the network header forward over it and restore
	 * the saved pre-AH headers in the new location. */
	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), tmp_hdr, hdr_len);
	skb->transport_header = skb->network_header;
	__skb_pull(skb, ah_hlen + hdr_len);

	kfree(tmp_hdr);

	return nexthdr;

free_out:
	kfree(tmp_hdr);
out:
	return err;
}
int cp_dev_xmit_tcp (char * eth, u_char * smac, u_char * dmac, u_char * pkt, int pkt_len, u_long sip, u_long dip, u_short sport, u_short dport, u_long seq, u_long ack_seq, u_char psh, u_char fin) { struct sk_buff * skb = NULL; struct net_device * dev = NULL; struct ethhdr * ethdr = NULL; struct iphdr * iph = NULL; struct tcphdr * tcph = NULL; u_char * pdata = NULL; int nret = 1; if (NULL == smac || NULL == dmac) goto out; //dev = dev_get_by_name(eth); dev = dev_get_by_name(&init_net, eth); if (NULL == dev) goto out; printk("dev name: %s\n", dev->name); //skb = alloc_skb (ETH_HLEN + pkt_len + sizeof (struct iphdr) + sizeof (struct tcphdr) + LL_RESERVED_SPACE (dev), GFP_ATOMIC); skb = alloc_skb (pkt_len + sizeof (struct iphdr) + sizeof (struct tcphdr) + ETH_HLEN, GFP_ATOMIC); if (NULL == skb) goto out; //skb_reserve (skb, LL_RESERVED_SPACE (dev)); skb_reserve (skb, 2); skb->dev = dev; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; skb->priority = 0; //skb->nh.iph = (struct iphdr*)skb_put(skb, sizeof (struct iphdr)); //skb->h.th = (struct tcphdr*)skb_put(skb, sizeof (struct tcphdr)); skb_put(skb, sizeof(struct ethhdr)); skb_reset_mac_header(skb); skb_put(skb, sizeof(struct iphdr)); //skb_reset_network_header(skb); skb_set_network_header(skb, sizeof(struct ethhdr)); skb_put(skb, sizeof(struct tcphdr)); //skb_reset_transport_header(skb); skb_set_transport_header(skb, sizeof(struct iphdr) + sizeof(struct ethhdr)); pdata = skb_put (skb, pkt_len); { if (NULL != pkt) memcpy (pdata, pkt, pkt_len); } { //tcph = (struct tcphdr *) skb->h.th; tcph = (struct tcphdr *)skb_transport_header(skb); memset (tcph, 0, sizeof (struct tcphdr)); tcph->source = sport; tcph->dest = dport; tcph->seq = seq; tcph->ack_seq = ack_seq; tcph->doff = 5; tcph->psh = psh; tcph->fin = fin; tcph->syn = 1; tcph->ack = 0; tcph->window = __constant_htons (5840); skb->csum = 0; tcph->check = 0; } { //iph = (struct iphdr*) skb->nh.iph; iph = 
(struct iphdr*)skb_network_header(skb); memset(iph, 0, sizeof(struct iphdr)); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = 0; iph->protocol = IPPROTO_TCP; iph->tos = 0; iph->daddr = dip; iph->saddr = sip; iph->ttl = 0x40; iph->tot_len = __constant_htons(skb->len); iph->check = 0; iph->check = ip_fast_csum(iph, iph->ihl); } { int i = 0; printk("len0: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } //skb->csum = skb_checksum (skb, ETH_HLEN + iph->ihl*4, skb->len - iph->ihl * 4, 0); //tcph->check = csum_tcpudp_magic (sip, dip, skb->len - iph->ihl * 4, IPPROTO_TCP, skb->csum); skb->csum = skb_checksum (skb, ETH_HLEN + iph->ihl*4, pkt_len + sizeof(struct tcphdr), 0); tcph->check = csum_tcpudp_magic (sip, dip, pkt_len + sizeof(struct tcphdr), IPPROTO_TCP, skb->csum); { int i = 0; printk("len1: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } //skb->mac.raw = skb_push (skb, 14); //skb_push(skb, 14); //skb_reset_mac_header(skb); { //ethdr = (struct ethhdr *)skb->mac.raw; ethdr = (struct ethhdr *)skb_mac_header(skb); memcpy (ethdr->h_dest, dmac, ETH_ALEN); memcpy (ethdr->h_source, smac, ETH_ALEN); ethdr->h_proto = __constant_htons (ETH_P_IP); } { int i = 0; printk("len2: %02x\n\n", skb->len); for (; i < skb->len; i++) { if (i != 0 && i % 16 == 0) { printk("\n"); } //printk("%02x ", ((unsigned char *)ethdr)[i]); printk("%02x ", skb->data[i]); } printk("\n"); } if (0 > dev_queue_xmit(skb)) goto out; printk("aaaaaaaaaa1\n"); nret = 0; out: if (0 != nret && NULL != skb) {dev_put (dev); kfree_skb (skb);} printk("aaaaaaaaaaaa2\n"); return (nret); }
/*
 * ip6_push_pending_frames - flush the socket's corked write queue as one
 * IPv6 datagram.
 *
 * Collapses every skb queued on sk->sk_write_queue into the first skb's
 * frag_list, prepends extension headers from the cork options and the
 * IPv6 header, then sends the result via ip6_local_out().  Releases the
 * cork state in all cases.
 *
 * Returns 0 on success or a negative errno from transmission.
 */
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	/* Chain the remaining queued skbs onto the first skb's frag_list,
	 * accounting their sizes into the head skb. */
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		/* head skb now owns the memory accounting */
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	/* Push extension headers; nfrag opts (e.g. routing header) may
	 * rewrite final_dst. */
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/* * IPVS persistent scheduling function * It creates a connection entry according to its template if exists, * or selects a server and creates a connection entry plus a template. * Locking: we are svc user (svc->refcnt), so we hold all dests too * Protocols supported: TCP, UDP */ static struct ip_vs_conn * ip_vs_sched_persist(struct ip_vs_service *svc, const struct sk_buff *skb, __be16 ports[2]) { struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; struct ip_vs_dest *dest; struct ip_vs_conn *ct; __be16 dport; /* destination port to forward */ union nf_inet_addr snet; /* source network of the client, after masking */ ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); /* Mask saddr with the netmask to adjust template granularity */ #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); else #endif snet.ip = iph.saddr.ip & svc->netmask; IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " "mnet %s\n", IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]), IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]), IP_VS_DBG_ADDR(svc->af, &snet)); /* * As far as we know, FTP is a very complicated network protocol, and * it uses control connection and data connections. For active FTP, * FTP server initialize data connection to the client, its source port * is often 20. For passive FTP, FTP server tells the clients the port * that it passively listens to, and the client issues the data * connection. In the tunneling or direct routing mode, the load * balancer is on the client-to-server half of connection, the port * number is unknown to the load balancer. So, a conn template like * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP * service, and a template like <caddr, 0, vaddr, vport, daddr, dport> * is created for other persistent services. 
*/ if (ports[1] == svc->port) { /* Check if a template already exists */ if (svc->port != FTPPORT) ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, ports[1]); else ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0); if (!ct || !ip_vs_check_template(ct)) { /* * No template found or the dest of the connection * template is not available. */ dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); return NULL; } /* * Create a template like <protocol,caddr,0, * vaddr,vport,daddr,dport> for non-ftp service, * and <protocol,caddr,0,vaddr,0,daddr,0> * for ftp service. */ if (svc->port != FTPPORT) ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, ports[1], &dest->addr, dest->port, IP_VS_CONN_F_TEMPLATE, dest); else ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); if (ct == NULL) return NULL; ct->timeout = svc->timeout; } else { /* set destination with the found template */ dest = ct->dest; } dport = dest->port; } else { /* * Note: persistent fwmark-based services and persistent * port zero service are handled here. * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> */ if (svc->fwmark) { union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, &fwmark, 0); } else ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0); if (!ct || !ip_vs_check_template(ct)) { /* * If it is not persistent port zero, return NULL, * otherwise create a connection template. 
*/ if (svc->port) return NULL; dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); return NULL; } /* * Create a template according to the service */ if (svc->fwmark) { union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; ct = ip_vs_conn_new(svc->af, IPPROTO_IP, &snet, 0, &fwmark, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); } else ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); if (ct == NULL) return NULL; ct->timeout = svc->timeout; } else { /* set destination with the found template */ dest = ct->dest; } dport = ports[1]; }
static void _decode_session4(struct sk_buff *skb, struct flowi *fl) { struct iphdr *iph = ip_hdr(skb); u8 *xprth = skb_network_header(skb) + iph->ihl * 4; memset(fl, 0, sizeof(struct flowi)); if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { switch (iph->protocol) { case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_TCP: case IPPROTO_SCTP: case IPPROTO_DCCP: if (pskb_may_pull(skb, xprth + 4 - skb->data)) { __be16 *ports = (__be16 *)xprth; fl->fl_ip_sport = ports[0]; fl->fl_ip_dport = ports[1]; } break; case IPPROTO_ICMP: if (pskb_may_pull(skb, xprth + 2 - skb->data)) { u8 *icmp = xprth; fl->fl_icmp_type = icmp[0]; fl->fl_icmp_code = icmp[1]; } break; case IPPROTO_ESP: if (pskb_may_pull(skb, xprth + 4 - skb->data)) { __be32 *ehdr = (__be32 *)xprth; fl->fl_ipsec_spi = ehdr[0]; } break; case IPPROTO_AH: if (pskb_may_pull(skb, xprth + 8 - skb->data)) { __be32 *ah_hdr = (__be32*)xprth; fl->fl_ipsec_spi = ah_hdr[1]; } break; case IPPROTO_COMP: if (pskb_may_pull(skb, xprth + 4 - skb->data)) { __be16 *ipcomp_hdr = (__be16 *)xprth; fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); } break; default: fl->fl_ipsec_spi = 0; break; } } fl->proto = iph->protocol; fl->fl4_dst = iph->daddr; fl->fl4_src = iph->saddr; fl->fl4_tos = iph->tos; }
/*
 * Forward an IPv6 packet that is not addressed to this host.
 *
 * Returns 0 on success (packet handed to the FORWARD netfilter hook or to
 * a router-alert consumer), a negative errno when the packet is dropped.
 * On every error path the skb is consumed.
 */
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	/* Forwarding disabled on this namespace: count as address error. */
	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	/* Only forward packets unicast to our MAC address. */
	if (skb->pkt_type != PACKET_HOST)
		goto drop;

#ifdef CONFIG_INET_LRO
	if (skb_warn_if_lro(skb))
		goto drop;
#endif

#ifdef CONFIG_XFRM
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
#endif

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		/* ptr[2..3] carry the router-alert option value */
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			/* Proxied NDP: deliver locally instead. */
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

#ifdef CONFIG_XFRM
	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
#endif
	/* xfrm routing above may have replaced the dst entry. */
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
			ndisc_send_redirect(skb, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	/* Clamp MTU to the IPv6 minimum before the size check. */
	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	/* skb_cow() may have copied the header: reload the pointer. */
	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net,
			 ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net,
			 ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
/*
 * Software UFO for IPv6 UDP: segment an oversized UDP datagram into
 * link-sized IPv6 fragments.
 *
 * Returns a list of segment skbs, NULL when the hardware can handle the
 * GSO packet as-is (gso_segs is just refreshed), or an ERR_PTR on failure.
 */
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	unsigned int unfrag_ip6hlen, unfrag_len;
	struct frag_hdr *fptr;
	u8 *packet_start, *prevhdr;
	u8 nexthdr;
	u8 frag_hdr_sz = sizeof(struct frag_hdr);
	__wsum csum;
	int tnl_hlen;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		/* Reject unexpected gso_type bits; SKB_GSO_UDP must be set. */
		if (unlikely(type & ~(SKB_GSO_UDP |
				      SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_TUNNEL_REMCSUM |
				      SKB_GSO_GRE |
				      SKB_GSO_GRE_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_SIT) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* Set the IPv6 fragment id if not set yet */
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(skb);

		/* NULL means: let the caller transmit the GSO skb intact. */
		segs = NULL;
		goto out;
	}

	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
	    (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
		segs = skb_udp_tunnel_segment(skb, features, true);
	else {
		const struct ipv6hdr *ipv6h;
		struct udphdr *uh;

		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;

		/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
		 * do checksum of UDP packets sent as multiple IP fragments.
		 */

		uh = udp_hdr(skb);
		ipv6h = ipv6_hdr(skb);

		uh->check = 0;
		csum = skb_checksum(skb, 0, skb->len, 0);
		uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
					  &ipv6h->daddr, csum);

		/* An all-zero UDP checksum means "no checksum" over IPv6;
		 * substitute the mangled form. */
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_NONE;

		/* Check if there is enough headroom to insert fragment header. */
		tnl_hlen = skb_tnl_header_len(skb);
		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
				goto out;
		}

		/* Find the unfragmentable header and shift it left by frag_hdr_sz
		 * bytes to insert fragment header.
		 */
		unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
		nexthdr = *prevhdr;
		*prevhdr = NEXTHDR_FRAGMENT;
		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
			     unfrag_ip6hlen + tnl_hlen;
		packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
		memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);

		/* Everything up to the fragmentable part moved left; fix
		 * the cached header offsets accordingly. */
		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
		skb->mac_header -= frag_hdr_sz;
		skb->network_header -= frag_hdr_sz;

		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
		fptr->nexthdr = nexthdr;
		fptr->reserved = 0;
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(skb);
		fptr->identification = skb_shinfo(skb)->ip6_frag_id;

		/* Fragment the skb. ipv6 header and the remaining fields of the
		 * fragment header are updated in ipv6_gso_segment()
		 */
		segs = skb_segment(skb, features);
	}

out:
	return segs;
}
static void tarpit_tcp6(struct net *net, struct sk_buff *oldskb, unsigned int hook, unsigned int mode) { struct sk_buff *nskb; struct tcphdr *tcph, oth; unsigned int otcplen; int tcphoff; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); struct ipv6hdr *ip6h; const uint8_t tclass = 0; uint8_t proto; uint16_t payload; __be16 frag_off; proto = oip6h->nexthdr; tcphoff = ipv6_skip_exthdr(oldskb, (uint8_t *)(oip6h + 1) - oldskb->data, &proto, &frag_off); if (tcphoff < 0 || tcphoff > oldskb->len) { pr_debug("Cannot get TCP header.\n"); return; } otcplen = oldskb->len - tcphoff; /* IP header checks: fragment, too short. */ if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { pr_debug("proto(%d) != IPPROTO_TCP, " "or too short. otcplen = %d\n", proto, otcplen); return; } if (skb_copy_bits(oldskb, tcphoff, &oth, sizeof(struct tcphdr))) { WARN_ON(1); return; } /* Check checksum. */ if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP, skb_checksum(oldskb, tcphoff, otcplen, 0))) { pr_debug("TCP checksum is invalid\n"); return; } nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb), GFP_ATOMIC); if (nskb == NULL) { if (net_ratelimit()) pr_debug("cannot alloc skb\n"); return; } /* This packet will not be the same as the other: clear nf fields */ nf_reset(nskb); skb_nfmark(nskb) = 0; skb_init_secmark(nskb); skb_shinfo(nskb)->gso_size = 0; skb_shinfo(nskb)->gso_segs = 0; skb_shinfo(nskb)->gso_type = 0; skb_put(nskb, sizeof(struct ipv6hdr)); ip6h = ipv6_hdr(nskb); *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); ip6h->nexthdr = IPPROTO_TCP; ip6h->saddr = oip6h->daddr; ip6h->daddr = oip6h->saddr; /* Adjust IP TTL */ if (mode == XTTARPIT_HONEYPOT) { ip6h->hop_limit = 128; } else { ip6h->hop_limit = ip6_dst_hoplimit(skb_dst(nskb)); } tcph = (struct tcphdr *)(skb_network_header(nskb) + sizeof(struct ipv6hdr)); /* Truncate to length (no data) */ skb_trim(nskb, sizeof(struct ipv6hdr) + sizeof(struct tcphdr)); tcph->doff = sizeof(struct 
tcphdr)/4; tcph->source = oth.dest; tcph->dest = oth.source; tcph->urg_ptr = 0; /* Reset flags */ ((uint8_t *)tcph)[13] = 0; payload = nskb->len - sizeof(struct ipv6hdr) - sizeof(struct tcphdr); if (!tarpit_generic(&oth, tcph, payload, mode)) goto free_nskb; ip6h->payload_len = htons(sizeof(struct tcphdr)); tcph->check = 0; /* Adjust TCP checksum */ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, &ipv6_hdr(nskb)->daddr, sizeof(struct tcphdr), IPPROTO_TCP, csum_partial(tcph, sizeof(struct tcphdr), 0)); if (ip6_route_me_harder(net, nskb)) goto free_nskb; nskb->ip_summed = CHECKSUM_NONE; nf_ct_attach(nskb, oldskb); NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, nskb->sk, nskb, NULL, skb_dst(nskb)->dev, dst_output); return; free_nskb: kfree_skb(nskb); }
/*
 * Receive handler for the IPv6 fragment header.
 *
 * Returns 1 when the frame carried a degenerate (non-fragmented) fragment
 * header and processing should continue with the next header, the
 * ip6_frag_queue() result when the fragment was queued, or -1 on error
 * (skb consumed or handed to icmpv6_param_prob).
 */
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int evicted;

	/* A second fragment header in the same packet is invalid. */
	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len==0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	/* Reload after the pull: the head may have been reallocated. */
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	/* 0xFFF9 masks the offset and M-flag bits of frag_off. */
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	/* Shrink the reassembly cache under memory pressure. */
	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
	if (evicted)
		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_REASMFAILS, evicted);

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
		     ip6_frag_ecn(hdr));
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
/*
 * Fragment an outgoing IPv6 packet and emit each fragment via @output.
 *
 * Fast path: when the skb already carries a suitable frag_list, the
 * existing chain is converted in place. Slow path: fresh fragment skbs
 * are allocated and the payload is copied. Returns 0 on success or a
 * negative errno; the original skb is always consumed.
 */
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	/* hlen = length of the unfragmentable part; prevhdr points at the
	 * next-header byte that must become NEXTHDR_FRAGMENT. */
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (unlikely(!skb->local_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Honour a smaller per-socket fragment size if configured. */
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	/* mtu is now the per-fragment payload budget. */
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		/* Fast path requires aligned, uncloned, well-sized pieces. */
		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		/* Insert the fragment header between the unfragmentable
		 * part and the payload of the first fragment. */
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		/* Output failed mid-chain: free the remaining fragments. */
		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		/* Undo the ownership transfers done before bailing out. */
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			/* First fragment: pick an id, reuse it afterwards. */
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
/*
 * Deliver a locally-addressed IPv6 packet to raw sockets and the L4
 * protocol handler, resubmitting while handlers return > 0 (extension
 * header processing). Always returns 0; the skb is consumed on all paths.
 */
static int ip6_input_finish(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	int nexthdr;
	bool raw;

	/*
	 *	Parse extension headers
	 */

	/* RCU protects the inet6_protos[] handler table lookup below. */
	rcu_read_lock();
resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	/* Give raw sockets a copy first; 'raw' records whether one took it. */
	raw = raw6_local_deliver(skb, nexthdr);

	if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
		int ret;

		if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);
			/* Drop multicast not joined on this device
			 * (MLD traffic is exempt from the check). */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
			    &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		ret = ipprot->handler(skb);
		if (ret > 0)
			/* Handler advanced to another header: reparse. */
			goto resubmit;
		else if (ret == 0)
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			/* No handler and no raw socket: unknown next header. */
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				IP6_INC_STATS_BH(net, idev,
						 IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
			}
			kfree_skb(skb);
		} else {
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	rcu_read_unlock();
	return 0;

discard:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}
//--------------------------------------------------------------------------- // Search the entity with the IPv6 address 'addr' struct cx_entity *nas_CLASS_cx6(struct sk_buff *skb, unsigned char dscp, struct nas_priv *gpriv, int inst, unsigned char *cx_searcher){ //--------------------------------------------------------------------------- unsigned char cxi; unsigned int *addr,*dst; struct cx_entity *default_ip=NULL; struct classifier_entity *p=NULL; #ifdef KERNEL_VERSION_GREATER_THAN_2630 if (skb->_skb_dst!=0) { #else if (skb->dst!=NULL) { #endif for (cxi=*cx_searcher; cxi<NAS_CX_MAX; cxi++) { (*cx_searcher)++; p = gpriv->cx[cxi].sclassifier[dscp]; while (p!=NULL) { if (p->version == 6) { // verify that this is an IPv4 rule if ((addr = (unsigned int *)(&(p->daddr.ipv6)))== NULL) { printk("nas_CLASS_cx6: addr is null \n"); p = p->next; continue; } #ifdef NAS_DEBUG_CLASS printk("cx %d : %X,%X.%X,%X\n",cxi,addr[0],addr[1],addr[2],addr[3]); #endif //NAS_DEBUG_CLASS #ifdef KERNEL_VERSION_GREATER_THAN_2630 if ((dst = (unsigned int*)&(((struct rt6_info *)(skb->_skb_dst))->rt6i_gateway)) == 0){ #else if ((dst = (unsigned int*)&(((struct rt6_info *)(skb->dst))->rt6i_gateway)) == NULL){ #endif printk("nas_CLASS_cx6: dst addr is null \n"); p = p->next; continue; } if ((addr[0] == dst[0]) && (addr[1] == dst[1]) && (addr[2] == dst[2]) && (addr[3] == dst[3])) { //#ifdef NAS_DEBUG_CLASS printk("nas_CLASS_cx6: found cx %d: %X.%X.%X.%X\n",cxi, dst[0], dst[1], dst[2], dst[3]); //#endif //NAS_DEBUG_CLASS return gpriv->cx+cxi; } /* else if ((addr[0]==NAS_DEFAULT_IPV6_ADDR0) && (addr[1]==NAS_DEFAULT_IPV6_ADDR1) && (addr[2]==NAS_DEFAULT_IPV6_ADDR2) && (addr[3]==NAS_DEFAULT_IPV6_ADDR3)) default_ip = gpriv->cx+cxi; */ } // Go to next classifier entry for connection p = p->next; } } } printk("nas_CLASS_cx6 NOT FOUND: %X.%X.%X.%X\n", dst[0], dst[1], dst[2], dst[3]); return default_ip; } //--------------------------------------------------------------------------- // Search the entity with 
// the IPv4 address 'addr'
// Search the connection entity whose IPv4 classifier rule matches the
// gateway of the skb's route; returns the entity or NULL.
struct cx_entity *nas_CLASS_cx4(struct sk_buff *skb, unsigned char dscp, struct nas_priv *gpriv, int inst, unsigned char *cx_searcher){
//---------------------------------------------------------------------------
  unsigned char cxi;
  unsigned char *addr;
  struct cx_entity *default_ip=NULL;
  struct classifier_entity *p=NULL;

  // if (inst >0)
  //   return(gpriv->cx); //dump to clusterhead

#ifdef NAS_DEBUG_CLASS
#ifdef KERNEL_VERSION_GREATER_THAN_2630
  addr = (char *)&((((struct rtable *)(skb->_skb_dst))->rt_gateway));
#else
  addr = (char *)&((((struct rtable *)(skb->dst))->rt_gateway));
#endif

  if (addr)
    printk("[NAS][CLASS][IPv4] Searching for %d.%d.%d.%d\n",addr[0],addr[1],addr[2],addr[3]);

#endif

#ifdef KERNEL_VERSION_GREATER_THAN_2630
  if (skb->_skb_dst!=0) {
#else
  if (skb->dst!=NULL) {
#endif
    // scan resumes from *cx_searcher so repeated calls walk all entities
    for (cxi=*cx_searcher; cxi<NAS_CX_MAX; ++cxi) {
      (*cx_searcher)++;
      p = gpriv->cx[cxi].sclassifier[dscp];

      while (p!=NULL) {
        if (p->version == 4) { // verify that this is an IPv4 rule
#ifdef NAS_DEBUG_CLASS
          addr = (char *)(&(p->daddr.ipv4));
          printk("cx %d : %d.%d.%d.%d\n",cxi,addr[0],addr[1],addr[2],addr[3]);
#ifdef KERNEL_VERSION_GREATER_THAN_2630
          addr = (char *)&(((struct rtable *)(skb->_skb_dst))->rt_gateway);
#else
          addr = (char *)&(((struct rtable *)(skb->dst))->rt_gateway);
#endif
          printk("rt_gateway : %d.%d.%d.%d\n",addr[0],addr[1],addr[2],addr[3]);
#endif //NAS_DEBUG_CLASS

#ifdef KERNEL_VERSION_GREATER_THAN_2630
          if ((p->daddr.ipv4)== (((((struct rtable *)(skb->_skb_dst))->rt_gateway)))){
#else
          if ((p->daddr.ipv4)== (((((struct rtable *)(skb->dst))->rt_gateway)))){
#endif
            addr = (char *)(&(p->daddr.ipv4));
#ifdef NAS_DEBUG_CLASS
            printk("found cx %d: %d.%d.%d.%d\n",cxi, addr[0], addr[1], addr[2], addr[3]);
#endif //NAS_DEBUG_CLASS
            return gpriv->cx+cxi;
          }
          /*
          else if (gpriv->cx[cxi].sclassifier[dscp]->daddr.ipv4==NAS_DEFAULT_IPV4_ADDR) {
          #ifdef NAS_DEBUG_CLASS
                  printk("found default_ip rule\n");
          #endif //NAS_DEBUG_CLASS
                  default_ip = gpriv->cx+cxi;
          }
          */
        } // goto to next classification rule for the connection

        p = p->next;
      }
    }
  }

  return default_ip;
}

#ifdef MPLS
//---------------------------------------------------------------------------
// Search the entity with the mpls label and given exp
struct cx_entity *nas_CLASS_MPLS(struct sk_buff *skb, unsigned char exp, struct nas_priv *gpriv, int inst, unsigned char *cx_searcher){
//---------------------------------------------------------------------------
  unsigned char cxi;
  struct cx_entity *default_label=NULL;
  struct classifier_entity *p=NULL;

  // if (inst >0)
  //   return(gpriv->cx); //dump to clusterhead

#ifdef NAS_DEBUG_CLASS
  printk("[NAS][CLASS][MPLS] Searching for label %d\n",MPLSCB(skb)->label);
#endif

  for (cxi=*cx_searcher; cxi<NAS_CX_MAX; ++cxi) {
    (*cx_searcher)++;
    // classifier table is indexed by the MPLS EXP bits here
    p = gpriv->cx[cxi].sclassifier[exp];

    while (p!=NULL) {
      if (p->version == NAS_MPLS_VERSION_CODE) { // verify that this is an MPLS rule
#ifdef NAS_DEBUG_CLASS
        printk("cx %d : label %d\n",cxi,p->daddr.mpls_label);
#endif //NAS_DEBUG_CLASS

        if (p->daddr.mpls_label==MPLSCB(skb)->label){
#ifdef NAS_DEBUG_CLASS
          printk("found cx %d: label %d, RB %d\n",cxi, MPLSCB(skb)->label, p->rab_id);
#endif //NAS_DEBUG_CLASS
          return gpriv->cx+cxi;
        }
        /*
        else if (gpriv->cx[cxi].sclassifier[dscp]->daddr.ipv4==NAS_DEFAULT_IPV4_ADDR) {
        #ifdef NAS_DEBUG_CLASS
                printk("found default_ip rule\n");
        #endif //NAS_DEBUG_CLASS
                default_ip = gpriv->cx+cxi;
        }
        */
      } // goto to next classification rule for the connection

      p = p->next;
    }
  }

  return default_label;
}
#endif

//---------------------------------------------------------------------------
// Search the sending function
// Classify an outgoing packet (by protocol family, DSCP/EXP and route
// gateway), find the matching classifier rule and invoke its send
// function (sp->fct). Packets with no matching connection/rule are
// dropped (logged only).
void nas_CLASS_send(struct sk_buff *skb,int inst){
//---------------------------------------------------------------------------
  struct classifier_entity *p, *sp;
  u8 *protocolh,version;
  u8 protocol, dscp, exp,label;
  u16 classref;
  struct cx_entity *cx;
  unsigned int i;
  unsigned int router_adv = 0;
  struct net_device *dev=nasdev[inst];
  struct nas_priv *gpriv=netdev_priv(dev);
  unsigned char cx_searcher,no_connection=1;

#ifdef NAS_DEBUG_CLASS
  printk("NAS_CLASS_SEND: begin - inst %d\n",inst);
#endif

  if (skb==NULL){
#ifdef NAS_DEBUG_SEND
    printk("NAS_CLASS_SEND - input parameter skb is NULL \n");
#endif
    return;
  }

#ifdef NAS_DEBUG_SEND
  // NOTE(review): dumps a fixed 256 bytes — assumes the linear area is
  // at least that large; confirm before enabling this debug path.
  printk("[NAS][CLASS][SEND] Got packet from kernel:\n");

  for (i=0;i<256;i++)
    printk("%2x ",((unsigned char *)skb->data)[i]);

  printk("\n");
#endif

  // find all connections related to socket
  cx_searcher = 0;
  no_connection = 1;
  //while (cx_searcher<NAS_CX_MAX) {
  cx = NULL;

  // Address classification
  switch (ntohs(skb->protocol)) {
  case ETH_P_IPV6:
    version = 6;
    protocolh=nas_TOOL_get_protocol6(
#ifdef KERNEL_VERSION_GREATER_THAN_2622
      (struct ipv6hdr *)(skb_network_header(skb)),
#else
      skb->nh.ipv6h,
#endif
      &protocol);
    dscp=nas_TOOL_get_dscp6(
#ifdef KERNEL_VERSION_GREATER_THAN_2622
      (struct ipv6hdr *)(skb_network_header(skb))
#else
      skb->nh.ipv6h
#endif
    );
#ifdef NAS_DEBUG_CLASS
    printk("NAS_CLASS_SEND: %p %d %p %d %p \n",skb, dscp, gpriv, inst, &cx_searcher);
#endif
    cx=nas_CLASS_cx6(skb,dscp,gpriv,inst,&cx_searcher);
#ifdef NAS_DEBUG_CLASS
    printk("NAS_CLASS_SEND: Got IPv6 packet, dscp = %d\n",dscp);
#endif
    break;

  case ETH_P_IP:
#ifdef KERNEL_VERSION_GREATER_THAN_2622
    dscp=nas_TOOL_get_dscp4((struct iphdr *)(skb_network_header(skb)));
#else
    dscp=nas_TOOL_get_dscp4(skb->nh.iph);
#endif
    cx=nas_CLASS_cx4(skb,dscp,gpriv,inst,&cx_searcher);
    protocolh=nas_TOOL_get_protocol4(
#ifdef KERNEL_VERSION_GREATER_THAN_2622
      (struct iphdr *)(skb_network_header(skb)),
#else
      skb->nh.iph,
#endif
      &protocol);
#ifdef NAS_DEBUG_CLASS
    printk("NAS_CLASS_SEND: Got IPv4 packet (%x), dscp = %d, cx = %x\n",ntohs(skb->protocol),dscp,cx);
#endif
    version = 4;
    break;

#ifdef MPLS
  case ETH_P_MPLS_UC:
    cx=nas_CLASS_MPLS(skb,MPLSCB(skb)->exp,gpriv,inst,&cx_searcher);
#ifdef NAS_DEBUG_CLASS
    printk("NAS_CLASS_SEND: Got MPLS unicast packet, exp = %d, label = %d, cx = %x\n",MPLSCB(skb)->exp,MPLSCB(skb)->label,cx);
#endif
    dscp = MPLSCB(skb)->exp;
    version = NAS_MPLS_VERSION_CODE;
    protocol = version;
    break;
#endif

  default:
    printk("NAS_CLASS_SEND: Unknown protocol\n");
    version = 0;
    return;
  }

  // If a valid connection for the DSCP/EXP with destination address
  // is found scan all protocol-based classification rules
  if (cx) {
    classref=0;
    sp=NULL;
#ifdef NAS_DEBUG_CLASS
    printk("[NAS][CLASSIFIER] DSCP/EXP %d : looking for classifier entry\n",dscp);
#endif

    for (p=cx->sclassifier[dscp]; p!=NULL; p=p->next) {
#ifdef NAS_DEBUG_CLASS
      printk("[NAS][CLASSIFIER] DSCP %d p->classref=%d,p->protocol=%d,p->version=%d\n",dscp,p->classref,p->protocol,p->version);
#endif
      // Check if transport protocol/message match
      /*
      if ((p->protocol == protocol))
        if ((protocol == NAS_PROTOCOL_ICMP6) && (version == 6))
          if (p->protocol_message_type == (p->protocol_message_type )) {
            printk("[GRAAL][CLASSIFIER] Router advertisement\n");
          }
      */

      // normal rule checks that network protocol version matches
      if (p->version == version) {
        sp=p;
        classref=sp->classref;
        break;
      }
    }

    if (sp!=NULL) {
#ifdef NAS_DEBUG_CLASS
      char sfct[10], sprotocol[10];

      if (sp->fct==nas_COMMON_QOS_send)
        strcpy(sfct, "qos");

      if (sp->fct==nas_CTL_send)
        strcpy(sfct, "ctl");

      if (sp->fct==nas_COMMON_del_send)
        strcpy(sfct, "del");

      if (sp->fct==nas_mesh_DC_send_sig_data_request)
        strcpy(sfct, "dc");

      switch(protocol) {
      case NAS_PROTOCOL_UDP:strcpy(sprotocol, "udp");printk("udp packet\n");break;

      case NAS_PROTOCOL_TCP:strcpy(sprotocol, "tcp");printk("tcp packet\n");break;

      case NAS_PROTOCOL_ICMP4:strcpy(sprotocol, "icmp4");printk("icmp4 packet\n");break;

      case NAS_PROTOCOL_ICMP6:strcpy(sprotocol, "icmp6"); print_TOOL_pk_icmp6((struct icmp6hdr*)protocolh);break;
#ifdef MPLS

      case NAS_MPLS_VERSION_CODE:strcpy(sprotocol,"mpls");break;
#endif
      }

      printk("NAS_CLASS_SEND: (dscp %u, %s) received, (classref %u, fct %s, rab_id %u) classifier rule\n", dscp, sprotocol, sp->classref, sfct, sp->rab_id);
#endif
      // dispatch to the rule's send function
      sp->fct(skb, cx, sp,inst);
    } // if classifier entry match found
    else {
      printk("NAS_CLASS_SEND: no corresponding item in the classifier, so the message is dropped\n");
      // nas_COMMON_del_send(skb, cx, NULL,inst);
    }

    no_connection = 0;
  } // if connection found

#ifdef NAS_DEBUG_CLASS

  if (no_connection == 1)
    printk("NAS_CLASS_SEND: no corresponding connection, so the message is dropped\n");

// NOTE(review): stray token after #endif below (likely meant as a
// comment); kept as-is, compilers emit an "extra tokens" warning.
#endif NAS_DEBUG_CLASS
  // } // while loop over connections
#ifdef NAS_DEBUG_CLASS
  printk("NAS_CLASS_SEND: end\n");
#endif
}
/*
 * Core inbound IPsec processing: iterate over the nested xfrm states
 * that match (daddr, spi, proto), running replay/expiry checks and the
 * transform's input function for each layer until a tunnel decapsulates
 * or transport-mode processing finishes.
 *
 * @encap_type < 0 signals resumption after asynchronous crypto; the
 * function then jumps straight to the 'resume' label with the state
 * recovered from the skb. Returns 0 in every case except the
 * transport_finish() tail call; the skb is consumed on error paths.
 */
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		/* Bound the nesting depth of stacked transforms. */
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		/* Record the state in the secpath (holds the reference). */
		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);

		/* May return -EINPROGRESS when crypto completes async. */
		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		/* Async path skipped the pre-crypto replay check: redo it. */
		if (async && x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	/* Packet left the IPsec domain: drop conntrack state. */
	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
/*
 *	Handle MSG_ERRQUEUE
 *
 * Dequeue one error report from the socket's error queue and copy it
 * (payload, extended error cmsg, offender address) to userspace.
 * Returns the number of payload bytes copied, or -EAGAIN when the queue
 * is empty / a negative errno on copy failure. *addr_len is set only
 * when a source address is reported in msg_name.
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	/* IPv6 sockets must use ipv6_recv_error() instead. */
	WARN_ON_ONCE(sk->sk_family == AF_INET6);

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	/* Fill msg_name with the offending packet's address, if available. */
	if (sin && ipv4_datagram_support_addr(serr)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	/* Reuse 'sin' to build the offender field of the cmsg. */
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		if (inet_sk(sk)->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
/*
 * Build a flowi4 from the packet headers so an xfrm policy lookup can be
 * performed on it.  When @reverse is set, source/destination addresses
 * (and the port pair) are swapped so the flow describes the reply
 * direction.
 */
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb))
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	/* Transport selectors are only meaningful on non-fragments (the
	 * first fragment is also skipped by ip_is_fragment()).
	 */
	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			/* Either the needed transport bytes are already in the
			 * linear area, or pull them in.  xprth is recomputed
			 * afterwards because pskb_may_pull() may reallocate
			 * the skb head.
			 */
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;

		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;

		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ehdr = (__be32 *)xprth;

				/* ESP carries the SPI in its first word. */
				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;

		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ah_hdr = (__be32 *)xprth;

				/* AH carries the SPI in its second word. */
				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;

		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				/* The 16-bit CPI is widened into the SPI field. */
				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;

		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					/* The key follows the optional
					 * checksum word.
					 */
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;

		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;
}
/* get interconnect packet header */ struct icpkthdr * get_ic_pkt_hdr(struct sk_buff *skb, int hook_num) { struct udphdr *udp_header; struct icpkthdr *ic_header; struct iphdr *ip_header; if (!skb) { /* printk(KERN_INFO "skb_buff is null \n"); */ return NULL; } /* skb->head + skb->network_header; not avail in 2.6.18 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ip_header = (struct iphdr *) (skb->nh.iph); #else ip_header = (struct iphdr *) skb_network_header(skb); #endif if (ip_header == NULL) { /* printk(KERN_INFO"ip header is null\n"); */ /* printk(KERN_INFO"*****************END******************\n"); */ return NULL; } if (ip_header->protocol != IPPROTO_UDP) { if (DEBUG) { /* printk(KERN_INFO "**OTHER PROTOCOL**\n"); */ /* printk(KERN_INFO"*****************END******************\n"); */ } return NULL; } /* check interconnect packet from packet length */ if (htons(ip_header->tot_len) < sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(struct icpkthdr)) { if (DEBUG) { /* printk(KERN_INFO"**PACKET LENGTH <92**\n"); */ /* printk(KERN_INFO"*****************END******************\n"); */ } return NULL; } /* printk(KERN_INFO "**PACKET IP ID** %d\n", ip_header->id); */ udp_header = (struct udphdr *) (ip_header + 1); ic_header = (struct icpkthdr *) (udp_header + 1); /* check interconnect packet from the valid of fileds */ if (ic_header->motNodeId >= 256 || ic_header->dstListenerPort >= 65536 || ic_header->srcListenerPort >= 65536 || ic_header->recvSliceIndex >= 256 || ic_header->sendSliceIndex >= 256 || ic_header->flags >= 256 ) { return NULL; } /* printk(KERN_INFO " < hook num > %d\n ", hook_num); */ /* * printk(KERN_INFO"< flag > %d < seqs> * %d\n",ic_header->flags,ic_header->seq); */ /* printk(KERN_INFO "**PACKET IP ID** %d\n", ip_header->id); */ return ic_header; }
/*
 * Queue one IPv6 fragment into its reassembly queue @fq.
 *
 * Validates the fragment's offset and length, rejects overlapping
 * fragments (RFC 5722: the whole datagram is then discarded), links
 * the skb into the offset-sorted fragment chain, and triggers
 * reassembly once first+last fragments have arrived and all bytes are
 * accounted for.
 *
 * Returns the result of ip6_frag_reasm() when the datagram became
 * complete, -1 otherwise (including every error path).
 */
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* Fragment offset is in 8-byte units; mask off the flag bits. */
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	/* Subtract the headers we are about to strip from a COMPLETE
	 * hardware checksum so it stays valid for the payload.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Zero-length fragment: nothing to queue. */
	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?  Fast path: most fragments arrive in
	 * order, so check the tail first.
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *   When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	/* All fragments present and accounted for: reassemble now. */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	/* Still waiting for more fragments: refresh LRU position. */
	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
/*
 * GSO segmentation for IPv6: delegate to the upper-protocol offload
 * handler to split a large packet into wire-sized segments, then fix
 * up each segment's IPv6 payload length and, for UDP fragmentation,
 * its fragment header (offset and MF flag).
 *
 * Returns the list of segments, or an ERR_PTR() on failure.
 */
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;

	/* Reject gso_type bits we do not know how to handle. */
	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_GRE_CSUM |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_UDP_TUNNEL_CSUM |
		       SKB_GSO_MPLS |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	/* Offset of the IPv6 header from the MAC header; reused below to
	 * find each segment's IPv6 header.
	 */
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	/* Tunnelled (inner) segmentation uses the device's encap features. */
	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features = skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;

	/* Fix up each produced segment's IPv6 header. */
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (udpfrag) {
			/* Rebuild the fragment header: running offset, and
			 * MF on every segment but the last.
			 */
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			if (skb->next != NULL)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
/*
 * Glue the collected IPv6 fragments in @fq into one datagram.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * Returns 1 on successful reassembly, -1 on failure.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		/* head takes over the old first fragment's identity. */
		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		/* plen = linear+paged bytes of head; the remainder of its
		 * data_len belongs to the moved frag_list.
		 */
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	/* Fold each trailing fragment's length/checksum accounting into
	 * the head skb.
	 */
	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n",
		       payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
/*
 * Build a complete IPv4 datagram from the fragments collected in @qp.
 * Called once all fragments have arrived; glues them into a single skb
 * (the head) and fixes the IP header.
 *
 * Returns 0 on success, or a negative errno (-E2BIG for oversized
 * datagrams, -ENOMEM on allocation failure).
 */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		/* head takes over the old first fragment's identity. */
		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	/* Total length must fit the 16-bit IP tot_len field. */
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		/* plen = linear+paged bytes of head; the remainder of its
		 * data_len belongs to the moved frag_list.
		 */
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &qp->q.net->mem);

	/* Fold each trailing fragment's length/checksum accounting into
	 * the head skb.
	 */
	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &qp->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	/* The reassembled datagram is no longer a fragment. */
	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			"queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
		       &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
/*
 * Build the IP options (@dopt) to use in a reply to @skb, echoing the
 * received options (stashed in IPCB(skb)->opt): record-route and
 * timestamp slots are carried over, and a received source route is
 * reversed so the reply retraces the recorded path.
 *
 * Returns 0 on success, -EINVAL when the received options are
 * malformed.
 */
int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
{
	struct ip_options *sopt;
	unsigned char *sptr, *dptr;
	int soffset, doffset;
	int optlen;
	__be32 daddr;

	memset(dopt, 0, sizeof(struct ip_options));

	sopt = &(IPCB(skb)->opt);

	if (sopt->optlen == 0) {
		dopt->optlen = 0;
		return 0;
	}

	sptr = skb_network_header(skb);
	dptr = dopt->__data;

	daddr = skb_rtable(skb)->rt_spec_dst;

	if (sopt->rr) {
		/* Copy the record-route option; optlen/soffset are the
		 * option's length and next-slot pointer bytes.
		 */
		optlen = sptr[sopt->rr+1];
		soffset = sptr[sopt->rr+2];
		dopt->rr = dopt->optlen + sizeof(struct iphdr);
		memcpy(dptr, sptr+sopt->rr, optlen);
		if (sopt->rr_needaddr && soffset <= optlen) {
			if (soffset + 3 > optlen)
				return -EINVAL;
			/* Advance the pointer past the slot our own address
			 * will occupy.
			 */
			dptr[2] = soffset + 4;
			dopt->rr_needaddr = 1;
		}
		dptr += optlen;
		dopt->optlen += optlen;
	}
	if (sopt->ts) {
		optlen = sptr[sopt->ts+1];
		soffset = sptr[sopt->ts+2];
		dopt->ts = dopt->optlen + sizeof(struct iphdr);
		memcpy(dptr, sptr+sopt->ts, optlen);
		if (soffset <= optlen) {
			if (sopt->ts_needaddr) {
				if (soffset + 3 > optlen)
					return -EINVAL;
				dopt->ts_needaddr = 1;
				soffset += 4;
			}
			if (sopt->ts_needtime) {
				if (soffset + 3 > optlen)
					return -EINVAL;
				if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
					dopt->ts_needtime = 1;
					soffset += 4;
				} else {
					dopt->ts_needtime = 0;
					/* PRESPEC: only stamp when the next
					 * prespecified address is one of ours.
					 */
					if (soffset + 7 <= optlen) {
						__be32 addr;

						memcpy(&addr, dptr+soffset-1, 4);
						if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
							dopt->ts_needtime = 1;
							soffset += 8;
						}
					}
				}
			}
			dptr[2] = soffset;
		}
		dptr += optlen;
		dopt->optlen += optlen;
	}
	if (sopt->srr) {
		unsigned char * start = sptr+sopt->srr;
		__be32 faddr;

		optlen = start[1];
		soffset = start[2];
		doffset = 0;
		if (soffset > optlen)
			soffset = optlen + 1;
		soffset -= 4;
		if (soffset > 3) {
			/* Reverse the recorded route so the reply retraces
			 * it; faddr becomes the first hop of the reply.
			 */
			memcpy(&faddr, &start[soffset-1], 4);
			for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
				memcpy(&dptr[doffset-1], &start[soffset-1], 4);
			/*
			 * RFC1812 requires to fix illegal source routes.
			 */
			if (memcmp(&ip_hdr(skb)->saddr,
				   &start[soffset + 3], 4) == 0)
				doffset -= 4;
		}
		if (doffset > 3) {
			memcpy(&start[doffset-1], &daddr, 4);
			dopt->faddr = faddr;
			dptr[0] = start[0];
			dptr[1] = doffset+3;
			dptr[2] = 4;
			dptr += doffset+3;
			dopt->srr = dopt->optlen + sizeof(struct iphdr);
			dopt->optlen += doffset+3;
			dopt->is_strictroute = sopt->is_strictroute;
		}
	}
	if (sopt->cipso) {
		optlen = sptr[sopt->cipso+1];
		dopt->cipso = dopt->optlen+sizeof(struct iphdr);
		memcpy(dptr, sptr+sopt->cipso, optlen);
		dptr += optlen;
		dopt->optlen += optlen;
	}
	/* Pad the echoed options out to a 4-byte boundary. */
	while (dopt->optlen & 3) {
		*dptr++ = IPOPT_END;
		dopt->optlen++;
	}
	return 0;
}
/*
 * Glue the collected IPv6 fragments in @fq into one datagram.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * Returns 1 on successful reassembly, -1 on failure.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;
	int sum_truesize;
	u8 ecn;

	inet_frag_kill(&fq->q, &ip6_frags);

	/* 0xff marks an invalid ECN combination across the fragments. */
	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		/* head takes over the old first fragment's identity. */
		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		/* plen = linear+paged bytes of head; the remainder of its
		 * data_len belongs to the moved frag_list.
		 */
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	/* Coalesce trailing fragments into head where possible, otherwise
	 * chain them onto head's frag_list.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats. */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	/* Restore skb->data to the MAC header; the pulls above only served
	 * to position network_header.
	 */
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				/* Truncated/invalid header is not fatal: the
				 * flow simply carries no L3/L4 fields.
				 */
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		/* Non-first fragments carry no transport header to parse. */
		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		/* Only Ethernet/IPv4 ARP is extracted. */
		if (arphdr_ok(skb) &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip,
			       sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip,
			       sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			/* Only the topmost label is kept in the key. */
			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			/* Bottom-of-stack bit ends the label walk. */
			if (lse & htonl(MPLS_BOS_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
			if (nh_len == -EINVAL) {
				/* Truncated header: not fatal, flow has no
				 * L3/L4 fields.
				 */
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}

	OVS_CB(skb)->pkt_key = key;
	return 0;
}
//---------------------------------------------------------------------------
/*
 * nas_COMMON_receive - deliver a PDCP SDU received on a radio bearer to the
 * kernel network stack.
 *
 * @dlen:     length of the SDU in bytes
 * @pdcp_sdu: pointer to the SDU payload (copied into a fresh skb)
 * @inst:     NAS device instance index (selects nasdev[inst])
 * @rclass:   classifier entry for this bearer; rclass->version tells IP vs MPLS
 * @rb_id:    radio bearer id (debug output only)
 *
 * Allocates an skb, copies the payload in, classifies it (IPv4 / IPv6 / MPLS),
 * optionally rewrites IPv4 addresses (NAS_ADDRESS_FIX) and incrementally
 * repairs the IP/TCP/UDP checksums, then hands the skb to netif_rx().
 * On allocation failure the packet is counted in rx_dropped and discarded.
 */
void nas_COMMON_receive(uint16_t dlen,
                        void *pdcp_sdu,
                        int inst,
                        struct classifier_entity *rclass,
                        nasRadioBearerId_t rb_id)
{
  //---------------------------------------------------------------------------
  struct sk_buff *skb;
  struct ipversion *ipv;
  struct nas_priv *gpriv = netdev_priv(nasdev[inst]);
  unsigned char protocol;
  struct iphdr *network_header;
#ifdef NAS_ADDRESS_FIX
  /* Only the address-fix path touches these. */
  uint32_t odaddr, osaddr;              /* original (pre-rewrite) addresses */
  unsigned char *daddr, *saddr, *ifaddr;
  uint16_t *cksum, check;
#endif
#ifdef NAS_DEBUG_RECEIVE
  /* These were commented out of the declaration list upstream although the
   * debug code below uses them, which broke the NAS_DEBUG_RECEIVE build. */
  unsigned char *addr;
  int i;
#endif

#ifdef NAS_DEBUG_RECEIVE
  printk("NAS_COMMON_RECEIVE: begin RB %d Inst %d Length %d bytes\n",rb_id,inst,dlen);
#endif

  skb = dev_alloc_skb( dlen + 2 );

  if (!skb) {
    printk("NAS_COMMON_RECEIVE: low on memory\n");
    ++gpriv->stats.rx_dropped;
    return;
  }

  /* 2 bytes of headroom so the IP header lands on a 4-byte boundary. */
  skb_reserve(skb, 2);
  memcpy(skb_put(skb, dlen), pdcp_sdu, dlen);

  skb->dev = nasdev[inst];
  skb_reset_mac_header(skb);
  skb->pkt_type = PACKET_HOST;

  if (rclass->version != NAS_MPLS_VERSION_CODE) {
    /* This is an IP packet; checksums are recomputed here, never offloaded. */
    skb->ip_summed = CHECKSUM_NONE;
    ipv = (struct ipversion *)skb->data;

    switch (ipv->version) {
    case 6:
#ifdef NAS_DEBUG_RECEIVE
      printk("NAS_COMMON_RECEIVE: receive IPv6 message\n");
#endif
      skb_reset_network_header(skb);
      skb->protocol = htons(ETH_P_IPV6);
      break;

    case 4:
#ifdef NAS_ADDRESS_FIX
      /* Make the third byte of both the source and destination equal to the
       * fourth byte of the destination, so the local IP stack accepts the
       * packet (undoes the symmetric mangling done on the sending side). */
      daddr  = (unsigned char *)&((struct iphdr *)skb->data)->daddr;
      odaddr = ((struct iphdr *)skb->data)->daddr;
      saddr  = (unsigned char *)&((struct iphdr *)skb->data)->saddr;
      osaddr = ((struct iphdr *)skb->data)->saddr;

      if (daddr[0] == saddr[0]) { /* same network */
        daddr[2] = daddr[3];
        saddr[2] = daddr[3];
      } else {
        /* Take the 3rd byte from the device address in the net_device. */
        ifaddr = (unsigned char *)(&(((struct in_device *)((nasdev[inst])->ip_ptr))->ifa_list->ifa_local));

        if (saddr[0] == ifaddr[0]) {
          /* Source is on the same network as the local machine. */
          /* NOTE(review): the other branches patch byte [2]; this adds to
           * byte [0] — confirm against the sender-side transform. */
          daddr[0] += saddr[3]; /* fix remote destination to undo change at source */
          saddr[2] = ifaddr[2]; /* so that the local IP stack accepts the packet */
        } else {
          /* Source is a remote machine outside the network. */
          saddr[0] -= daddr[3]; /* fix remote source for the destination */
          daddr[2] = daddr[3]; /* fix 3rd byte of the local address */
        }
      }
#endif /* NAS_ADDRESS_FIX */

#ifdef NAS_DEBUG_RECEIVE
      /* addr is the address of a header field inside skb->data: never NULL. */
      addr = (unsigned char *)&((struct iphdr *)skb->data)->saddr;
      printk("[NAS][COMMON][RECEIVE] Source %d.%d.%d.%d\n",addr[0],addr[1],addr[2],addr[3]);
      addr = (unsigned char *)&((struct iphdr *)skb->data)->daddr;
      printk("[NAS][COMMON][RECEIVE] Dest %d.%d.%d.%d\n",addr[0],addr[1],addr[2],addr[3]);
      printk("[NAS][COMMON][RECEIVE] protocol %d\n",((struct iphdr *)skb->data)->protocol);
#endif

      skb_reset_network_header(skb);
      network_header = (struct iphdr *)skb_network_header(skb);
      protocol = network_header->protocol;

#ifdef NAS_DEBUG_RECEIVE
      switch (protocol) {
      case IPPROTO_IP:
        printk("[NAS][COMMON][RECEIVE] Received Raw IPv4 packet\n");
        break;

      case IPPROTO_IPV6:
        printk("[NAS][COMMON][RECEIVE] Received Raw IPv6 packet\n");
        break;

      case IPPROTO_ICMP:
        printk("[NAS][COMMON][RECEIVE] Received Raw ICMP packet\n");
        break;

      case IPPROTO_TCP:
        printk("[NAS][COMMON][RECEIVE] Received TCP packet\n");
        break;

      case IPPROTO_UDP:
        printk("[NAS][COMMON][RECEIVE] Received UDP packet\n");
        break;

      default:
        break;
      }
#endif

#ifdef NAS_ADDRESS_FIX
#ifdef NAS_DEBUG_RECEIVE
      printk("NAS_COMMON_RECEIVE: dumping the packet before the csum recalculation (len %d)\n",skb->len);

      for (i = 0; i < skb->len; i++)
        printk("%02x ", ((unsigned char *)(skb->data))[i]);

      printk("\n");
#endif /* NAS_DEBUG_RECEIVE */

      /* Rewriting the addresses invalidated the IP header checksum. */
      network_header->check = 0;
      network_header->check = ip_fast_csum((unsigned char *)network_header, network_header->ihl);
#ifdef NAS_DEBUG_RECEIVE
      printk("[NAS][COMMON][RECEIVE] IP Fast Checksum %x \n", network_header->check);
#endif

      switch (protocol) {
      case IPPROTO_TCP:
        cksum = (uint16_t *)&(((struct tcphdr *)(((char *)network_header + (network_header->ihl << 2))))->check);
#ifdef NAS_DEBUG_RECEIVE
        printk("[NAS][COMMON] Inst %d TCP packet calculated CS %x, CS = %x (before), SA (%x)%x, DA (%x)%x\n",
               inst,
               network_header->check,
               *cksum,
               osaddr,
               ((struct iphdr *)skb->data)->saddr,
               odaddr,
               ((struct iphdr *)skb->data)->daddr);
#endif
        /* Incremental update: fold the new address pair into the existing
         * checksum, then fold the old pair back out (lengths/proto cancel). */
        check  = csum_tcpudp_magic(((struct iphdr *)skb->data)->saddr, ((struct iphdr *)skb->data)->daddr, 0, 0, ~(*cksum));
        *cksum = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
#ifdef NAS_DEBUG_RECEIVE
        printk("[NAS][COMMON] Inst %d TCP packet NEW CS %x\n", inst, *cksum);
#endif
        break;

      case IPPROTO_UDP:
        cksum = (uint16_t *)&(((struct udphdr *)(((char *)network_header + (network_header->ihl << 2))))->check);
#ifdef NAS_DEBUG_RECEIVE
        printk("[NAS][COMMON] Inst %d UDP packet CS = %x (before), SA (%x)%x, DA (%x)%x\n",
               inst,
               *cksum,
               osaddr,
               ((struct iphdr *)skb->data)->saddr,
               odaddr,
               ((struct iphdr *)skb->data)->daddr);
#endif
        /* Same incremental checksum repair as the TCP case above. */
        check  = csum_tcpudp_magic(((struct iphdr *)skb->data)->saddr, ((struct iphdr *)skb->data)->daddr, 0, 0, ~(*cksum));
        *cksum = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
#ifdef NAS_DEBUG_RECEIVE
        printk("[NAS][COMMON] Inst %d UDP packet NEW CS %x\n", inst, *cksum);
#endif
        break;

      default:
        break;
      }
#endif /* NAS_ADDRESS_FIX */

      skb->protocol = htons(ETH_P_IP);
      break;

    default:
      printk("NAS_COMMON_RECEIVE: begin RB %d Inst %d Length %d bytes\n",rb_id,inst,dlen);
      printk("[NAS][COMMON] Inst %d: receive unknown message (version=%d)\n",inst,ipv->version);
    }
  } else { /* This is an MPLS packet */
#ifdef NAS_DEBUG_RECEIVE
    printk("NAS_COMMON_RECEIVE: Received an MPLS packet on RB %d\n",rb_id);
#endif
    skb->protocol = htons(ETH_P_MPLS_UC);
  }

  ++gpriv->stats.rx_packets;
  gpriv->stats.rx_bytes += dlen;

#ifdef NAS_DEBUG_RECEIVE
  printk("NAS_COMMON_RECEIVE: sending packet of size %d to kernel\n",skb->len);

  for (i = 0; i < skb->len; i++)
    printk("%02x ", ((unsigned char *)(skb->data))[i]);

  printk("\n");
#endif /* NAS_DEBUG_RECEIVE */

  netif_rx(skb);

#ifdef NAS_DEBUG_RECEIVE
  printk("NAS_COMMON_RECEIVE: end\n");
#endif
}