/*
 * Incrementally update a TCP checksum after IPVS rewrote the address and
 * port, using checksum differences instead of a full recompute:
 *   new = fold(diff_addr(old_ip, new_ip,
 *                        diff_port(old_port, new_port, ~unfold(old_check))))
 * @af selects the 16-byte (IPv6) or 4-byte (IPv4) address diff.
 */
static inline void tcp_fast_csum_update(int af, struct tcphdr *tcph,
					const union nf_inet_addr *oldip,
					const union nf_inet_addr *newip,
					__be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		/* IPv6: 16-byte address diff wraps the port diff */
		tcph->check =
			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
	else
#endif
		tcph->check =
			csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
}
/*
 * Copy an skb's payload (after @hlen header bytes) to a user iovec while
 * verifying the packet checksum.
 * Returns 0 on success, -EINVAL on checksum failure, -EFAULT on copy fault
 * (in which case the iovec may already have been modified).
 */
int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov)
{
	unsigned int csum;
	int chunk = skb->len - hlen;

	/* Skip filled elements. Pretty silly, look at memcpy_toiovec, though 8) */
	while (iov->iov_len == 0)
		iov++;

	if (iov->iov_len < chunk) {
		/* Payload spans several iovec entries: verify the whole
		 * packet checksum up front, then do a plain copy. */
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen, skb->csum)))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		/* Fast path: checksum the header, then copy-and-checksum
		 * the payload in one pass and fold the combined sum. */
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum))
			goto fault;
		if ((unsigned short)csum_fold(csum))
			goto csum_error;
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;

csum_error:
	return -EINVAL;

fault:
	return -EFAULT;
}
/*
 * Glue callback: build one ICMP reply fragment at @to, copying the cached
 * ICMP header and payload while accumulating the checksum, then write the
 * folded checksum into the header in place.
 * Only single-fragment replies are supported (offset must be 0).
 */
static int rt_icmp_glue_reply_bits(const void *p, char *to, unsigned int offset, unsigned int fraglen)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
	struct icmphdr *icmph;
	unsigned long csum;

	/* TODO: add support for fragmented ICMP packets */
	if (offset != 0)
		return -EMSGSIZE;

	/* Copy the prepared ICMP header, seeding the running checksum. */
	csum = csum_partial_copy_nocheck((void *)&icmp_param->head, to,
					 icmp_param->head_len, icmp_param->csum);

	/* Append the payload taken from the original skb, extending the sum. */
	csum = rtskb_copy_and_csum_bits(icmp_param->data.skb, icmp_param->offset,
					to + icmp_param->head_len,
					fraglen - icmp_param->head_len, csum);

	icmph = (struct icmphdr *)to;
	icmph->checksum = csum_fold(csum);

	return 0;
}
/* set ECT codepoint from IP header.
 * return 0 in case there was no ECT codepoint
 * return 1 in case ECT codepoint has been overwritten
 * return < 0 in case there was error */
static int inline set_ect_ip(struct sk_buff **pskb, struct iphdr *iph,
			     const struct ipt_ECN_info *einfo)
{
	if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
		/* old and new 16-bit words for the incremental csum update */
		u_int16_t diffs[2];

		/* raw socket (tcpdump) may have clone of incoming
		 * skb: don't disturb it --RR */
		if (skb_cloned(*pskb) && !(*pskb)->sk) {
			struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
			if (!nskb)
				return NF_DROP;
			kfree_skb(*pskb);
			*pskb = nskb;
			/* iph pointed into the old skb; refresh it */
			iph = (*pskb)->nh.iph;
		}

		/* record complemented old tos word, rewrite ECT bits,
		 * record new word, then fold the diff into the checksum */
		diffs[0] = htons(iph->tos) ^ 0xFFFF;
		iph->tos = iph->tos & ~IPT_ECN_IP_MASK;
		iph->tos = iph->tos | (einfo->ip_ect & IPT_ECN_IP_MASK);
		diffs[1] = htons(iph->tos);
		iph->check = csum_fold(csum_partial((char *)diffs,
						    sizeof(diffs),
						    iph->check ^ 0xFFFF));
		(*pskb)->nfcache |= NFC_ALTERED;
		return 1;
	}
	return 0;
}
/*
 * Incrementally patch *ctx->check after an IP address inside the SNMP
 * payload was rewritten from ctx->from to ctx->to.  A scratch buffer is
 * built containing the complemented old address and the new address so a
 * single csum_partial() applies the difference.
 */
static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
{
	unsigned char s[12] = {0,};
	int size;

	if (offset & 1) {
		/* Odd payload offset: the address straddles 16-bit checksum
		 * lanes, so shift both addresses by one byte and complement
		 * the surrounding pad bytes too. */
		memcpy(&s[1], &ctx->from, 4);
		memcpy(&s[7], &ctx->to, 4);
		s[0] = ~0;
		s[1] = ~s[1];
		s[2] = ~s[2];
		s[3] = ~s[3];
		s[4] = ~s[4];
		s[5] = ~0;
		size = 12;
	} else {
		/* Even offset: complemented old address followed by new. */
		memcpy(&s[0], &ctx->from, 4);
		memcpy(&s[4], &ctx->to, 4);
		s[0] = ~s[0];
		s[1] = ~s[1];
		s[2] = ~s[2];
		s[3] = ~s[3];
		size = 8;
	}
	*ctx->check = csum_fold(csum_partial(s, size,
					     ~csum_unfold(*ctx->check)));
}
/*
 * Fill in the GRE base header and its optional fields (key/seq/csum) for an
 * outgoing tunnel packet.  Options are written back-to-front from the end of
 * the header area so that the checksum option — which must cover the final
 * packet contents — is written last.
 */
static void __gre_build_header(struct sk_buff *skb,
			       int tunnel_hlen,
			       bool is_gre64)
{
	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
	/* last option slot, walked downwards */
	__be32 *options = (__be32 *)(skb_network_header(skb) + tunnel_hlen
				     - GRE_HEADER_SECTION);
	struct gre_base_hdr *greh = (struct gre_base_hdr *) skb_transport_header(skb);

	greh->protocol = htons(ETH_P_TEB);
	greh->flags = 0;

	/* Work backwards over the options so the checksum is last. */
	if (tun_key->tun_flags & OVS_TNL_F_KEY || is_gre64) {
		greh->flags |= GRE_KEY;
		if (is_gre64) {
			/* Set higher 32 bits to seq. */
			*options = be64_get_high32(tun_key->tun_id);
			options--;
			greh->flags |= GRE_SEQ;
		}
		*options = be64_get_low32(tun_key->tun_id);
		options--;
	}

	if (tun_key->tun_flags & OVS_TNL_F_CSUM) {
		greh->flags |= GRE_CSUM;
		/* Zero the field first: it is included in the sum. */
		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb,
						skb_transport_offset(skb),
						skb->len - skb_transport_offset(skb),
						0));
	}
}
/*
 * Push and populate a GRE header of @hdr_len bytes in front of the packet.
 * Optional fields (seq, key, checksum) are laid out from the end of the
 * header backwards; the checksum — covering the whole packet — is written
 * last, and is skipped for GRE GSO packets (the stack fills it later).
 */
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
			 __be16 proto, __be32 key, __be32 seq)
{
	struct gre_base_hdr *greh;
	__be32 *opt;

	skb_push(skb, hdr_len);
	skb_reset_transport_header(skb);
	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = tnl_flags_to_gre_flags(flags);
	greh->protocol = proto;

	if (!(flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)))
		return;

	/* Last 4-byte option slot; walk downwards as fields are emitted. */
	opt = (__be32 *)(((u8 *)greh) + hdr_len - 4);

	if (flags & TUNNEL_SEQ) {
		*opt = seq;
		opt--;
	}
	if (flags & TUNNEL_KEY) {
		*opt = key;
		opt--;
	}
	if (flags & TUNNEL_CSUM &&
	    !(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
		/* Field must be zero while the packet is summed. */
		*opt = 0;
		*(__sum16 *)opt = csum_fold(skb_checksum(skb, 0, skb->len, 0));
	}
}
/*
 * Validate the GRE checksum (when the GRE_CSUM flag is present).
 * Returns true if the packet has no checksum option or the checksum
 * verifies to zero.
 */
static bool check_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	/* GRE header sits immediately after the IP header */
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
	__sum16 csum = 0;

	if (greh->flags & GRE_CSUM) {
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			/* Try the hardware-provided sum first; if it does
			 * not verify, recompute from scratch below. */
			csum = csum_fold(skb->csum);
			if (!csum)
				break;
			/* Fall through. */
		case CHECKSUM_NONE:
			skb->csum = 0;
			csum = __skb_checksum_complete(skb);
			skb->ip_summed = CHECKSUM_COMPLETE;
			break;
		}
	}

	return (csum == 0);
}
/*
 * Recalculate a 16-bit checksum incrementally after a 32-bit datum changed,
 * borrowed from netfilter: feed the complemented old value and the new value
 * through csum_partial seeded with the complemented old checksum, then fold.
 */
DEBUG_NO_STATIC u_int16_t
ipsec_fast_csum(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
{
	u_int32_t words[2];

	words[0] = oldvalinv;	/* caller supplies ~oldval */
	words[1] = newval;
	return csum_fold(csum_partial((char *)words, sizeof(words),
				      oldcheck ^ 0xFFFF));
}
/*
 * iptables TOS target: rewrite the TOS bits of the IPv4 header to the
 * configured value (preserving the precedence bits) and patch the header
 * checksum incrementally.  Always returns IPT_CONTINUE.
 */
static unsigned int target(struct sk_buff **pskb,
			   const struct net_device *in,
			   const struct net_device *out,
			   unsigned int hooknum,
			   const void *targinfo,
			   void *userinfo)
{
	const struct ipt_tos_target_info *tosinfo = targinfo;

	if (((*pskb)->nh.iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) {
		/* complemented old word + new word for the csum diff */
		u_int16_t diffs[2];

		/* may replace *pskb with a private writable copy */
		if (!skb_ip_make_writable(pskb, sizeof(struct iphdr)))
			return NF_DROP;

		diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF;
		(*pskb)->nh.iph->tos =
			((*pskb)->nh.iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
		diffs[1] = htons((*pskb)->nh.iph->tos);
		(*pskb)->nh.iph->check =
			csum_fold(csum_partial((char *)diffs,
					       sizeof(diffs),
					       (*pskb)->nh.iph->check ^ 0xFFFF));
		(*pskb)->nfcache |= NFC_ALTERED;
	}
	return IPT_CONTINUE;
}
/**
 * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 * @skb: skbuff
 * @hlen: hardware length
 * @iov: io vector
 *
 * Caller _must_ check that skb will fit to this iovec.
 *
 * Returns: 0 - success.
 * -EINVAL - checksum failure.
 * -EFAULT - fault during copy. Beware, in this case iovec
 * can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
				     struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		/* Payload spans multiple iovec entries: verify the whole
		 * checksum first, then plain-copy. */
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		/* Fast path: checksum-while-copying into a single entry. */
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		/* Hardware claimed the sum was good but it wasn't: report
		 * the device so the driver bug is logged. */
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;

csum_error:
	return -EINVAL;

fault:
	return -EFAULT;
}
/***
 * rtskb_copy_and_csum_dev - copy an rtskb to a device buffer, completing
 * a deferred (CHECKSUM_HW) transport checksum on the way.
 *
 * Bytes up to the transport header are copied verbatim; the remainder is
 * copied while being summed, and the folded result is stored at the
 * checksum-stuff offset recorded in skb->csum.
 */
void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to)
{
	unsigned int sum = 0;
	unsigned int head_len;

	/* Portion copied without checksumming: up to the transport header
	 * when hardware checksumming was deferred, else the whole skb. */
	if (skb->ip_summed == CHECKSUM_HW) {
		head_len = skb->h.raw - skb->data;
		if (head_len > skb->len)
			BUG();
	} else {
		head_len = skb->len;
	}

	memcpy(to, skb->data, head_len);

	if (head_len != skb->len)
		sum = rtskb_copy_and_csum_bits(skb, head_len, to + head_len,
					       skb->len - head_len, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		/* skb->csum holds the checksum-field offset from head_len. */
		unsigned int stuff_off = head_len + skb->csum;

		*((unsigned short *)(to + stuff_off)) = csum_fold(sum);
	}
}
/*
 * Verify the L4 checksum of an IPv4 packet for netfilter.
 * Returns 0 if the checksum is valid (or not checked at this hook),
 * non-zero on failure.  @protocol == 0 means "plain sum, no pseudo-header"
 * (used for ICMP).
 */
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
		       unsigned int dataoff, u_int8_t protocol)
{
	const struct iphdr *iph = ip_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Only trust the hardware sum where the packet cannot have
		 * been mangled after it was computed. */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		if ((protocol == 0 && !csum_fold(skb->csum)) ||
		    !csum_tcpudp_magic(iph->saddr, iph->daddr,
				       skb->len - dataoff, protocol,
				       skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		/* Seed with the pseudo-header (or zero for raw ICMP) and
		 * do a full software verification. */
		if (protocol == 0)
			skb->csum = 0;
		else
			skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						       skb->len - dataoff,
						       protocol, 0);
		csum = __skb_checksum_complete(skb);
	}
	/* CHECKSUM_UNNECESSARY and friends fall out with csum == 0. */
	return csum;
}
/*
 * Queue a received UDPv6 datagram onto the socket, verifying a deferred
 * checksum first.  Updates UDP6/IP6 MIB counters and always returns 0,
 * dropping (freeing) the skb on checksum failure or a full receive queue.
 */
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	/* takes a reference; every exit path must in6_dev_put() it */
	struct inet6_dev *idev = in6_dev_get(skb->dev);

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		/* software-verify the UDP checksum over the whole packet */
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			UDP6_INC_STATS_BH(UdpInErrors);
			IP6_INC_STATS_BH(idev,Ip6InDiscards);
			if (idev)
				in6_dev_put(idev);
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (sock_queue_rcv_skb(sk,skb)<0) {
		/* receive buffer full: count and drop */
		UDP6_INC_STATS_BH(UdpInErrors);
		IP6_INC_STATS_BH(idev,Ip6InDiscards);
		if (idev)
			in6_dev_put(idev);
		kfree_skb(skb);
		return 0;
	}
	IP6_INC_STATS_BH(idev,Ip6InDelivers);
	UDP6_INC_STATS_BH(UdpInDatagrams);
	if (idev)
		in6_dev_put(idev);
	return 0;
}
/* We do checksum mangling, so if they were wrong before they're still
 * wrong. Also works for incomplete packets (eg. ICMP dest unreachables):
 * apply a 32-bit value change incrementally by summing the complemented
 * old value and the new value over the complemented old checksum.
 */
static inline u_int16_t
ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
{
	u_int32_t words[2];

	words[0] = oldvalinv;	/* caller passes ~oldval */
	words[1] = newval;
	return csum_fold(csum_partial((char *)words, sizeof(words),
				      oldcheck ^ 0xFFFF));
}
static unsigned int ipoptstrip_tg(struct sk_buff *skb, const struct xt_action_param *par) { struct ip_options *opt = &(IPCB(skb)->opt); unsigned char *opt_ptr, *opt_end_ptr; struct iphdr *iphdr; const struct xt_ipoptstrip_tg_info *info; __wsum csum32; if (opt->optlen > 0) { iphdr = ip_hdr(skb); info = par->targinfo; #ifdef DEBUG printk("flags: %x\n", info->flags); printk("Packet with IP options (%i bytes) from: %pI4 to: %pI4\n", opt->optlen, &iphdr->saddr, &iphdr->daddr); print_skb_header_offsets(skb); #endif if (! XT_IPOPTSTRIP_IS_SET(info->flags, XT_IPOPTSTRIP_KEEP_DST)) { opt_ptr = (unsigned char*) &iphdr[1]; opt_end_ptr = opt_ptr + opt->optlen; for (; opt_ptr < opt_end_ptr; opt_ptr++) { switch (*opt_ptr) { case IPOPT_LSRR: case IPOPT_SSRR: /* Re-write destination field with last address */ memcpy(&iphdr->daddr, (opt_ptr+(opt_ptr[1]))-4, 4); break; } } } /* Alter header and total lengths */ iphdr->ihl = IPV4_HL; // 5 32-bit words in IPv4 header with no options iphdr->tot_len -= cpu_to_be16(opt->optlen); /* Move transport header pointer to after network header */ skb_set_transport_header(skb, IPV4_LEN); /* Move remaining data up the buffer */ memmove(skb_transport_header(skb), skb_transport_header(skb) + opt->optlen, skb->tail - (skb->transport_header + opt->optlen)); /* Remove un-needed buffer space */ skb_trim(skb, (skb->len - opt->optlen)); /* Re-calculate IP header checksum */ csum32 = csum_partial(iphdr, sizeof(struct iphdr), 0); iphdr->check = csum_fold(csum32); #ifdef DEBUG print_skb_header_offsets(skb); #endif } return XT_CONTINUE; }
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, int hdr_len) { struct gre_base_hdr *greh; skb_push(skb, hdr_len); greh = (struct gre_base_hdr *)skb->data; greh->flags = tnl_flags_to_gre_flags(tpi->flags); greh->protocol = tpi->proto; if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); if (tpi->flags&TUNNEL_SEQ) { *ptr = tpi->seq; ptr--; } if (tpi->flags&TUNNEL_KEY) { *ptr = tpi->key; ptr--; } if (tpi->flags&TUNNEL_CSUM && !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { *ptr = 0; *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, skb->len, 0)); } } }
/* Return 0 if there was an error. */
static inline int set_ect_tcp(struct sk_buff **pskb,
			      const struct ipt_ECN_info *einfo)
{
	/* local copy of the TCP header; written back only if changed */
	struct tcphdr tcph;
	/* old/new 16-bit header words for the incremental csum update */
	u_int16_t diffs[2];

	/* Not enought header? */
	if (skb_copy_bits(*pskb, (*pskb)->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
		return 0;

	/* word 6 of the TCP header holds the ECE/CWR flag bits */
	diffs[0] = ((u_int16_t *)&tcph)[6];
	if (einfo->operation & IPT_ECN_OP_SET_ECE)
		tcph.ece = einfo->proto.tcp.ece;
	if (einfo->operation & IPT_ECN_OP_SET_CWR)
		tcph.cwr = einfo->proto.tcp.cwr;
	diffs[1] = ((u_int16_t *)&tcph)[6];

	/* Only mangle if it's changed. */
	if (diffs[0] != diffs[1]) {
		diffs[0] = diffs[0] ^ 0xFFFF;
		/* make the skb writable before storing the new header */
		if (!skb_ip_make_writable(pskb,
					  (*pskb)->nh.iph->ihl*4+sizeof(tcph)))
			return 0;
		tcph.check = csum_fold(csum_partial((char *)diffs,
						    sizeof(diffs),
						    tcph.check^0xFFFF));
		memcpy((*pskb)->data + (*pskb)->nh.iph->ihl*4,
		       &tcph, sizeof(tcph));
		(*pskb)->nfcache |= NFC_ALTERED;
	}
	return 1;
}
/*
 * Glue callback for building an outgoing ICMP message fragment at @to.
 * Non-first fragments only accumulate the payload checksum; the first
 * fragment (offset 0) is built last, so the full checksum is known and can
 * be folded into the ICMP header it writes.
 */
static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned int fraglen)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
	struct icmphdr *icmph;
	unsigned long csum;

	if (offset) {
		/* payload-only fragment: copy and extend the running sum */
		icmp_param->csum=csum_partial_copy(icmp_param->data_ptr+offset-sizeof(struct icmphdr),
						   to, fraglen,icmp_param->csum);
		return 0;
	}

	/*
	 * First fragment includes header. Note that we've done
	 * the other fragments first, so that we get the checksum
	 * for the whole packet here.
	 */
	csum = csum_partial_copy((void *)&icmp_param->icmph,
				 to, sizeof(struct icmphdr),
				 icmp_param->csum);
	csum = csum_partial_copy(icmp_param->data_ptr,
				 to+sizeof(struct icmphdr),
				 fraglen-sizeof(struct icmphdr), csum);
	icmph=(struct icmphdr *)to;
	icmph->checksum = csum_fold(csum);
	return 0;
}
/*
 * Incrementally patch the TCP checksum after a sequence-number rewrite.
 * Skipped entirely when checksum offload is enabled — the checksum is
 * recomputed later in that case.
 */
static inline void tcp_seq_csum_update(struct tcphdr *tcph,
				       __u32 oldseq, __u32 newseq)
{
	/* do checksum later */
	if (sysctl_ip_vs_csum_offload)
		return;

	tcph->check = csum_fold(ip_vs_check_diff4(oldseq, newseq,
						  ~csum_unfold(tcph->check)));
}
/*
 * Incrementally update *sum after a 32-bit (address) field changed from
 * @from to @to: sum the complemented old value and the new value over the
 * complemented current checksum, then fold back to 16 bits.
 */
static inline void daddr_csum_replace4(u16 *sum, u32 from, u32 to)
{
	u32 words[2];

	words[0] = ~from;
	words[1] = to;
	*sum = csum_fold(csum_partial((unsigned char *)words, sizeof(words),
				      ~daddr_csum_unfold(*sum)));
}
/*
 * Incrementally patch the TCP checksum after the MSS option value was
 * rewritten.  A no-op when checksum offload is on — the checksum is
 * computed later in that case.
 */
static inline void tcp_mss_csum_update(struct tcphdr *tcph,
				       __be16 oldmss, __be16 newmss)
{
	/* do checksum later */
	if (sysctl_ip_vs_csum_offload)
		return;

	tcph->check = csum_fold(ip_vs_check_diff2(oldmss, newmss,
						  ~csum_unfold(tcph->check)));
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
		  unsigned short len, unsigned short proto, unsigned int sum)
{
	unsigned int pseudo_sum;

	/* accumulate the pseudo-header into the running 32-bit sum ... */
	pseudo_sum = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);
	/* ... then fold to 16 bits and complement */
	return csum_fold(pseudo_sum);
}
/* Small and modified version of icmp_rcv:
 * sanity-check an ICMP packet for conntrack (header present, checksum
 * valid at PRE_ROUTING, known type) and hand ICMP errors off to
 * icmp_error_message() for related-connection tracking.
 * Returns NF_ACCEPT, -NF_ACCEPT (invalid), or icmp_error_message()'s verdict.
 */
static int icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, unsigned int hooknum)
{
	struct icmphdr _ih, *icmph;

	/* Not enough header? */
	icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
	if (icmph == NULL) {
		if (LOG_INVALID(IPPROTO_ICMP))
			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
				      "ip_ct_icmp: short packet ");
		return -NF_ACCEPT;
	}

	/* See ip_conntrack_proto_tcp.c */
	if (hooknum != NF_IP_PRE_ROUTING)
		goto checksum_skipped;

	switch (skb->ip_summed) {
	case CHECKSUM_HW:
		/* trust the hardware sum if it folds to zero */
		if (!(u16)csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		/* full software verification */
		skb->csum = 0;
		if (__skb_checksum_complete(skb)) {
			if (LOG_INVALID(IPPROTO_ICMP))
				nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
					      "ip_ct_icmp: bad ICMP checksum ");
			return -NF_ACCEPT;
		}
	}

checksum_skipped:
	/*
	 * 18 is the highest 'known' ICMP type. Anything else is a mystery
	 *
	 * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently
	 * discarded.
	 */
	if (icmph->type > NR_ICMP_TYPES) {
		if (LOG_INVALID(IPPROTO_ICMP))
			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
				      "ip_ct_icmp: invalid ICMP type ");
		return -NF_ACCEPT;
	}

	/* Need to track icmp error message? */
	if (icmph->type != ICMP_DEST_UNREACH
	    && icmph->type != ICMP_SOURCE_QUENCH
	    && icmph->type != ICMP_TIME_EXCEEDED
	    && icmph->type != ICMP_PARAMETERPROB
	    && icmph->type != ICMP_REDIRECT)
		return NF_ACCEPT;

	return icmp_error_message(skb, ctinfo, hooknum);
}
/*
 * Handle a received IPv6 PIM REGISTER packet: validate the PIM header and
 * checksum, unwrap the encapsulated multicast packet, and re-inject it as
 * if received on the PIM register virtual interface.
 * Consumes the skb in all cases; always returns 0.
 */
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	int reg_vif_num = net->ipv6.mroute_reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	/* Accept only a REGISTER with a valid checksum: header-only sum
	 * first, full-packet sum as fallback (both must fail to drop). */
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) + sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	/* look up the register vif under the multicast routing lock */
	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	/* strip the outer headers and re-target the skb at reg_dev */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	skb_dst_drop(skb);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/*
 * Compute the GRE checksum for @skb.  Packets awaiting hardware checksum
 * (CHECKSUM_PARTIAL) use local checksum offload (lco_csum) so the inner
 * checksum need not be resolved yet; others are summed in full.
 */
static __sum16 gre_checksum(struct sk_buff *skb)
{
	__wsum partial;

	partial = (skb->ip_summed == CHECKSUM_PARTIAL)
			? lco_csum(skb)
			: skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(partial);
}
/*
 * Incrementally update a TCP checksum after the dispatcher rewrote the
 * address and the pseudo-header length:
 *   new = fold(diff_addr(old_ip, new_ip,
 *                        diff_len(old_len, new_len, ~unfold(old_check))))
 * @af selects the 16-byte (IPv6) or 4-byte (IPv4) address diff.
 */
static inline void
tcp_partial_csum_update(int af, struct tcphdr *tcph,
			const union nf_inet_addr *oldip,
			const union nf_inet_addr *newip,
			__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_DISPATCHER_IPV6
	if (af == AF_INET6)
		tcph->check =
			csum_fold(dispatcher_check_diff16(oldip->ip6, newip->ip6,
					dispatcher_check_diff2(oldlen, newlen,
						~csum_unfold(tcph->check))));
	else
#endif
		tcph->check =
			csum_fold(dispatcher_check_diff4(oldip->ip, newip->ip,
					dispatcher_check_diff2(oldlen, newlen,
						~csum_unfold(tcph->check))));
}
/*
 * Incrementally patch the IP header checksum — and, for TCP/UDP payloads,
 * the transport checksum (which covers the pseudo-header) — after the
 * source address changed from @srcIp to @newIp.  The same {~old, new}
 * difference pair is applied to each affected checksum.
 */
static INLINE void mvFpCSumInc(MV_IP_HEADER *pIpHdr, MV_U32 srcIp, MV_U32 newIp)
{
	MV_TCP_HEADER *pTcpHdr;
	MV_UDP_HEADER *pUdpHdr;
	__be32 diff[] = { ~srcIp, newIp };

	/* IP header checksum always needs the fix-up. */
	pIpHdr->checksum =
		csum_fold(csum_partial((char *)diff, sizeof(diff),
				       ~csum_unfold(pIpHdr->checksum)));

	if (pIpHdr->protocol == MV_IP_PROTO_TCP) {
		pTcpHdr = (MV_TCP_HEADER *)((unsigned)pIpHdr + sizeof(MV_IP_HEADER));
		pTcpHdr->chksum =
			csum_fold(csum_partial((char *)diff, sizeof(diff),
					       ~csum_unfold(pTcpHdr->chksum)));
	} else if (pIpHdr->protocol == MV_IP_PROTO_UDP) {
		pUdpHdr = (MV_UDP_HEADER *)((unsigned)pIpHdr + sizeof(MV_IP_HEADER));
		pUdpHdr->check =
			csum_fold(csum_partial((char *)diff, sizeof(diff),
					       ~csum_unfold(pUdpHdr->check)));
	}
}
/*
 * Receive a datagram from a UDP socket for an RPC service thread:
 * dequeue an skb, verify its checksum if the device didn't, and point the
 * request's argument buffer at the RPC payload (past the UDP header).
 * Returns the skb length on success, 0 on a silently dropped bad packet,
 * or a negative errno.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	u32		*data;
	int		err, len;

	svsk->sk_data = 0;
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		svc_sock_received(svsk, 0);
		if (err == -EAGAIN)
			return err;
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		/* software-verify the UDP checksum; drop silently on error */
		unsigned int csum = skb->csum;
		csum = csum_partial(skb->h.raw, skb->len, csum);
		if ((unsigned short)csum_fold(csum)) {
			skb_free_datagram(svsk->sk_sk, skb);
			svc_sock_received(svsk, 0);
			return 0;
		}
	}

	/* There may be more data */
	svsk->sk_data = 1;

	/* argument buffer starts right after the UDP header */
	len  = skb->len - sizeof(struct udphdr);
	data = (u32 *) (skb->h.raw + sizeof(struct udphdr));

	rqstp->rq_skbuff      = skb;
	rqstp->rq_argbuf.base = data;
	rqstp->rq_argbuf.buf  = data;
	rqstp->rq_argbuf.len  = (len >> 2);
	/* rqstp->rq_resbuf      = rqstp->rq_defbuf; */
	rqstp->rq_prot        = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	/* One down, maybe more to go... */
	svsk->sk_sk->stamp = skb->stamp;
	svc_sock_received(svsk, 0);

	return len;
}
/*
 * Build and deliver an ICMP destination-unreachable reply (with @code) for
 * @oldskb on the bridge reject path.  The original packet must have a valid
 * IP header, not be a fragment, and pass its own L4 checksum; up to 536
 * bytes of it are echoed back in the ICMP payload.
 */
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* don't reflect packets whose own checksum is broken */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type     = ICMP_DEST_UNREACH;
	icmph->code     = code;

	/* echo the start of the offending packet as the ICMP payload */
	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	/* checksum covers the (zeroed) ICMP header plus payload */
	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);
	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}