static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct hlist_node *node;
	struct sock *sk2;
	struct inet_sock *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
	if (!snum) {
		int i, low, high, remaining;
		unsigned rover, best, best_size_so_far;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		best_size_so_far = UINT_MAX;
		best = rover = net_random() % remaining + low;

		if (!udp_lport_inuse(rover) &&
		    !inet_is_reserved_local_port(rover))
			goto gotit;

		/* 1st pass: look for empty (or shortest) hash chain */
		for (i = 0; i < UDP_HTABLE_SIZE; i++) {
			struct hlist_head *list;
			int size = 0;

			list = &udp_hash[rover & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list) &&
			    !inet_is_reserved_local_port(rover))
				goto gotit;

			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
					goto next;
			best_size_so_far = size;
			best = rover;
		next:
			/* fold back if end of range */
			if (++rover > high)
				rover = low + ((rover - low) &
					       (UDP_HTABLE_SIZE - 1));
		}

		/* 2nd pass: find hole in shortest hash chain */
		rover = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
			if (!udp_lport_inuse(rover) &&
			    !inet_is_reserved_local_port(rover))
				goto gotit;
			rover += UDP_HTABLE_SIZE;
			if (rover > high)
				rover = low + ((rover - low) &
					       (UDP_HTABLE_SIZE - 1));
		}

		/* All ports in use! */
		goto fail;

	gotit:
		snum = rover;
	} else {
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct hlist_node *node;
	struct sock *sk2;
	struct inet_opt *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct hlist_head *list;
			int size;

			list = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
					goto next;
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
		     i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0] +
					((result - sysctl_local_port_range[0]) &
					 (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		udp_port_rover = snum = result;
	} else {
/* Shared by v4/v6 udp. */
unsigned short udp_good_socknum(void)
{
	int result;
	static int start = 0;
	int i, best, best_size_so_far;

	SOCKHASH_LOCK();

	/* Select initial not-so-random "best" */
	best = PROT_SOCK + 1 + (start & 1023);
	best_size_so_far = 32767;	/* "big" num */
	result = best;
	for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
		struct sock *sk;
		int size;

		sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];

		/* No clashes - take it */
		if (!sk)
			goto out;

		/* Is this one better than our best so far? */
		size = 0;
		do {
			if (++size >= best_size_so_far)
				goto next;
		} while ((sk = sk->next) != NULL);
		best_size_so_far = size;
		best = result;
	next:
		;
	}

	while (udp_lport_inuse(best))
		best += UDP_HTABLE_SIZE;
	result = best;
out:
	start = result;
	SOCKHASH_UNLOCK();
	return result;
}

static void udp_v4_hash(struct sock *sk)
{
	struct sock **skp;
	int num = sk->num;

	num &= (UDP_HTABLE_SIZE - 1);
	skp = &udp_hash[num];

	SOCKHASH_LOCK();
	sk->next = *skp;
	*skp = sk;
	sk->hashent = num;
	SOCKHASH_UNLOCK();
}

static void udp_v4_unhash(struct sock *sk)
{
	struct sock **skp;
	int num = sk->num;

	num &= (UDP_HTABLE_SIZE - 1);
	skp = &udp_hash[num];

	SOCKHASH_LOCK();
	while (*skp != NULL) {
		if (*skp == sk) {
			*skp = sk->next;
			break;
		}
		skp = &((*skp)->next);
	}
	SOCKHASH_UNLOCK();
}

static void udp_v4_rehash(struct sock *sk)
{
	struct sock **skp;
	int num = sk->num;
	int oldnum = sk->hashent;

	num &= (UDP_HTABLE_SIZE - 1);
	skp = &udp_hash[oldnum];

	SOCKHASH_LOCK();
	while (*skp != NULL) {
		if (*skp == sk) {
			*skp = sk->next;
			break;
		}
		skp = &((*skp)->next);
	}
	sk->next = udp_hash[num];
	udp_hash[num] = sk;
	sk->hashent = num;
	SOCKHASH_UNLOCK();
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
__inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
				      u16 dport, struct device *dev)
{
	struct sock *sk, *result = NULL;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL;
	     sk = sk->next) {
		if ((sk->num == hnum) &&
		    !(sk->dead && (sk->state == TCP_CLOSE))) {
			int score = 0;

			if (sk->rcv_saddr) {
				if (sk->rcv_saddr != daddr)
					continue;
				score++;
			}
			if (sk->daddr) {
				if (sk->daddr != saddr)
					continue;
				score++;
			}
			if (sk->dummy_th.dest) {
				if (sk->dummy_th.dest != sport)
					continue;
				score++;
			}
			/* If this socket is bound to a particular interface,
			 * did the packet come in on it?
			 */
			if (sk->bound_device) {
				if (dev == sk->bound_device)
					score++;
				else
					continue; /* mismatch--not this sock */
			}
			if (score == 4) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	return result;
}

#ifdef CONFIG_IP_TRANSPARENT_PROXY
struct sock *udp_v4_proxy_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport,
				 u32 paddr, u16 rport, struct device *dev)
{
	struct sock *hh[3], *sk, *result = NULL;
	int i;
	int badness = -1;
	unsigned short hnum = ntohs(dport);
	unsigned short hpnum = ntohs(rport);

	SOCKHASH_LOCK();
	hh[0] = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)];
	hh[1] = udp_hash[hpnum & (UDP_HTABLE_SIZE - 1)];
	for (i = 0; i < 2; i++) {
		for (sk = hh[i]; sk != NULL; sk = sk->next) {
			if (sk->num == hnum || sk->num == hpnum) {
				int score = 0;

				if (sk->dead && (sk->state == TCP_CLOSE))
					continue;
				if (sk->rcv_saddr) {
					if ((sk->num != hpnum ||
					     sk->rcv_saddr != paddr) &&
					    (sk->num != hnum ||
					     sk->rcv_saddr != daddr))
						continue;
					score++;
				}
				if (sk->daddr) {
					if (sk->daddr != saddr)
						continue;
					score++;
				}
				if (sk->dummy_th.dest) {
					if (sk->dummy_th.dest != sport)
						continue;
					score++;
				}
				/* If this socket is bound to a particular
				 * interface, did the packet come in on it?
				 */
				if (sk->bound_device) {
					if (sk->bound_device != dev)
						continue;
					score++;
				}
				if (score == 4 && sk->num == hnum) {
					result = sk;
					break;
				} else if (score > badness &&
					   (sk->num == hpnum ||
					    sk->rcv_saddr)) {
					result = sk;
					badness = score;
				}
			}
		}
	}
	SOCKHASH_UNLOCK();
	return result;
}
#endif

static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     unsigned short num,
					     unsigned long raddr,
					     unsigned short rnum,
					     unsigned long laddr,
					     struct device *dev)
{
	struct sock *s = sk;
	unsigned short hnum = ntohs(num);

	for (; s; s = s->next) {
		if ((s->num != hnum) ||
		    (s->dead && (s->state == TCP_CLOSE)) ||
		    (s->daddr && s->daddr != raddr) ||
		    (s->dummy_th.dest != rnum && s->dummy_th.dest != 0) ||
		    ((s->bound_device) && (s->bound_device != dev)) ||
		    (s->rcv_saddr && s->rcv_saddr != laddr))
			continue;
		break;
	}
	return s;
}

#define min(a,b)	((a)<(b)?(a):(b))

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header. We need
 * to find the appropriate port.
 */
void udp_err(int type, int code, unsigned char *header, __u32 daddr,
	     __u32 saddr, struct inet_protocol *protocol, int len)
{
	struct udphdr *uh;
	struct sock *sk;

	/*
	 * Find the 8 bytes of post IP header ICMP included for us
	 */
	if (len < sizeof(struct udphdr))
		return;

	uh = (struct udphdr *)header;

	sk = udp_v4_lookup(daddr, uh->dest, saddr, uh->source, NULL);
	if (sk == NULL)
		return;	/* No socket for error */

	if (type == ICMP_SOURCE_QUENCH) {
		/* Slow down! */
		if (sk->cong_window > 1)
			sk->cong_window = sk->cong_window / 2;
		return;
	}

	if (type == ICMP_PARAMETERPROB) {
		sk->err = EPROTO;
		sk->error_report(sk);
		return;
	}

	/*
	 * Various people wanted BSD UDP semantics. Well they've come
	 * back out because they slow down response to stuff like dead
	 * or unreachable name servers and they screw term users something
	 * chronic. Oh and it violates RFC1122. So basically fix your
	 * client code people.
	 */

	/* RFC1122: OK. Passes ICMP errors back to application, as per */
	/* 4.1.3.3. */
	/* After the comment above, that should be no surprise. */
	if (code <= NR_ICMP_UNREACH && icmp_err_convert[code].fatal) {
		/*
		 * 4.x BSD compatibility item. Break RFC1122 to
		 * get BSD socket semantics.
		 */
		if (sk->bsdism && sk->state != TCP_ESTABLISHED)
			return;
		sk->err = icmp_err_convert[code].errno;
		sk->error_report(sk);
	}
}

static unsigned short udp_check(struct udphdr *uh, int len,
				unsigned long saddr, unsigned long daddr,
				unsigned long base)
{
	return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
}

struct udpfakehdr {
	struct udphdr uh;
	__u32 daddr;
	__u32 other;
	const char *from;
	__u32 wcheck;
};

/*
 * Copy and checksum a UDP packet from user space into a buffer. We still
 * have to do the planning to get ip_build_xmit to spot direct transfer to
 * network card and provide an additional callback mode for direct
 * user->board I/O transfers. That one will be fun.
 */
static void udp_getfrag(const void *p, __u32 saddr, char *to,
			unsigned int offset, unsigned int fraglen)
{
	struct udpfakehdr *ufh = (struct udpfakehdr *)p;
	const char *src;
	char *dst;
	unsigned int len;

	if (offset) {
		len = fraglen;
		src = ufh->from + (offset - sizeof(struct udphdr));
		dst = to;
	} else {
		len = fraglen - sizeof(struct udphdr);
		src = ufh->from;
		dst = to + sizeof(struct udphdr);
	}
	ufh->wcheck = csum_partial_copy_fromuser(src, dst, len, ufh->wcheck);
	if (offset == 0) {
		ufh->wcheck = csum_partial((char *)ufh,
					   sizeof(struct udphdr),
					   ufh->wcheck);
		ufh->uh.check = csum_tcpudp_magic(saddr, ufh->daddr,
						  ntohs(ufh->uh.len),
						  IPPROTO_UDP, ufh->wcheck);
		if (ufh->uh.check == 0)
			ufh->uh.check = -1;
		memcpy(to, ufh, sizeof(struct udphdr));
	}
}

/*
 * Unchecksummed UDP is sufficiently critical to stuff like ATM video
 * conferencing that we use two routines for this for speed. Probably we
 * ought to have a CONFIG_FAST_NET set for >10Mb/second boards to activate
 * this sort of coding. Timing needed to verify if this is a valid decision.
 */
static void udp_getfrag_nosum(const void *p, __u32 saddr, char *to,
			      unsigned int offset, unsigned int fraglen)
{
	struct udpfakehdr *ufh = (struct udpfakehdr *)p;
	const char *src;
	char *dst;
	unsigned int len;

	if (offset) {
		len = fraglen;
		src = ufh->from + (offset - sizeof(struct udphdr));
		dst = to;
	} else {
		len = fraglen - sizeof(struct udphdr);
		src = ufh->from;
		dst = to + sizeof(struct udphdr);
	}
	memcpy_fromfs(dst, src, len);
	if (offset == 0)
		memcpy(to, ufh, sizeof(struct udphdr));
}

/*
 * Send UDP frames.
 */
static int udp_send(struct sock *sk, struct sockaddr_in *sin,
		    const unsigned char *from, int len, int rt,
		    __u32 saddr, int noblock)
{
	int ulen = len + sizeof(struct udphdr);
	int a;
	struct udpfakehdr ufh;

	if (ulen > 65535 - sizeof(struct iphdr))
		return -EMSGSIZE;

	ufh.uh.source = sk->dummy_th.source;
	ufh.uh.dest = sin->sin_port;
	ufh.uh.len = htons(ulen);
	ufh.uh.check = 0;
	ufh.daddr = sin->sin_addr.s_addr;
	ufh.other = (htons(ulen) << 16) + IPPROTO_UDP*256;
	ufh.from = from;
	ufh.wcheck = 0;

#ifdef CONFIG_IP_TRANSPARENT_PROXY
	if (rt & MSG_PROXY) {
		/*
		 * We map the first 8 bytes of a second sockaddr_in
		 * into the last 8 (unused) bytes of a sockaddr_in.
		 * This _is_ ugly, but it's the only way to do it
		 * easily, without adding system calls.
		 */
		struct sockaddr_in *sinfrom =
			(struct sockaddr_in *)sin->sin_zero;

		if (!suser())
			return(-EPERM);
		if (sinfrom->sin_family && sinfrom->sin_family != AF_INET)
			return(-EINVAL);
		if (sinfrom->sin_port == 0)
			return(-EINVAL);
		saddr = sinfrom->sin_addr.s_addr;
		ufh.uh.source = sinfrom->sin_port;
	}
#endif

	/* RFC1122: OK. Provides the checksumming facility (MUST) as per */
	/* 4.1.3.4. It's configurable by the application via setsockopt() */
	/* (MAY) and it defaults to on (MUST). Almost makes up for the */
	/* violation above. -- MS */

	if (sk->no_check)
		a = ip_build_xmit(sk, udp_getfrag_nosum, &ufh, ulen,
				  sin->sin_addr.s_addr, saddr, sk->opt,
				  rt, IPPROTO_UDP, noblock);
	else
		a = ip_build_xmit(sk, udp_getfrag, &ufh, ulen,
				  sin->sin_addr.s_addr, saddr, sk->opt,
				  rt, IPPROTO_UDP, noblock);
	if (a < 0)
		return a;
	udp_statistics.UdpOutDatagrams++;
	return len;
}
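/*
 * Editorial aside: udp_getfrag() above leans on csum_tcpudp_magic() and on
 * the RFC 768 rule that a computed checksum of zero is transmitted as all
 * ones (0xFFFF), because zero on the wire means "checksum disabled" --
 * that is what the "if (ufh->uh.check == 0) ufh->uh.check = -1;" line does.
 * Below is a hedged, portable userspace sketch of the same pseudo-header
 * fold; the name udp4_pseudo_csum_sketch is invented here, the kernel's
 * real csum_tcpudp_magic() is per-architecture optimized code, and byte
 * order handling is deliberately elided (one's-complement sums are taken
 * over 16-bit words, so the arithmetic below only illustrates the fold).
 */
#include <stdint.h>

static uint16_t udp4_pseudo_csum_sketch(uint32_t saddr, uint32_t daddr,
					uint16_t len, uint32_t payload_sum)
{
	/* Accumulate pseudo-header fields plus the unfolded payload sum
	 * in 64 bits, then fold carries back into 16 bits (end-around
	 * carry), as one's-complement addition requires. */
	uint64_t sum = payload_sum;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* source address */
	sum += (daddr >> 16) + (daddr & 0xffff);	/* destination address */
	sum += 17;					/* IPPROTO_UDP */
	sum += len;					/* UDP length: header + data */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	uint16_t check = (uint16_t)~sum;
	return check ? check : 0xffff;	/* RFC 768: zero becomes all ones */
}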
/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 */
static int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct sock *sk;
			int size;

			sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (!sk) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			do {
				if (++size >= best_size_so_far)
					goto next;
			} while ((sk = sk->next) != NULL);
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (;; result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0] +
					((result - sysctl_local_port_range[0]) &
					 (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
gotit:
		udp_port_rover = snum = result;
	} else {
		struct sock *sk2;
		int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);

		for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		     sk2 != NULL; sk2 = sk2->next) {
			if (sk2->num == snum &&
			    sk2 != sk &&
			    sk2->bound_dev_if == sk->bound_dev_if &&
			    (!sk2->rcv_saddr ||
			     addr_type == IPV6_ADDR_ANY ||
			     !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
					    &sk2->net_pinfo.af_inet6.rcv_saddr) ||
			     (addr_type == IPV6_ADDR_MAPPED &&
			      sk2->family == AF_INET &&
			      sk->rcv_saddr == sk2->rcv_saddr)) &&
			    (!sk2->reuse || !sk->reuse))
				goto fail;
		}
	}

	sk->num = snum;
	if (sk->pprev == NULL) {
		struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		if ((sk->next = *skp) != NULL)
			(*skp)->pprev = &sk->next;
		*skp = sk;
		sk->pprev = skp;
		sock_prot_inc_use(sk->prot);
		sock_hold(sk);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
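/*
 * Editorial aside: every auto-bind loop in these excerpts gates its final
 * choice on udp_lport_inuse(), which none of them define. A minimal sketch
 * of what it likely looks like for the open-chained udp_hash[] table of
 * this era (a reconstruction for illustration, not copied from this
 * source): scan the one chain the port can hash to and report whether any
 * bound socket already owns that exact port number. Only one chain needs
 * checking because a socket bound to port num always lives in bucket
 * num & (UDP_HTABLE_SIZE - 1).
 */
static int udp_lport_inuse(u16 num)
{
	struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];

	for (; sk != NULL; sk = sk->next)
		if (sk->num == num)
			return 1;	/* port already bound */
	return 0;			/* port free */
}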
/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 */
static int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct sock *sk;
			int size;

			sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (!sk) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			do {
				if (++size >= best_size_so_far)
					goto next;
			} while ((sk = sk->next) != NULL);
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (;; result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0] +
					((result - sysctl_local_port_range[0]) &
					 (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
gotit:
		udp_port_rover = snum = result;
	} else {
		struct sock *sk2;
		int sk_reuse, sk2_reuse;
		int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr),
		    addr_type2;
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
		uid_t sk_uid = sock_i_uid_t(sk), sk2_uid;
#endif

		sk_reuse = 0;
		if (sk->reuse)
			sk_reuse |= 1;
#ifdef SO_REUSEPORT
		if (sk->reuseport)
			sk_reuse |= 2;
#endif
		if (sk_reuse &&
		    (addr_type != IPV6_ADDR_MAPPED ?
		     (addr_type & IPV6_ADDR_MULTICAST) :
		     MULTICAST(sk->rcv_saddr)))
			sk_reuse |= 4;

		for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		     sk2 != NULL; sk2 = sk2->next) {
#if 1	/* XXX: should be recoded like 2.4.21 */
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			int uid_ok;
#endif
			int both_specified = 0;

			if (sk2->num != snum ||
			    sk2 == sk ||
			    (sk2->bound_dev_if && sk->bound_dev_if &&
			     sk2->bound_dev_if != sk->bound_dev_if))
				continue;
#if 0
			if (sk2->family != AF_INET6 && sk2->family != AF_INET)
				continue;
#endif
			addr_type2 = sk2->family == AF_INET6 ?
				ipv6_addr_type(&sk2->net_pinfo.af_inet6.rcv_saddr) :
				IPV6_ADDR_MAPPED;
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			sk2_uid = sock_i_uid_t(sk2);
#endif
			if ((addr_type2 != IPV6_ADDR_MAPPED ?
			     addr_type2 != IPV6_ADDR_ANY : sk2->rcv_saddr) &&
			    (addr_type != IPV6_ADDR_MAPPED ?
			     addr_type != IPV6_ADDR_ANY : sk->rcv_saddr)) {
				if (addr_type2 == IPV6_ADDR_MAPPED ||
				    addr_type == IPV6_ADDR_MAPPED) {
					if (addr_type2 != addr_type ||
					    sk2->rcv_saddr != sk->rcv_saddr)
						continue;
				} else {
					if (ipv6_addr_cmp(&sk2->net_pinfo.af_inet6.rcv_saddr,
							  &sk->net_pinfo.af_inet6.rcv_saddr))
						continue;
				}
				both_specified = 1;
			}
#if defined(CONFIG_NET_RESTRICTED_REUSE) || defined(CONFIG_IPV6_RESTRICTED_DOUBLE_BIND)
			uid_ok = sk2_uid == (uid_t) -1 || sk_uid == sk2_uid;
#endif
			if ((addr_type2 == IPV6_ADDR_MAPPED &&
			     addr_type != IPV6_ADDR_MAPPED &&
			     sk->net_pinfo.af_inet6.ipv6only) ||
			    (addr_type == IPV6_ADDR_MAPPED &&
			     addr_type2 != IPV6_ADDR_MAPPED &&
			     sk2->net_pinfo.af_inet6.ipv6only)) {
#ifdef CONFIG_IPV6_RESTRICTED_DOUBLE_BIND
				if (sysctl_ipv6_bindv6only_restriction == 0 ||
				    uid_ok)
					continue;
#else
				continue;
#endif
			}
			sk2_reuse = 0;
			if (sk2->reuse)
				sk2_reuse |= 1;
#ifdef SO_REUSEPORT
			if (sk2->reuseport)
				sk2_reuse |= 2;
#endif
			if (sk2_reuse &&
			    (addr_type2 != IPV6_ADDR_MAPPED ?
			     (addr_type2 & IPV6_ADDR_MULTICAST) :
			     MULTICAST(sk2->rcv_saddr)))
				sk2_reuse |= 4;

			if (sk2_reuse & sk_reuse & 3) {	/* NOT && */
				if (sk2_reuse & sk_reuse & 4)
					continue;
#ifdef CONFIG_NET_RESTRICTED_REUSE
				if (!uid_ok)
					goto fail;
#endif
#ifdef SO_REUSEPORT
				if (sk2_reuse & sk_reuse & 2)
					continue;
#endif
				if (both_specified) {
					int addr_type2d = sk2->family == AF_INET6 ?
						ipv6_addr_type(&sk2->net_pinfo.af_inet6.daddr) :
						IPV6_ADDR_MAPPED;
					if (addr_type2d != IPV6_ADDR_MAPPED ?
					    addr_type2d != IPV6_ADDR_ANY :
					    sk2->daddr)
						continue;
				} else {
					if ((addr_type2 != IPV6_ADDR_MAPPED ?
					     addr_type2 != IPV6_ADDR_ANY :
					     sk2->rcv_saddr) ||
					    (addr_type != IPV6_ADDR_MAPPED ?
					     addr_type != IPV6_ADDR_ANY :
					     sk->rcv_saddr))
						continue;
				}
			}
			goto fail;
#else	/* XXX: should be recoded like 2.4.21 */
			if (sk2->num == snum &&
			    sk2 != sk &&
			    (!sk2->bound_dev_if ||
			     !sk->bound_dev_if ||
			     sk2->bound_dev_if == sk->bound_dev_if) &&
			    ((!sk2->rcv_saddr && !ipv6_only_sock(sk)) ||
			     (sk2->family == AF_INET6 &&
			      ipv6_addr_any(&sk2->net_pinfo.af_inet6.rcv_saddr) &&
			      !(ipv6_only_sock(sk2) &&
				addr_type == IPV6_ADDR_MAPPED)) ||
			     (addr_type == IPV6_ADDR_ANY &&
			      (!ipv6_only_sock(sk) ||
			       !(sk2->family == AF_INET6 ?
				 (ipv6_addr_type(&sk2->net_pinfo.af_inet6.rcv_saddr) ==
				  IPV6_ADDR_MAPPED) : 1))) ||
			     (sk2->family == AF_INET6 &&
			      !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
					     &sk2->net_pinfo.af_inet6.rcv_saddr)) ||
			     (addr_type == IPV6_ADDR_MAPPED &&
			      !ipv6_only_sock(sk2) &&
			      (!sk2->rcv_saddr ||
			       !sk->rcv_saddr ||
			       sk->rcv_saddr == sk2->rcv_saddr))) &&
			    (!sk2->reuse || !sk->reuse))
				goto fail;
#endif	/* XXX: should be recoded like 2.4.21 */
		}
	}

	sk->num = snum;
	if (sk->pprev == NULL) {
		struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		if ((sk->next = *skp) != NULL)
			(*skp)->pprev = &sk->next;
		*skp = sk;
		sk->pprev = skp;
		sock_prot_inc_use(sk->prot);
		sock_hold(sk);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
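/*
 * Editorial aside: the sk_reuse/sk2_reuse values in the version above pack
 * three independent facts into one bitmask so that a single expression
 * like "sk2_reuse & sk_reuse & 3" can ask "do both sockets allow some form
 * of address/port reuse?". A hedged reading of the bits, with names
 * invented here purely for illustration (they do not appear in this
 * source):
 */
enum {
	UDP_REUSE_ADDR	= 1,	/* SO_REUSEADDR set on the socket */
	UDP_REUSE_PORT	= 2,	/* SO_REUSEPORT set (where supported) */
	UDP_REUSE_MCAST	= 4,	/* reuse allowed and the socket is bound
				 * to a multicast address, so a shared
				 * bind is always tolerated (the
				 * "sk2_reuse & sk_reuse & 4" continue) */
};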
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct sock *sk;
			int size;

			sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (!sk) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			do {
				if (++size >= best_size_so_far)
					goto next;
			} while ((sk = sk->next) != NULL);
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
		     i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0] +
					((result - sysctl_local_port_range[0]) &
					 (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		udp_port_rover = snum = result;
	} else {
		struct sock *sk2;

		for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		     sk2 != NULL; sk2 = sk2->next) {
			if (sk2->num == snum &&
			    sk2 != sk &&
			    !ipv6_only_sock(sk2) &&
			    sk2->bound_dev_if == sk->bound_dev_if &&
			    (!sk2->rcv_saddr ||
			     !sk->rcv_saddr ||
			     sk2->rcv_saddr == sk->rcv_saddr) &&
			    (!sk2->reuse || !sk->reuse))
				goto fail;
		}
	}

	sk->num = snum;
	if (sk->pprev == NULL) {
		struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		if ((sk->next = *skp) != NULL)
			(*skp)->pprev = &sk->next;
		*skp = sk;
		sk->pprev = skp;
		sock_prot_inc_use(sk->prot);
		sock_hold(sk);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
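/*
 * Editorial aside: every auto-bind loop above wraps its candidate port
 * back into the local range with
 *	low + ((result - low) & (UDP_HTABLE_SIZE - 1))
 * Because UDP_HTABLE_SIZE is a power of two, this discards only whole
 * multiples of the table size, so the wrapped port lands back near "low"
 * while staying in the same udp_hash[] bucket -- exactly what the second
 * pass needs, since it only ever probes ports in the chain it chose. A
 * small standalone demonstration (invented here, not from this source;
 * the range values are the conventional Linux defaults):
 */
#include <assert.h>
#include <stdio.h>

#define UDP_HTABLE_SIZE	128	/* power of two, as in the kernel */

int main(void)
{
	int low = 32768, high = 61000;	/* typical ip_local_port_range */
	int result;

	for (result = high + 1; result < high + 4096; result++) {
		int wrapped = low + ((result - low) & (UDP_HTABLE_SIZE - 1));

		/* stays just above "low", inside the port range */
		assert(wrapped >= low && wrapped < low + UDP_HTABLE_SIZE);
		/* and hashes to the same udp_hash[] chain as before */
		assert((wrapped & (UDP_HTABLE_SIZE - 1)) ==
		       (result & (UDP_HTABLE_SIZE - 1)));
	}
	printf("fold-back preserves the hash bucket and the port range\n");
	return 0;
}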