static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		/* Chain is at maximum depth: recycle the entry with the
		 * oldest tcpm_stamp instead of growing the chain.
		 */
		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
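/* For reference, a condensed sketch of the structures these functions walk,
 * trimmed to the members used here (field subset as in net/ipv4/tcp_metrics.c;
 * the omitted members hold the cached metric values themselves).
 */
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	/* ... cached metric values, timestamps, fastopen state ... */
};

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

/* __tcp_get_metrics() returns this sentinel instead of NULL once a chain
 * already holds TCP_METRICS_RECLAIM_DEPTH entries, telling tcpm_new() to
 * recycle the oldest entry rather than grow the chain further.
 */
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL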
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			*(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
			daddr.family = AF_INET6;
			*(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
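/* Usage sketch: callers resolve the block under rcu_read_lock() and only
 * touch it while the RCU read side is held. This mirrors the pattern used
 * by tcp_init_metrics(); example_seed_from_metrics is a hypothetical name
 * and the actual metric reads are elided.
 */
static void example_seed_from_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);	/* create the entry if absent */
	if (tm) {
		/* e.g. seed ssthresh/RTT estimates from the cached values */
	}
	rcu_read_unlock();
}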
/* Earlier variant of tcp_get_metrics(): entries are keyed on the destination
 * address only, and the reclaim decision is made outside tcp_metrics_lock and
 * passed down to tcpm_new().
 */
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
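/* Sizing sketch: hash_32(hash, tcp_metrics_hash_log) above folds the raw
 * address hash into a bucket index, so tcp_metrics_hash_log must be the
 * log2 of the bucket count. A condensed version of the per-netns setup,
 * after tcp_net_metrics_init() in net/ipv4/tcp_metrics.c; the default slot
 * count is an assumption, allocation-fallback handling is trimmed, and
 * example_metrics_init is a hypothetical name.
 */
static int __net_init example_metrics_init(struct net *net)
{
	unsigned int slots = 8 * 1024;	/* assumed default; scaled by RAM upstream */
	size_t size;

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
	return net->ipv4.tcp_metrics_hash ? 0 : -ENOMEM;
}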