u64 i915_prandom_u64_state(struct rnd_state *rnd)
{
	u64 x;

	x = prandom_u32_state(rnd);
	x <<= 32;
	x |= prandom_u32_state(rnd);

	return x;
}
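/*
 * Usage sketch (not from the source): seeding a local state and drawing
 * one reproducible 64-bit value. The seed and the function name are
 * illustrative only.
 */
#include <linux/prandom.h>	/* <linux/random.h> on older kernels */

static u64 example_draw_u64(void)
{
	struct rnd_state rnd;

	prandom_seed_state(&rnd, 0x12345678);	/* arbitrary example seed */
	return i915_prandom_u64_state(&rnd);	/* two u32 draws glued together */
}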
/**
 * prandom_u32 - pseudo random number generator
 *
 * A 32 bit pseudo-random number is generated using a fast
 * algorithm suitable for simulation. This algorithm is NOT
 * considered safe for cryptographic use.
 */
u32 prandom_u32(void)
{
	unsigned long r;
	struct rnd_state *state = &get_cpu_var(net_rand_state);

	r = prandom_u32_state(state);
	put_cpu_var(state);

	return r;
}
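/*
 * Caller sketch (assumed, not from the source): typical non-cryptographic
 * use, e.g. spreading retries with random jitter. The helper name is
 * illustrative.
 */
static unsigned long example_jittered_delay(unsigned long base_jiffies)
{
	/* add up to ~25% pseudo-random jitter on top of the base delay */
	return base_jiffies + prandom_u32() % (base_jiffies / 4 + 1);
}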
static void init(void)
{
	int i;

	for (i = 0; i < NODES; i++) {
		u32 a = prandom_u32_state(&rnd);
		u32 b = prandom_u32_state(&rnd);

		if (a <= b) {
			nodes[i].start = a;
			nodes[i].last = b;
		} else {
			nodes[i].start = b;
			nodes[i].last = a;
		}
	}

	for (i = 0; i < SEARCHES; i++)
		queries[i] = prandom_u32_state(&rnd);
}
/**
 * prandom_bytes_state - get the requested number of pseudo-random bytes
 *
 * @state: pointer to state structure holding seeded state.
 * @buf: where to copy the pseudo-random bytes to
 * @bytes: the requested number of bytes
 *
 * This is used for pseudo-randomness with no outside seeding.
 * For more random results, use prandom_bytes().
 */
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
	u8 *ptr = buf;

	while (bytes >= sizeof(u32)) {
		put_unaligned(prandom_u32_state(state), (u32 *) ptr);
		ptr += sizeof(u32);
		bytes -= sizeof(u32);
	}

	if (bytes > 0) {
		u32 rem = prandom_u32_state(state);

		/* drain the final draw one byte at a time, LSB first */
		do {
			*ptr++ = (u8) rem;
			bytes--;
			rem >>= BITS_PER_BYTE;
		} while (bytes > 0);
	}
}
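/*
 * Usage sketch (assumed, not from the source): filling a buffer with a
 * reproducible pseudo-random pattern, as selftests commonly do. The
 * function name and seed are illustrative.
 */
static void example_fill_pattern(void *buf, size_t len)
{
	struct rnd_state state;

	prandom_seed_state(&state, 42);	/* fixed seed => same bytes every run */
	prandom_bytes_state(&state, buf, len);
}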
/**
 * prandom_u32 - pseudo random number generator
 *
 * A 32 bit pseudo-random number is generated using a fast
 * algorithm suitable for simulation. This algorithm is NOT
 * considered safe for cryptographic use.
 */
u32 prandom_u32(void)
{
	struct rnd_state *state = &get_cpu_var(net_rand_state);
	u32 res;

	res = prandom_u32_state(state);
	put_cpu_var(state);

	return res;
}
static void nft_ng_random_eval(const struct nft_expr *expr,
			       struct nft_regs *regs,
			       const struct nft_pktinfo *pkt)
{
	struct nft_ng_random *priv = nft_expr_priv(expr);
	struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
	u32 val;

	val = reciprocal_scale(prandom_u32_state(state), priv->modulus);
	regs->data[priv->dreg] = val + priv->offset;
}
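/*
 * Aside (a sketch, not from this file): reciprocal_scale() from
 * <linux/kernel.h> maps a full-range u32 into [0, ep_ro) with a 64-bit
 * multiply and shift instead of a division, so the eval above stores
 * priv->offset plus a value in [0, priv->modulus). Open-coded equivalent:
 */
static inline u32 example_bounded_u32(struct rnd_state *state, u32 bound)
{
	/* same multiply-and-shift that reciprocal_scale() performs */
	return (u32)(((u64)prandom_u32_state(state) * bound) >> 32);
}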
/**
 * prandom_bytes_state - get the requested number of pseudo-random bytes
 *
 * @state: pointer to state structure holding seeded state.
 * @buf: where to copy the pseudo-random bytes to
 * @bytes: the requested number of bytes
 *
 * This is used for pseudo-randomness with no outside seeding.
 * For more random results, use prandom_bytes().
 */
void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
{
	unsigned char *p = buf;
	int i;

	for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
		u32 random = prandom_u32_state(state);
		int j;

		/* unpack one 32-bit draw into four bytes, LSB first */
		for (j = 0; j < sizeof(u32); j++) {
			p[i + j] = random;
			random >>= BITS_PER_BYTE;
		}
	}
	if (i < bytes) {
		u32 random = prandom_u32_state(state);

		for (; i < bytes; i++) {
			p[i] = random;
			random >>= BITS_PER_BYTE;
		}
	}
}
void nft_meta_get_eval(const struct nft_expr *expr,
		       struct nft_regs *regs,
		       const struct nft_pktinfo *pkt)
{
	const struct nft_meta *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
	struct sock *sk;
	u32 *dest = &regs->data[priv->dreg];
#ifdef CONFIG_NF_TABLES_BRIDGE
	const struct net_bridge_port *p;
#endif

	switch (priv->key) {
	case NFT_META_LEN:
		*dest = skb->len;
		break;
	case NFT_META_PROTOCOL:
		nft_reg_store16(dest, (__force u16)skb->protocol);
		break;
	case NFT_META_NFPROTO:
		nft_reg_store8(dest, nft_pf(pkt));
		break;
	case NFT_META_L4PROTO:
		if (!pkt->tprot_set)
			goto err;
		nft_reg_store8(dest, pkt->tprot);
		break;
	case NFT_META_PRIORITY:
		*dest = skb->priority;
		break;
	case NFT_META_MARK:
		*dest = skb->mark;
		break;
	case NFT_META_IIF:
		if (in == NULL)
			goto err;
		*dest = in->ifindex;
		break;
	case NFT_META_OIF:
		if (out == NULL)
			goto err;
		*dest = out->ifindex;
		break;
	case NFT_META_IIFNAME:
		if (in == NULL)
			goto err;
		strncpy((char *)dest, in->name, IFNAMSIZ);
		break;
	case NFT_META_OIFNAME:
		if (out == NULL)
			goto err;
		strncpy((char *)dest, out->name, IFNAMSIZ);
		break;
	case NFT_META_IIFTYPE:
		if (in == NULL)
			goto err;
		nft_reg_store16(dest, in->type);
		break;
	case NFT_META_OIFTYPE:
		if (out == NULL)
			goto err;
		nft_reg_store16(dest, out->type);
		break;
	case NFT_META_SKUID:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk) ||
		    !net_eq(nft_net(pkt), sock_net(sk)))
			goto err;

		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket == NULL ||
		    sk->sk_socket->file == NULL) {
			read_unlock_bh(&sk->sk_callback_lock);
			goto err;
		}

		*dest = from_kuid_munged(&init_user_ns,
					 sk->sk_socket->file->f_cred->fsuid);
		read_unlock_bh(&sk->sk_callback_lock);
		break;
	case NFT_META_SKGID:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk) ||
		    !net_eq(nft_net(pkt), sock_net(sk)))
			goto err;

		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket == NULL ||
		    sk->sk_socket->file == NULL) {
			read_unlock_bh(&sk->sk_callback_lock);
			goto err;
		}
		*dest = from_kgid_munged(&init_user_ns,
					 sk->sk_socket->file->f_cred->fsgid);
		read_unlock_bh(&sk->sk_callback_lock);
		break;
#ifdef CONFIG_IP_ROUTE_CLASSID
	case NFT_META_RTCLASSID: {
		const struct dst_entry *dst = skb_dst(skb);

		if (dst == NULL)
			goto err;
		*dest = dst->tclassid;
		break;
	}
#endif
#ifdef CONFIG_NETWORK_SECMARK
	case NFT_META_SECMARK:
		*dest = skb->secmark;
		break;
#endif
	case NFT_META_PKTTYPE:
		if (skb->pkt_type != PACKET_LOOPBACK) {
			nft_reg_store8(dest, skb->pkt_type);
			break;
		}

		switch (nft_pf(pkt)) {
		case NFPROTO_IPV4:
			if (ipv4_is_multicast(ip_hdr(skb)->daddr))
				nft_reg_store8(dest, PACKET_MULTICAST);
			else
				nft_reg_store8(dest, PACKET_BROADCAST);
			break;
		case NFPROTO_IPV6:
			nft_reg_store8(dest, PACKET_MULTICAST);
			break;
		case NFPROTO_NETDEV:
			switch (skb->protocol) {
			case htons(ETH_P_IP): {
				int noff = skb_network_offset(skb);
				struct iphdr *iph, _iph;

				iph = skb_header_pointer(skb, noff,
							 sizeof(_iph), &_iph);
				if (!iph)
					goto err;

				if (ipv4_is_multicast(iph->daddr))
					nft_reg_store8(dest, PACKET_MULTICAST);
				else
					nft_reg_store8(dest, PACKET_BROADCAST);
				break;
			}
			case htons(ETH_P_IPV6):
				nft_reg_store8(dest, PACKET_MULTICAST);
				break;
			default:
				WARN_ON_ONCE(1);
				goto err;
			}
			break;
		default:
			WARN_ON_ONCE(1);
			goto err;
		}
		break;
	case NFT_META_CPU:
		*dest = raw_smp_processor_id();
		break;
	case NFT_META_IIFGROUP:
		if (in == NULL)
			goto err;
		*dest = in->group;
		break;
	case NFT_META_OIFGROUP:
		if (out == NULL)
			goto err;
		*dest = out->group;
		break;
#ifdef CONFIG_CGROUP_NET_CLASSID
	case NFT_META_CGROUP:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk) ||
		    !net_eq(nft_net(pkt), sock_net(sk)))
			goto err;
		*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
		break;
#endif
	case NFT_META_PRANDOM: {
		struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
		*dest = prandom_u32_state(state);
		break;
	}
#ifdef CONFIG_XFRM
	case NFT_META_SECPATH:
		nft_reg_store8(dest, secpath_exists(skb));
		break;
#endif
#ifdef CONFIG_NF_TABLES_BRIDGE
	case NFT_META_BRI_IIFNAME:
		if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
			goto err;
		strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
		return;
	case NFT_META_BRI_OIFNAME:
		if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
			goto err;
		strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
		return;
#endif
	default:
		WARN_ON(1);
		goto err;
	}
	return;

err:
	regs->verdict.code = NFT_BREAK;
}
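/*
 * For context (a sketch of the usual kernel pattern, not quoted from
 * this file): the per-cpu state read by NFT_META_PRANDOM is declared at
 * file scope and lazily seeded once from the expression's init path.
 * The init helper name here is illustrative.
 */
static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);

static void example_prandom_init(void)
{
	/* seeds every CPU's rnd_state exactly once, on first use */
	prandom_init_once(&nft_prandom_state);
}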
void nft_meta_get_eval(const struct nft_expr *expr,
		       struct nft_regs *regs,
		       const struct nft_pktinfo *pkt)
{
	const struct nft_meta *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	const struct net_device *in = pkt->in, *out = pkt->out;
	struct sock *sk;
	u32 *dest = &regs->data[priv->dreg];

	switch (priv->key) {
	case NFT_META_LEN:
		*dest = skb->len;
		break;
	case NFT_META_PROTOCOL:
		*dest = 0;
		*(__be16 *)dest = skb->protocol;
		break;
	case NFT_META_NFPROTO:
		*dest = pkt->pf;
		break;
	case NFT_META_L4PROTO:
		*dest = pkt->tprot;
		break;
	case NFT_META_PRIORITY:
		*dest = skb->priority;
		break;
	case NFT_META_MARK:
		*dest = skb->mark;
		break;
	case NFT_META_IIF:
		if (in == NULL)
			goto err;
		*dest = in->ifindex;
		break;
	case NFT_META_OIF:
		if (out == NULL)
			goto err;
		*dest = out->ifindex;
		break;
	case NFT_META_IIFNAME:
		if (in == NULL)
			goto err;
		strncpy((char *)dest, in->name, IFNAMSIZ);
		break;
	case NFT_META_OIFNAME:
		if (out == NULL)
			goto err;
		strncpy((char *)dest, out->name, IFNAMSIZ);
		break;
	case NFT_META_IIFTYPE:
		if (in == NULL)
			goto err;
		*dest = 0;
		*(u16 *)dest = in->type;
		break;
	case NFT_META_OIFTYPE:
		if (out == NULL)
			goto err;
		*dest = 0;
		*(u16 *)dest = out->type;
		break;
	case NFT_META_SKUID:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk))
			goto err;

		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket == NULL ||
		    sk->sk_socket->file == NULL) {
			read_unlock_bh(&sk->sk_callback_lock);
			goto err;
		}

		*dest = from_kuid_munged(&init_user_ns,
					 sk->sk_socket->file->f_cred->fsuid);
		read_unlock_bh(&sk->sk_callback_lock);
		break;
	case NFT_META_SKGID:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk))
			goto err;

		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket == NULL ||
		    sk->sk_socket->file == NULL) {
			read_unlock_bh(&sk->sk_callback_lock);
			goto err;
		}
		*dest = from_kgid_munged(&init_user_ns,
					 sk->sk_socket->file->f_cred->fsgid);
		read_unlock_bh(&sk->sk_callback_lock);
		break;
#ifdef CONFIG_IP_ROUTE_CLASSID
	case NFT_META_RTCLASSID: {
		const struct dst_entry *dst = skb_dst(skb);

		if (dst == NULL)
			goto err;
		*dest = dst->tclassid;
		break;
	}
#endif
#ifdef CONFIG_NETWORK_SECMARK
	case NFT_META_SECMARK:
		*dest = skb->secmark;
		break;
#endif
	case NFT_META_PKTTYPE:
		if (skb->pkt_type != PACKET_LOOPBACK) {
			*dest = skb->pkt_type;
			break;
		}

		switch (pkt->pf) {
		case NFPROTO_IPV4:
			if (ipv4_is_multicast(ip_hdr(skb)->daddr))
				*dest = PACKET_MULTICAST;
			else
				*dest = PACKET_BROADCAST;
			break;
		case NFPROTO_IPV6:
			if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
				*dest = PACKET_MULTICAST;
			else
				*dest = PACKET_BROADCAST;
			break;
		default:
			WARN_ON(1);
			goto err;
		}
		break;
	case NFT_META_CPU:
		*dest = raw_smp_processor_id();
		break;
	case NFT_META_IIFGROUP:
		if (in == NULL)
			goto err;
		*dest = in->group;
		break;
	case NFT_META_OIFGROUP:
		if (out == NULL)
			goto err;
		*dest = out->group;
		break;
#ifdef CONFIG_CGROUP_NET_CLASSID
	case NFT_META_CGROUP:
		sk = skb_to_full_sk(skb);
		if (!sk || !sk_fullsock(sk))
			goto err;
		*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
		break;
#endif
	case NFT_META_PRANDOM: {
		struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
		*dest = prandom_u32_state(state);
		break;
	}
	default:
		WARN_ON(1);
		goto err;
	}
	return;

err:
	regs->verdict.code = NFT_BREAK;
}
static inline u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
{
	return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
}
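/*
 * Usage sketch (assumed, not from the source): a Fisher-Yates shuffle of
 * an index array, using the bounded helper above to pick swap partners.
 * The function name is illustrative; drm's selftests use a similar
 * reordering helper.
 */
static void example_shuffle(unsigned int *order, unsigned int count,
			    struct rnd_state *state)
{
	unsigned int i, j;

	for (i = count; i > 1; i--) {
		/* j is uniform in [0, i); swap it into the tail slot i - 1 */
		j = drm_prandom_u32_max_state(i, state);
		swap(order[i - 1], order[j]);
	}
}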
static int igt_wakeup(void *arg)
{
	I915_RND_STATE(prng);
	struct intel_engine_cs *engine = arg;
	struct igt_wakeup *waiters;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	const int count = 4096;
	const u32 max_seqno = count / 4;
	atomic_t ready, set, done;
	int err = -ENOMEM;
	int n, step;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	/* Create a large number of threads, each waiting on a random seqno.
	 * Multiple waiters will be waiting for the same seqno.
	 */
	atomic_set(&ready, count);
	for (n = 0; n < count; n++) {
		waiters[n].wq = &wq;
		waiters[n].ready = &ready;
		waiters[n].set = &set;
		waiters[n].done = &done;
		waiters[n].engine = engine;
		waiters[n].flags = BIT(IDLE);

		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
					     "i915/igt:%d", n);
		if (IS_ERR(waiters[n].tsk))
			goto out_waiters;

		get_task_struct(waiters[n].tsk);
	}

	for (step = 1; step <= max_seqno; step <<= 1) {
		u32 seqno;

		/* The waiter threads start paused as we assign them a random
		 * seqno and reset the engine. Once the engine is reset,
		 * we signal that the threads may begin their wait upon their
		 * seqno.
		 */
		for (n = 0; n < count; n++) {
			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
			waiters[n].seqno =
				1 + prandom_u32_state(&prng) % max_seqno;
		}
		mock_seqno_advance(engine, 0);
		igt_wake_all_sync(&ready, &set, &done, &wq, count);

		/* Simulate the GPU doing chunks of work, with one or more
		 * seqno appearing to finish at the same time. A random number
		 * of threads will be waiting upon the update and hopefully be
		 * woken.
		 */
		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
			usleep_range(50, 500);
			mock_seqno_advance(engine, seqno);
		}
		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);

		/* With the seqno now beyond any of the waiting threads, they
		 * should all be woken, see that they are complete and signal
		 * that they are ready for the next test. We wait until all
		 * threads are complete and waiting for us (i.e. not a seqno).
		 */
		if (!wait_var_event_timeout(&done,
					    !atomic_read(&done),
					    10 * HZ)) {
			pr_err("Timed out waiting for %d remaining waiters\n",
			       atomic_read(&done));
			err = -ETIMEDOUT;
			break;
		}

		err = check_rbtree_empty(engine);
		if (err)
			break;
	}

out_waiters:
	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		set_bit(STOP, &waiters[n].flags);
	}
	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
	igt_wake_all_sync(&ready, &set, &done, &wq, n);

	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		kthread_stop(waiters[n].tsk);
		put_task_struct(waiters[n].tsk);
	}

	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}