Example #1
static void double_bit_error_data(void *error_data, void *correct_data,
				size_t size)
{
	unsigned int offset[2];

	offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
	do {
		offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
	} while (offset[0] == offset[1]);

	memcpy(error_data, correct_data, size);

	__change_bit_le(offset[0], error_data);
	__change_bit_le(offset[1], error_data);
}
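For illustration, here is a plain userspace analogue of the same rejection loop (hypothetical code: rand() stands in for prandom_u32(), and the XOR mimics __change_bit_le()'s little-endian bit numbering):

#include <stdlib.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Hypothetical userspace analogue of double_bit_error_data(): copy the
 * correct data, then flip two distinct, randomly chosen bits. */
static void double_bit_error(unsigned char *err, const unsigned char *ok,
			     size_t size)
{
	unsigned int offset[2];

	offset[0] = rand() % (size * BITS_PER_BYTE);
	do {
		offset[1] = rand() % (size * BITS_PER_BYTE);
	} while (offset[0] == offset[1]);

	memcpy(err, ok, size);
	err[offset[0] / BITS_PER_BYTE] ^= 1u << (offset[0] % BITS_PER_BYTE);
	err[offset[1] / BITS_PER_BYTE] ^= 1u << (offset[1] % BITS_PER_BYTE);
}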
Example #2
static unsigned int random_ecc_bit(size_t size)
{
	unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);

	if (size == 256) {
		/*
		 * Don't inject a bit error into the insignificant bits (16th
		 * and 17th bit) in ECC code for 256 byte data block
		 */
		while (offset == 16 || offset == 17)
			offset = prandom_u32() % (3 * BITS_PER_BYTE);
	}

	return offset;
}
Example #3
struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
						  u8 **image_ptr)
{
	unsigned int sz, hole;
	struct bpf_binary_header *header;

	/* Most BPF filters are really small,
	 * but if some of them fill a page, allow at least
	 * 128 extra bytes to insert a random section of int3
	 */
	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;

	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

	header->pages = sz / PAGE_SIZE;
	hole = sz - (proglen + sizeof(*header));

	/* insert a random number of int3 instructions before BPF code */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	*image_ptr = &header->image[prandom_u32() % hole];
#else
	*image_ptr = &header->image[32 % hole];
#endif
	return header;
}
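Filling the whole allocation with 0xcc and then starting the image at a random offset inside the hole is a JIT hardening measure: stray jumps into the padding trap on an int3 breakpoint, and the position of the generated code within the page becomes unpredictable. This is essentially the scheme the kernel later factored out into bpf_jit_binary_alloc().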
Example #4
static int gact_net_rand(struct tcf_gact *gact)
{
	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
	if (prandom_u32() % gact->tcfg_pval)
		return gact->tcf_action;
	return gact->tcfg_paction;
}
Example #5
static void yam_arbitrate(struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);

	if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF ||
	    skb_queue_empty(&yp->send_queue))
		return;
	/* tx_state is TX_OFF and there is data to send */

	if (yp->dupmode) {
		/* Full duplex mode, don't wait */
		yam_start_tx(dev, yp);
		return;
	}
	if (yp->dcd) {
		/* DCD on, wait slotime ... */
		yp->slotcnt = yp->slot / 10;
		return;
	}
	/* Has slottime passed? */
	if ((--yp->slotcnt) > 0)
		return;

	yp->slotcnt = yp->slot / 10;

	/* is random > persist? */
	if ((prandom_u32() % 256) > yp->pers)
		return;

	yam_start_tx(dev, yp);
}
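The final check implements p-persistent CSMA: once DCD is clear and slottime has elapsed, the port keys up only when an 8-bit random draw does not exceed yp->pers, i.e. with probability (pers + 1)/256 per slot.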
Example #6
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
Example #7
static int
qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
	     struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
	struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
	const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
	u32 short_cookie = prandom_u32();
	u16 flags = 0;

	*cookie = short_cookie;

	if (params->offchan)
		flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN;

	if (params->no_cck)
		flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK;

	if (params->dont_wait_for_ack)
		flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;

	pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
		 wdev->netdev->name, params->chan->center_freq,
		 le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
		 params->len, short_cookie, flags);

	return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
					params->chan->center_freq,
					params->buf, params->len);
}
Example #8
static int do_operation(void)
{
	if (prandom_u32() & 1)
		return do_read();
	else
		return do_write();
}
Example #9
/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
{
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);

	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
}
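When repicking, the draw is taken from max - 1 values and any result at or above the old index o is shifted up by one; this maps the draw uniformly onto the other num_mon - 1 monitors, so the function can never return cur_mon again.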
Example #10
static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
						  u8 **image_ptr)
{
	unsigned int sz, hole;
	struct bpf_binary_header *header;

	/* Most BPF filters are really small,
	 * but if some of them fill a page, allow at least
	 * 128 extra bytes to insert a random section of int3
	 */
	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
/*
	header = module_alloc(sz);
	if (!header)
		return NULL;
*/
	header = mmap(NULL, sz, PROT_READ|PROT_WRITE|PROT_EXEC,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if ((void *)header == MAP_FAILED) {
		perror("bpf_jit_comp.c: mmap");
		exit(EXIT_FAILURE);
	}

	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

	header->pages = sz / PAGE_SIZE;
	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));

	/* insert a random number of int3 instructions before BPF code */
#define prandom_u32() 0
	*image_ptr = &header->image[prandom_u32() % hole];
#undef prandom_u32
	return header;
}
Example #11
/*
 * choose a random mds that is "up" (i.e. has a state > 0), or -1.
 */
int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
{
	int n = 0;
	int i;

	/* special case for one mds */
	if (1 == m->m_max_mds && m->m_info[0].state > 0)
		return 0;

	/* count */
	for (i = 0; i < m->m_max_mds; i++)
		if (m->m_info[i].state > 0)
			n++;
	if (n == 0)
		return -1;

	/* pick: return the index of the n-th "up" mds */
	n = prandom_u32() % n;
	for (i = 0; ; i++)
		if (m->m_info[i].state > 0 && n-- == 0)
			break;

	return i;
}
Example #12
void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
				 struct nf_conntrack_tuple *tuple,
				 const struct nf_nat_range *range,
				 enum nf_nat_manip_type maniptype,
				 const struct nf_conn *ct,
				 u16 *rover)
{
	unsigned int range_size, min, i;
	__be16 *portptr;
	u_int16_t off;

	if (maniptype == NF_NAT_MANIP_SRC)
		portptr = &tuple->src.u.all;
	else
		portptr = &tuple->dst.u.all;

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*portptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*portptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		range_size = ntohs(range->max_proto.all) - min + 1;
	}

	if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
		off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
						  ? tuple->dst.u.all
						  : tuple->src.u.all);
	} else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) {
		off = prandom_u32();
	} else {
		off = *rover;
	}

	for (i = 0; ; ++off) {
		*portptr = htons(min + off % range_size);
		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
			continue;
		if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL))
			*rover = off;
		return;
	}
}
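The closing loop probes ports in a fixed rotation: min + off % range_size visits every port in the range exactly once per range_size increments of off, so a random starting off randomizes only where the scan begins, not its order. A hypothetical userspace sketch of the probe sequence (plain C, not kernel code):

#include <stdio.h>

/* Hypothetical demo of the probe order used above, for a small range. */
int main(void)
{
	unsigned int min = 1024, range_size = 8;
	unsigned int off = 5;	/* stands in for the random or rover start */
	unsigned int i;

	for (i = 0; i < range_size; i++, off++)
		printf("probe port %u\n", min + off % range_size);
	return 0;
}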
Example #13
static int bitfliptest_do_operation(void)
{
	if (prandom_u32() & 1) {
		return bitfliptest_do_read();
	} else {
		return bitfliptest_do_write();
	}
}
Example #14
static void single_bit_error_data(void *error_data, void *correct_data,
				size_t size)
{
	unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);

	memcpy(error_data, correct_data, size);
	__change_bit_le(offset, error_data);
}
Example #15
static int rand_offs(void)
{
	unsigned int offs;

	offs = prandom_u32();
	offs %= bufsize;
	return offs;
}
Example #16
static int rand_len(int offs)
{
	unsigned int len;

	len = prandom_u32();
	len %= (bufsize - offs);
	return len;
}
Example #17
static int bitfliptest_rand_len(int offs)
{
	unsigned int len = 0;

	len = prandom_u32();
	len %= (bufsize - offs);
	return len;
}
Example #18
static int bitfliptest_rand_offs(void)
{
	unsigned int offs = 0;

	offs = prandom_u32();
	offs %= bufsize;
	return offs;
}
Example #19
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	return prandom_u32();
	#else
	return random32();
	#endif
#endif
}
Example #20
/*
 * Get a random new sequence number but make sure it is not greater than
 * EXT4_MMP_SEQ_MAX.
 */
static unsigned int mmp_new_seq(void)
{
	u32 new_seq;

	do {
		new_seq = prandom_u32();
	} while (new_seq > EXT4_MMP_SEQ_MAX);

	return new_seq;
}
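The retry loop is rejection sampling: discarding draws above EXT4_MMP_SEQ_MAX keeps the result uniform over [0, EXT4_MMP_SEQ_MAX], whereas reducing with a modulo would bias the low sequence numbers.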
Example #21
static int __init find_bit_test(void)
{
	unsigned long nbits = BITMAP_LEN / SPARSE;

	pr_err("\nStart testing find_bit() with random-filled bitmap\n");

	get_random_bytes(bitmap, sizeof(bitmap));
	get_random_bytes(bitmap2, sizeof(bitmap2));

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);

	/*
	 * test_find_first_bit() may take some time, so
	 * traverse only part of bitmap to avoid soft lockup.
	 */
	test_find_first_bit(bitmap, BITMAP_LEN / 10);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	pr_err("\nStart testing find_bit() with sparse bitmap\n");

	bitmap_zero(bitmap, BITMAP_LEN);
	bitmap_zero(bitmap2, BITMAP_LEN);

	while (nbits--) {
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap);
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
	}

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);
	test_find_first_bit(bitmap, BITMAP_LEN);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	/*
	 * Everything is OK. Return error just to let user run benchmark
	 * again without annoying rmmod.
	 */
	return -EINVAL;
}
Example #22
static ssize_t r_coin(struct file *f, char __user *b,
		      size_t cnt, loff_t *lf)
{
	char *ret;
	u32 value = prandom_u32() % 2;

	ret = msg[value];
	stats[value]++;
	return simple_read_from_buffer(b, cnt, lf, ret, strlen(ret));
}
Example #23
static void __uuid_gen_common(__u8 b[16])
{
	int i;
	u32 r;

	for (i = 0; i < 4; i++) {
		r = prandom_u32();
		memcpy(b + i * 4, &r, 4);
	}
	/* set the variant bits to 0b10 */
	b[8] = (b[8] & 0x3F) | 0x80;
}
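The mask-and-or on b[8] sets the two RFC 4122 variant bits to 10 while keeping the remaining six bits random; the version nibble (0x40 for a random, version-4 UUID) is applied separately by the callers.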
Example #24
static int bitfliptest_rand_eb(void)
{
	unsigned int eb = 0;

again:
	eb = prandom_u32();
	/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
	eb %= (ebcnt - 1);
	if (bbt[eb])
		goto again;
	return eb;
}
Example #25
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		sfq_destroy(sch);
		return -ENOMEM;
	}
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
Example #26
static void makedata(int disks)
{
	int i, j;

	for (i = 0; i < disks; i++) {
		for (j = 0; j < PAGE_SIZE; j += sizeof(u32)) {
			u32 *p = page_address(data[i]) + j;

			*p = prandom_u32();
		}

		dataptrs[i] = data[i];
	}
}
Example #27
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	return prandom_u32();
	#else
	return random32();
	#endif
#elif defined(PLATFORM_WINDOWS)
	#error "to be implemented\n"
#elif defined(PLATFORM_FREEBSD)
	#error "to be implemented\n"
#endif
}
Example #28
/* returns -ve errno or +ve port */
static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
{
	int ret = -EADDRINUSE;
	u16 rover, last;
	u64 key;

	if (*port != 0) {
		rover = be16_to_cpu(*port);
		if (rover == RDS_FLAG_PROBE_PORT)
			return -EINVAL;
		last = rover;
	} else {
		rover = max_t(u16, prandom_u32(), 2);
		last = rover - 1;
	}

	do {
		if (rover == 0)
			rover++;

		if (rover == RDS_FLAG_PROBE_PORT)
			continue;
		key = ((u64)addr << 32) | cpu_to_be16(rover);
		if (rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms))
			continue;

		rs->rs_bound_key = key;
		rs->rs_bound_addr = addr;
		net_get_random_once(&rs->rs_hash_initval,
				    sizeof(rs->rs_hash_initval));
		rs->rs_bound_port = cpu_to_be16(rover);
		rs->rs_bound_node.next = NULL;
		rds_sock_addref(rs);
		if (!rhashtable_insert_fast(&bind_hash_table,
					    &rs->rs_bound_node, ht_parms)) {
			*port = rs->rs_bound_port;
			ret = 0;
			rdsdebug("rs %p binding to %pI4:%d\n",
			  rs, &addr, (int)ntohs(*port));
			break;
		} else {
			rds_sock_put(rs);
			ret = -ENOMEM;
			break;
		}
	} while (rover++ != last);

	return ret;
}
Example #29
/*
 * use the kernel prandom call to pick a new random level.  This
 * uses P = .50.  If you bump the SKIP_MAXLEVEL past 32 levels,
 * this function needs updating.
 */
int skiplist_get_new_level(struct sl_list *list, int max_level)
{
	int level = 0;
	unsigned long randseed;

	randseed = prandom_u32();

	while (randseed && (randseed & 1)) {
		randseed >>= 1;
		level++;
		if (level == max_level)
			break;
	}
	return (level >= SKIP_MAXLEVEL ? SKIP_MAXLEVEL - 1 : level);
}
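Each extra level costs one more consecutive set low-order bit, so level k comes up with probability about 2^-(k+1), with the max_level cap absorbing the tail. A hypothetical userspace sketch, with rand() standing in for prandom_u32(), that tallies the same loop's distribution:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { MAX_LEVEL = 8, TRIALS = 1 << 20 };
	unsigned long counts[MAX_LEVEL] = { 0 };
	long t;

	srand(1);
	for (t = 0; t < TRIALS; t++) {
		unsigned long r = (unsigned long)rand();
		int level = 0;

		while ((r & 1) && level < MAX_LEVEL - 1) {
			r >>= 1;
			level++;
		}
		counts[level]++;
	}
	/* expect roughly 0.5, 0.25, 0.125, ... */
	for (t = 0; t < MAX_LEVEL; t++)
		printf("level %ld: %.4f\n", t, (double)counts[t] / TRIALS);
	return 0;
}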
Example #30
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}