/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}
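Worth noting before the next example: `prandom_u32_max(n)` returns a uniform value in [0, n), so `q->head + prandom_u32_max(choke_len(q))`, masked with `tab_mask`, lands on a uniformly random slot of the power-of-two ring. Below is a minimal userspace sketch of the same bounded-index idiom; `prandom_u32_max_sim()` is a made-up stand-in for the kernel helper.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace stand-in for the kernel's prandom_u32_max(): a uniform
 * value in [0, bound). The multiply-shift reduction mirrors the
 * kernel's ((u64)prandom_u32() * bound) >> 32 and avoids the modulo
 * bias of `random_draw % bound`. rand() is only a rough 32-bit source.
 */
static uint32_t prandom_u32_max_sim(uint32_t bound)
{
	uint32_t r = (uint32_t)rand() ^ ((uint32_t)rand() << 16);

	return (uint32_t)(((uint64_t)r * bound) >> 32);
}

int main(void)
{
	/* Pick a random occupied slot of a power-of-two ring, as choke does. */
	enum { TAB_MASK = 7 };
	unsigned int head = 5, len = 6; /* slots 5,6,7,0,1,2 hold skbs */
	unsigned int idx = (head + prandom_u32_max_sim(len)) & TAB_MASK;

	printf("random occupied slot: %u\n", idx);
	return 0;
}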
Example #2
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
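The loop here fills the PS field of an EME-PKCS1-v1_5 block (`0x00 || 0x02 || PS || 0x00 || M`, per RFC 8017); `1 + prandom_u32_max(255)` yields a uniformly random nonzero octet. Note that `in_buf` starts at the 0x02 byte: the leading zero octet is implicit because the child RSA operation runs on `key_size - 1` bytes, treated as a big-endian integer. A self-contained sketch of the same layout, where `rnd_nonzero_byte()` is a hypothetical stand-in for the kernel PRNG call:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch of EME-PKCS1-v1_5 encoding (RFC 8017):
 *   EM = 0x00 || 0x02 || PS || 0x00 || M
 * where PS is at least 8 nonzero random octets.
 */
static uint8_t rnd_nonzero_byte(void)
{
	return (uint8_t)(1 + rand() % 255); /* in [1, 255], slight modulo bias */
}

static int pkcs1_v15_pad(uint8_t *em, size_t key_size,
			 const uint8_t *msg, size_t msg_len)
{
	size_t ps_len, i;

	if (msg_len > key_size - 11)
		return -1; /* message too long for this modulus */
	ps_len = key_size - msg_len - 3;

	em[0] = 0x00;
	em[1] = 0x02;
	for (i = 0; i < ps_len; i++)
		em[2 + i] = rnd_nonzero_byte(); /* PS must be nonzero */
	em[2 + ps_len] = 0x00;
	memcpy(&em[3 + ps_len], msg, msg_len);
	return 0;
}

int main(void)
{
	uint8_t em[32];
	const uint8_t msg[] = { 'h', 'i' };

	return pkcs1_v15_pad(em, sizeof(em), msg, sizeof(msg));
}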
Example #3
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
	struct team_port *port;
	int port_index;

	port_index = prandom_u32_max(team->en_port_count);
	port = team_get_port_by_index_rcu(team, port_index);
	if (unlikely(!port))
		goto drop;
	port = team_get_first_port_txable_rcu(team, port);
	if (unlikely(!port))
		goto drop;
	if (team_dev_queue_xmit(team, port, skb))
		return false;
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}
Example #4
static void __printf(3, 4) __init
__test(const char *expect, int elen, const char *fmt, ...)
{
	va_list ap;
	int rand;
	char *p;

	if (elen >= BUF_SIZE) {
		pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
		       elen, fmt);
		failed_tests++;
		return;
	}

	va_start(ap, fmt);

	/*
	 * Every fmt+args is subjected to four tests: Three where we
	 * tell vsnprintf varying buffer sizes (plenty, not quite
	 * enough and 0), and then we also test that kvasprintf would
	 * be able to print it as expected.
	 */
	failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
	rand = 1 + prandom_u32_max(elen+1);
	/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
	failed_tests += do_test(rand, expect, elen, fmt, ap);
	failed_tests += do_test(0, expect, elen, fmt, ap);

	p = kvasprintf(GFP_KERNEL, fmt, ap);
	if (p) {
		total_tests++;
		if (memcmp(p, expect, elen+1)) {
			pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
				fmt, p, expect);
			failed_tests++;
		}
		kfree(p);
	}
	va_end(ap);
}
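The randomized size feeds the middle of the three `do_test()` calls: a buffer of size `rand` in [1, elen+1], anywhere from hopeless to an exact fit. That test only works because C99 `(v)snprintf` returns the length the full output would have had, not the number of bytes actually written, a contract this standalone snippet illustrates (not kernel code):

#include <stdio.h>

/*
 * What the "not quite enough" case relies on: (v)snprintf returns the
 * would-be length even when the buffer truncates the output, and the
 * result is always NUL-terminated.
 */
int main(void)
{
	char buf[8];
	int want = snprintf(NULL, 0, "%s/%d", "value", 12345); /* probe: 11 */
	int got = snprintf(buf, sizeof(buf), "%s/%d", "value", 12345);

	printf("full len %d, buffer holds \"%s\" (ret %d)\n", want, buf, got);
	return got == want ? 0 : 1; /* both are 11; buf holds "value/1" */
}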
Example #5
static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
{
	struct vivid_buffer *vid_cap_buf = NULL;
	struct vivid_buffer *vbi_cap_buf = NULL;

	dprintk(dev, 1, "Video Capture Thread Tick\n");

	while (dropped_bufs-- > 1)
		tpg_update_mv_count(&dev->tpg,
				dev->field_cap == V4L2_FIELD_NONE ||
				dev->field_cap == V4L2_FIELD_ALTERNATE);

	/* Drop a certain percentage of buffers. */
	if (dev->perc_dropped_buffers &&
	    prandom_u32_max(100) < dev->perc_dropped_buffers)
		goto update_mv;

	spin_lock(&dev->slock);
	if (!list_empty(&dev->vid_cap_active)) {
		vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
		list_del(&vid_cap_buf->list);
	}
Example #6
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
	struct vivid_buffer *vid_out_buf = NULL;
	struct vivid_buffer *vbi_out_buf = NULL;

	dprintk(dev, 1, "Video Output Thread Tick\n");

	/* Drop a certain percentage of buffers. */
	if (dev->perc_dropped_buffers &&
	    prandom_u32_max(100) < dev->perc_dropped_buffers)
		return;

	spin_lock(&dev->slock);
	/*
	 * Only dequeue buffer if there is at least one more pending.
	 * This makes video loopback possible.
	 */
	if (!list_empty(&dev->vid_out_active) &&
	    !list_is_singular(&dev->vid_out_active)) {
		vid_out_buf = list_entry(dev->vid_out_active.next,
					 struct vivid_buffer, list);
		list_del(&vid_out_buf->list);
	}
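Both vivid tick handlers above gate on the same idiom: `prandom_u32_max(100)` is uniform over 0..99, so `prandom_u32_max(100) < dev->perc_dropped_buffers` is a Bernoulli trial that drops a buffer with probability perc/100. A userspace sketch with `rand()` as a rough stand-in for the kernel PRNG:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Bernoulli-trial sketch of the drop test: a uniform draw over 0..99
 * compared against a percentage. rand() % 100 carries a slight modulo
 * bias that prandom_u32_max(100) avoids.
 */
static bool drop_this_buffer(unsigned int perc_dropped)
{
	return perc_dropped && (unsigned int)(rand() % 100) < perc_dropped;
}

int main(void)
{
	unsigned int drops = 0, i;

	for (i = 0; i < 100000; i++)
		drops += drop_this_buffer(25);
	printf("dropped %u of 100000 (~25%% expected)\n", drops);
	return 0;
}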
static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	struct test_obj *obj;
	int err;
	unsigned int i, insert_retries = 0;
	s64 start, end;

	/*
	 * Insertion Test:
	 * Insert entries into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", entries);
	start = ktime_get_ns();
	for (i = 0; i < entries; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(ht, obj, test_rht_params);
		if (err > 0)
			insert_retries += err;
		else if (err)
			return err;
	}

	if (insert_retries)
		pr_info("  %u insertions retried due to memory pressure\n",
			insert_retries);

	test_bucket_stats(ht, entries);
	rcu_read_lock();
	test_rht_lookup(ht, array, entries);
	rcu_read_unlock();

	test_bucket_stats(ht, entries);

	pr_info("  Deleting %d keys\n", entries);
	for (i = 0; i < entries; i++) {
		struct test_obj_val key = {
			.id = i * 2,
		};

		if (array[i].value.id != TEST_INSERT_FAIL) {
			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
			BUG_ON(!obj);

			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
		}

		cond_resched();
	}

	end = ktime_get_ns();
	pr_info("  Duration of test: %lld ns\n", end - start);

	return end - start;
}

static struct rhashtable ht;
static struct rhltable rhlt;

static int __init test_rhltable(unsigned int entries)
{
	struct test_obj_rhl *rhl_test_objects;
	unsigned long *obj_in_table;
	unsigned int i, j, k;
	int ret, err;

	if (entries == 0)
		entries = 1;

	rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries);
	if (!rhl_test_objects)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long));
	if (!obj_in_table)
		goto out_free;

	/* nulls_base not supported in rhlist interface */
	test_rht_params.nulls_base = 0;
	err = rhltable_init(&rhlt, &test_rht_params);
	if (WARN_ON(err))
		goto out_free;

	k = prandom_u32();
	ret = 0;
	for (i = 0; i < entries; i++) {
		rhl_test_objects[i].value.id = k;
		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	if (err)
		ret = err;

	pr_info("test %d add/delete pairs into rhlist\n", entries);
	for (i = 0; i < entries; i++) {
		struct rhlist_head *h, *pos;
		struct test_obj_rhl *obj;
		struct test_obj_val key = {
			.id = k,
		};
		bool found;

		rcu_read_lock();
		h = rhltable_lookup(&rhlt, &key, test_rht_params);
		if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
			rcu_read_unlock();
			break;
		}

		if (i) {
			j = i - 1;
			rhl_for_each_entry_rcu(obj, pos, h, list_node) {
				if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
					break;
			}
		}

		cond_resched_rcu();

		found = false;

		rhl_for_each_entry_rcu(obj, pos, h, list_node) {
			if (pos == &rhl_test_objects[i].list_node) {
				found = true;
				break;
			}
		}

		rcu_read_unlock();

		if (WARN(!found, "element %d not found", i))
			break;

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
		if (err == 0)
			clear_bit(i, obj_in_table);
	}

	if (ret == 0 && err)
		ret = err;

	for (i = 0; i < entries; i++) {
		WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	pr_info("test %d random rhlist add/delete operations\n", entries);
	for (j = 0; j < entries; j++) {
		u32 i = prandom_u32_max(entries);
		u32 prand = prandom_u32();

		cond_resched();

		if (prand == 0)
			prand = prandom_u32();

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (test_bit(i, obj_in_table)) {
			clear_bit(i, obj_in_table);
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
			     i, err, -ENOENT))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (err == 0) {
			if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
				continue;
		} else {
			if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}
		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (err == 0) {
			if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
				continue;
		} else {
			if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		i = prandom_u32_max(entries);
		if (test_bit(i, obj_in_table)) {
			err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
			WARN(err, "cannot remove element at slot %d", i);
			if (err == 0)
				clear_bit(i, obj_in_table);
		} else {
			err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
			WARN(err, "failed to insert object %d", i);
			if (err == 0)
				set_bit(i, obj_in_table);
		}
	}

	for (i = 0; i < entries; i++) {
		cond_resched();
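A pattern worth calling out in the random add/delete loop above: one `prandom_u32()` word is treated as a budget of coin flips, spent a bit at a time via `prand & 1` and `prand >>= 1`, with a redraw up front if the word comes out zero. A hedged userspace sketch of that bit-budget idiom (names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Bit-budget sketch: draw one 32-bit pseudo-random word and consume
 * it as a sequence of coin flips, redrawing when it runs out. rand()
 * stands in for the kernel's prandom_u32().
 */
static uint32_t budget;

static int coin_flip(void)
{
	int bit;

	while (budget == 0) /* refill; zero would mean an empty budget */
		budget = (uint32_t)rand();
	bit = (int)(budget & 1);
	budget >>= 1;
	return bit;
}

int main(void)
{
	int i, heads = 0;

	for (i = 0; i < 32; i++)
		heads += coin_flip();
	printf("%d heads in 32 flips\n", heads);
	return 0;
}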
Example #9
File: rsa-pkcs1pad.c  Project: 1888/linux
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
Example #10
/* precalculate color bar values to speed up rendering */
static void precalculate_color(struct tpg_data *tpg, int k)
{
	int col = k;
	int r = tpg_colors[col].r;
	int g = tpg_colors[col].g;
	int b = tpg_colors[col].b;

	if (k == TPG_COLOR_TEXTBG) {
		col = tpg_get_textbg_color(tpg);

		r = tpg_colors[col].r;
		g = tpg_colors[col].g;
		b = tpg_colors[col].b;
	} else if (k == TPG_COLOR_TEXTFG) {
		col = tpg_get_textfg_color(tpg);

		r = tpg_colors[col].r;
		g = tpg_colors[col].g;
		b = tpg_colors[col].b;
	} else if (tpg->pattern == TPG_PAT_NOISE) {
		r = g = b = prandom_u32_max(256);
	} else if (k == TPG_COLOR_RANDOM) {
		r = g = b = tpg->qual_offset + prandom_u32_max(196);
	} else if (k >= TPG_COLOR_RAMP) {
		r = g = b = k - TPG_COLOR_RAMP;
	}

	if (tpg->pattern == TPG_PAT_CSC_COLORBAR && col <= TPG_COLOR_CSC_BLACK) {
		r = tpg_csc_colors[tpg->colorspace][col].r;
		g = tpg_csc_colors[tpg->colorspace][col].g;
		b = tpg_csc_colors[tpg->colorspace][col].b;
	} else {
		r <<= 4;
		g <<= 4;
		b <<= 4;
	}
	if (tpg->qual == TPG_QUAL_GRAY)
		r = g = b = color_to_y(tpg, r, g, b);

	/*
	 * The assumption is that the RGB output is always full range,
	 * so only if the rgb_range overrides the 'real' rgb range do
	 * we need to convert the RGB values.
	 *
	 * Currently there is no way of signalling to userspace if you
	 * are actually giving it limited range RGB (or full range
	 * YUV for that matter).
	 *
	 * Remember that r, g and b are still in the 0 - 0xff0 range.
	 */
	if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
	    tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL) {
		/*
		 * Convert from full range (which is what r, g and b are)
		 * to limited range (which is the 'real' RGB range), which
		 * is then interpreted as full range.
		 */
		r = (r * 219) / 255 + (16 << 4);
		g = (g * 219) / 255 + (16 << 4);
		b = (b * 219) / 255 + (16 << 4);
	} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
		   tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED) {
		/*
		 * Clamp r, g and b to the limited range and convert to full
		 * range since that's what we deliver.
		 */
		r = clamp(r, 16 << 4, 235 << 4);
		g = clamp(g, 16 << 4, 235 << 4);
		b = clamp(b, 16 << 4, 235 << 4);
		r = (r - (16 << 4)) * 255 / 219;
		g = (g - (16 << 4)) * 255 / 219;
		b = (b - (16 << 4)) * 255 / 219;
	}

	if (tpg->brightness != 128 || tpg->contrast != 128 ||
	    tpg->saturation != 128 || tpg->hue) {
		/* Implement these operations */

		/* First convert to YCbCr */
		int y = color_to_y(tpg, r, g, b);	/* Luma */
		int cb = color_to_cb(tpg, r, g, b);	/* Cb */
		int cr = color_to_cr(tpg, r, g, b);	/* Cr */
		int tmp_cb, tmp_cr;

		y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
		y += (tpg->brightness << 4) - (128 << 4);

		cb -= 128 << 4;
		cr -= 128 << 4;
		tmp_cb = (cb * cos[128 + tpg->hue]) / 127 + (cr * sin[128 + tpg->hue]) / 127;
		tmp_cr = (cr * cos[128 + tpg->hue]) / 127 - (cb * sin[128 + tpg->hue]) / 127;

		cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
		cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
		if (tpg->is_yuv) {
			tpg->colors[k][0] = clamp(y >> 4, 1, 254);
			tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
			tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
			return;
		}
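The hue control is applied as a fixed-point rotation of the (Cb, Cr) chroma vector: signed 8-bit `sin[]` and `cos[]` lookup tables, scaled so that 1.0 is 127, are indexed at `128 + tpg->hue`, and the division by 127 undoes the scaling. A standalone sketch of that rotation, with table values invented for the 90-degree case:

#include <stdint.h>
#include <stdio.h>

/*
 * Fixed-point hue rotation sketch. Rotating the (Cb, Cr) chroma
 * vector by angle a shifts the hue:
 *   Cb' = Cb*cos(a) + Cr*sin(a)
 *   Cr' = Cr*cos(a) - Cb*sin(a)
 * sin_a and cos_a are s8 values scaled so that 1.0 == 127.
 */
static void rotate_chroma(int cb, int cr, int8_t sin_a, int8_t cos_a,
			  int *out_cb, int *out_cr)
{
	*out_cb = (cb * cos_a) / 127 + (cr * sin_a) / 127;
	*out_cr = (cr * cos_a) / 127 - (cb * sin_a) / 127;
}

int main(void)
{
	int cb, cr;

	/* 90 degrees: sin = 127 (i.e. 1.0), cos = 0 */
	rotate_chroma(100, 0, 127, 0, &cb, &cr);
	printf("Cb'=%d Cr'=%d\n", cb, cr); /* (100, 0) -> (0, -100) */
	return 0;
}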
Example #11
ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
			 size_t size, loff_t *offset)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct timespec ts;
	struct v4l2_rds_data *data = dev->rds_gen.data;
	bool use_alternates;
	unsigned blk;
	int perc;
	int i;

	if (dev->radio_rx_rds_controls)
		return -EINVAL;
	if (size < sizeof(*data))
		return 0;
	size = sizeof(*data) * (size / sizeof(*data));

	if (mutex_lock_interruptible(&dev->mutex))
		return -ERESTARTSYS;
	if (dev->radio_rx_rds_owner &&
	    file->private_data != dev->radio_rx_rds_owner) {
		mutex_unlock(&dev->mutex);
		return -EBUSY;
	}
	if (dev->radio_rx_rds_owner == NULL) {
		vivid_radio_rds_init(dev);
		dev->radio_rx_rds_owner = file->private_data;
	}

retry:
	ktime_get_ts(&ts);
	use_alternates = ts.tv_sec % 10 >= 5;
	if (dev->radio_rx_rds_last_block == 0 ||
	    dev->radio_rx_rds_use_alternates != use_alternates) {
		dev->radio_rx_rds_use_alternates = use_alternates;
		/* Re-init the RDS generator */
		vivid_radio_rds_init(dev);
	}
	ts = timespec_sub(ts, dev->radio_rds_init_ts);
	blk = ts.tv_sec * 100 + ts.tv_nsec / 10000000;
	blk = (blk * VIVID_RDS_GEN_BLOCKS) / 500;
	if (blk >= dev->radio_rx_rds_last_block + VIVID_RDS_GEN_BLOCKS)
		dev->radio_rx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;

	/*
	 * No data is available if there hasn't been time to get new data,
	 * or if the RDS receiver has been disabled, or if we use the data
	 * from the RDS transmitter and that RDS transmitter has been disabled,
	 * or if the signal quality is too weak.
	 */
	if (blk == dev->radio_rx_rds_last_block || !dev->radio_rx_rds_enabled ||
	    (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) ||
	    abs(dev->radio_rx_sig_qual) > 200) {
		mutex_unlock(&dev->mutex);
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;
		if (msleep_interruptible(20) && signal_pending(current))
			return -EINTR;
		if (mutex_lock_interruptible(&dev->mutex))
			return -ERESTARTSYS;
		goto retry;
	}

	/* abs(dev->radio_rx_sig_qual) <= 200, map that to a 0-50% range */
	perc = abs(dev->radio_rx_sig_qual) / 4;

	for (i = 0; i < size && blk > dev->radio_rx_rds_last_block;
			dev->radio_rx_rds_last_block++) {
		unsigned data_blk = dev->radio_rx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
		struct v4l2_rds_data rds = data[data_blk];

		if (data_blk == 0 && dev->radio_rds_loop)
			vivid_radio_rds_init(dev);
		if (perc && prandom_u32_max(100) < perc) {
			switch (prandom_u32_max(4)) {
			case 0:
				rds.block |= V4L2_RDS_BLOCK_CORRECTED;
				break;
			case 1:
				rds.block |= V4L2_RDS_BLOCK_INVALID;
				break;
			case 2:
				rds.block |= V4L2_RDS_BLOCK_ERROR;
				rds.lsb = prandom_u32_max(256);
				rds.msb = prandom_u32_max(256);
				break;
			case 3: /* Skip block altogether */
				if (i)
					continue;
				/*
				 * Must make sure at least one block is
				 * returned, otherwise the application
				 * might think that end-of-file occurred.
				 */
				break;
			}
		}
		if (copy_to_user(buf + i, &rds, sizeof(rds))) {
			i = -EFAULT;
			break;
		}
		i += sizeof(rds);
	}
	mutex_unlock(&dev->mutex);
	return i;
}
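The error-injection path in this last example composes two draws: a Bernoulli trial (`prandom_u32_max(100) < perc`) decides whether to corrupt a block at all, then `prandom_u32_max(4)` picks one of four corruption modes uniformly. A userspace sketch of the two-stage pattern; the struct, flag values, and helper are illustrative, not the V4L2 RDS API:

#include <stdint.h>
#include <stdlib.h>

/*
 * Two-stage error injection: perc% chance of touching the block,
 * then a uniform choice among four corruption modes. rand() stands
 * in for the kernel PRNG; the flag values are made up.
 */
struct rds_block_sim {
	uint8_t lsb, msb, flags;
};

#define BLK_CORRECTED 0x40
#define BLK_INVALID   0x80
#define BLK_ERROR     0xC0

static void maybe_corrupt(struct rds_block_sim *blk, int perc)
{
	if (!perc || rand() % 100 >= perc)
		return;
	switch (rand() % 4) {
	case 0:
		blk->flags |= BLK_CORRECTED;
		break;
	case 1:
		blk->flags |= BLK_INVALID;
		break;
	case 2:
		blk->flags |= BLK_ERROR;
		blk->lsb = (uint8_t)(rand() % 256);
		blk->msb = (uint8_t)(rand() % 256);
		break;
	case 3: /* caller may skip the block entirely */
		break;
	}
}

int main(void)
{
	struct rds_block_sim blk = { 0x12, 0x34, 0 };

	maybe_corrupt(&blk, 25);
	return blk.flags ? 1 : 0; /* nonzero iff the block was corrupted */
}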