Example #1
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * 2 forces completion on the submitting CPU, 1 allows completion on
	 * any CPU sharing a cache with the submitter, 0 turns it off.
	 */
	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
Example #2
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
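The kernel-doc above describes the parameters but leaves the usual calling pattern implicit. Below is a minimal sketch of how a legacy (pre-blk-mq) driver might enable tagged queuing at init time; my_drv_setup_queue and the depth of 64 are illustrative assumptions, not taken from these examples:

#include <linux/blkdev.h>

/*
 * Hypothetical init helper (illustrative only).  Passing tags == NULL asks
 * blk_queue_init_tags() to allocate a fresh tag map via
 * __blk_queue_init_tags(); a queue that already has q->queue_tags set
 * would instead be resized to the new depth.
 */
static int my_drv_setup_queue(struct request_queue *q)
{
	int err;

	err = blk_queue_init_tags(q, 64, NULL);
	if (err)
		return err;	/* -ENOMEM when the tag map allocation fails */

	return 0;
}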
Example #3
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	/* 2 disables all merging, 1 disables only extended merge attempts */
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #4
void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops) {
		/* legacy, non-blk-mq request path */
		dm_old_stop_queue(q);
	} else {
		spin_lock_irq(q->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, q);
		spin_unlock_irq(q->queue_lock);

		blk_mq_cancel_requeue_work(q);
		blk_mq_stop_hw_queues(q);
	}
}
Example #5
static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	if (ret < 0)
		return ret;

	/* QUEUE_FLAG_IO_STAT toggles per-disk I/O accounting */
	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #6
static ssize_t queue_random_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	/* ADD_RANDOM: let this device's I/O timings feed the entropy pool */
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
	else
		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #7
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
Example #8
static ssize_t
queue_store_unpriv_sgio(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_UNPRIV_SGIO, q);
	else
		queue_flag_clear(QUEUE_FLAG_UNPRIV_SGIO, q);
	spin_unlock_irq(q->queue_lock);
	return ret;
}
Example #9
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;	/* cast away const for simple_strtoul()'s endp */

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}
Example #10
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #11
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}
Example #12
/*
 * Note: unlike the sysfs helpers above, no lock is taken here because the
 * caller of blk_stop_queue() must already hold q->queue_lock.
 */
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}