/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	WARN_ON(irqs_disabled());

	/*
	 * Set up the request before the dead-queue check so the completion
	 * handler is also invoked when the queue is dead, and test the
	 * command type before running the queue because the request may
	 * already be freed once __blk_run_queue() returns.
	 */
	rq->rq_disk = bd_disk;
	rq->end_io = done;
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dead(q))) {
		spin_unlock_irq(q->queue_lock);
		rq->errors = -ENXIO;
		if (rq->end_io)
			rq->end_io(rq, rq->errors);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/*
	 * PM resume requests are issued while the queue is stopped, so
	 * __blk_run_queue() above does not dispatch them; call the request
	 * handler directly.
	 */
	if (is_pm_resume)
		q->request_fn(q);
	spin_unlock_irq(q->queue_lock);
}
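
/*
 * Usage sketch (not part of the source above): the caller prepares the
 * request itself and supplies the completion callback.  Assumes a legacy,
 * pre-blk-mq kernel matching the blk_execute_rq_nowait() signature shown
 * here; my_end_io() and my_submit() are hypothetical names.
 */
static void my_end_io(struct request *rq, int error)
{
	/* On the legacy path end_io is invoked with the queue lock held. */
	pr_info("request finished: %d\n", error);
	__blk_put_request(rq->q, rq);
}

static int my_submit(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)	/* newer kernels return an ERR_PTR instead of NULL */
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */
	blk_execute_rq_nowait(q, disk, rq, 0 /* at tail */, my_end_io);
	return 0;
}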
/*
 * Debugfs write handler behind the blk-mq "state" attribute: accepts the
 * commands "run" and "start" and kicks the queue's hardware queues
 * accordingly.
 */
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use either 'run' or 'start'\n", __func__);
		return -EINVAL;
	}
	return count;
}
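
/*
 * Context sketch: this handler is the write side of the blk-mq debugfs
 * "state" attribute, driven from user space with e.g.
 * "echo run > /sys/kernel/debug/block/<dev>/state".  Registration follows
 * the blk_mq_debugfs_attr table pattern roughly as below; the struct
 * layout and the queue_state_show() reference are assumptions and may
 * differ between kernel versions.
 */
static int queue_state_show(void *data, struct seq_file *m);

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	/* name     mode   show              write */
	{ "state",  0600,  queue_state_show, queue_state_write },
	{ },
};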
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	the queue to run
 *
 * Must be called with the queue lock held and interrupts disabled;
 * request_fn_active lets queue draining detect that request_fn is running.
 * This variant also gives the elevator a chance to flag an "urgent"
 * request, which is then dispatched through q->urgent_request_fn instead
 * of the regular request_fn.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	q->request_fn_active++;
	if (!q->notified_urgent &&
		q->elevator->type->ops.elevator_is_urgent_fn &&
		q->urgent_request_fn &&
		q->elevator->type->ops.elevator_is_urgent_fn(q) &&
		list_empty(&q->flush_data_in_flight)) {
		q->notified_urgent = true;
		q->urgent_request_fn(q);
	} else
		q->request_fn(q);
	q->request_fn_active--;
}
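
/*
 * For reference, the conditional wrapper most callers go through first
 * filters out stopped queues and then calls the unconditional variant
 * above; in mainline it looks roughly like this.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}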
/*
 * sysfs ->show handler for request_queue attributes; returns -ENOENT once
 * the queue has been marked dead so readers cannot touch a queue that is
 * being torn down.
 */
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
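
/*
 * The matching sysfs ->store path follows the same pattern: take
 * sysfs_lock, bail out with -ENOENT once the queue is dead, and only then
 * hand the write to the attribute.  Sketch modelled on mainline
 * blk-sysfs.c.
 */
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}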
/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	queue to run
 *
 * Tells kblockd to perform the equivalent of blk_run_queue() on our
 * behalf, unless the queue has been stopped or marked dead.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
/**
 * blk_delay_queue - restart queueing after a defined interval
 * @q:		queue to restart
 * @msecs:	delay in milliseconds
 *
 * Postpone running the queue, e.g. until device resources become available
 * again.  Does nothing once the queue has been marked dead.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
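
/*
 * Usage sketch: a legacy request_fn that runs out of device resources can
 * requeue the request and ask for the queue to be retried shortly via
 * blk_delay_queue().  my_hw_ready() and the driver names are hypothetical.
 */
static bool my_hw_ready(void);

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!my_hw_ready()) {
			/* Put the request back and try again in ~3 ms. */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);
			break;
		}
		/* Pretend the request completed successfully. */
		__blk_end_request_all(rq, 0);
	}
}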