/*
 * debugfs "state" attribute writer: accepts the commands "run" (kick the
 * hardware queues) or "start" (restart stopped hardware queues).
 * Returns @count on success, -ENOENT if the queue is already dead,
 * -EFAULT on a failed user copy, and -EINVAL for any other input.
 */
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	/* Reject input that cannot fit opbuf with its NUL terminator. */
	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		pr_err("%s: use either 'run' or 'start'\n", __func__);
		return -EINVAL;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;

	/* Tolerate surrounding whitespace (e.g. a trailing newline from echo). */
	op = strstrip(opbuf);

	if (!strcmp(op, "run")) {
		blk_mq_run_hw_queues(q, true);
		return count;
	}
	if (!strcmp(op, "start")) {
		blk_mq_start_stopped_hw_queues(q, true);
		return count;
	}

	pr_err("%s: unsupported operation '%s'\n", __func__, op);
	pr_err("%s: use either 'run' or 'start'\n", __func__);
	return -EINVAL;
}
/*
 * Work handler for mq->recovery_work: runs a recovery pass for the MMC
 * queue, then restarts the block layer hardware queues.
 */
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue, recovery_work);
	struct request_queue *q = mq->queue;

	/* Claim the card/host for the duration of recovery. */
	mmc_get_card(mq->card, &mq->ctx);

	/*
	 * Flag that recovery is in progress while the recovery routine runs.
	 * NOTE(review): set outside mq->lock — presumably safe because the
	 * card is claimed and only the recovery path reads it; confirm
	 * against the readers of in_recovery.
	 */
	mq->in_recovery = true;

	/* Dispatch on queue type: CQE (command queue engine) vs plain mq. */
	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	/* recovery_needed is cleared under mq->lock, unlike in_recovery. */
	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	mmc_put_card(mq->card, &mq->ctx);

	/*
	 * Re-run the hardware queues asynchronously so requests held back
	 * while recovery_needed was set get dispatched again.
	 */
	blk_mq_run_hw_queues(q, true);
}