/*
 * Account for the completion of one in-flight request on @md and, for
 * the old (non-blk-mq) request path, kick the queue so pending work is
 * redispatched.
 *
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	/* One fewer request of this direction in flight on md.
	 * NOTE(review): rw presumably indexes READ/WRITE — confirm at callers. */
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}
void bsg_goose_queue(struct request_queue *q) { if (!q) return; blk_run_queue_async(q); }
/*
 * Put the original request of a clone back on its request queue and
 * schedule the queue to be run again, all under the queue lock with
 * interrupts disabled.
 */
static void dm_old_requeue_request(struct request *rq)
{
	unsigned long irqflags;
	struct request_queue *queue = rq->q;

	spin_lock_irqsave(queue->queue_lock, irqflags);
	blk_requeue_request(queue, rq);
	/* Kick the queue asynchronously rather than running it here. */
	blk_run_queue_async(queue);
	spin_unlock_irqrestore(queue->queue_lock, irqflags);
}