/*
 * Hand a prepared clone to its underlying device queue.
 *
 * If the block layer refuses the insertion, the clone never reached the
 * device, so the *original* request is completed with the error instead.
 */
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int ret;

	if (blk_queue_io_stat(clone->q))
		clone->cmd_flags |= REQ_IO_STAT;

	/* Stamp dispatch time for I/O accounting on the clone's queue. */
	clone->start_time = jiffies;

	ret = blk_insert_cloned_request(clone->q, clone);
	if (ret)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, ret);
}
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq) { blk_status_t r; if (blk_queue_io_stat(clone->q)) clone->rq_flags |= RQF_IO_STAT; clone->start_time = jiffies; r = blk_insert_cloned_request(clone->q, clone); if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE) /* must complete clone in terms of original request */ dm_complete_request(rq, r); return r; }
/* * Called with the clone's queue lock held (in the case of .request_fn) */ static void end_clone_request(struct request *clone, blk_status_t error) { struct dm_rq_target_io *tio = clone->end_io_data; /* * Actual request completion is done in a softirq context which doesn't * hold the clone's queue lock. Otherwise, deadlock could occur because: * - another request may be submitted by the upper level driver * of the stacking during the completion * - the submission which requires queue lock may be done * against this clone's queue */ dm_complete_request(tio->orig, error); }
/*
 * Completion callback for a clone (pre-blk_status_t variant).
 * Called with the clone's queue lock held (in the case of .request_fn).
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/* Legacy (non-blk-mq) path only: blk-mq clones need no queue put here. */
	if (!clone->q->mq_ops) {
		/*
		 * For just cleaning up the information of the queue in which
		 * the clone was dispatched.
		 * The clone is *NOT* freed actually here because it is alloced
		 * from dm own mempool (REQ_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
/* * Complete the not-mapped clone and the original request with the error status * through softirq context. * Target's rq_end_io() function isn't called. * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. */ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error) { rq->rq_flags |= RQF_FAILED; dm_complete_request(rq, error); }
/* * Complete the not-mapped clone and the original request with the error status * through softirq context. * Target's rq_end_io() function isn't called. * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. */ static void dm_kill_unmapped_request(struct request *rq, int error) { rq->cmd_flags |= REQ_FAILED; dm_complete_request(rq, error); }