/*
 * Kthread worker callback: map the original request carried by the
 * embedded target I/O.  If the mapping reports DM_MAPIO_REQUEUE, put
 * the original request back on the queue without delay.
 */
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio;

	tio = container_of(work, struct dm_rq_target_io, work);
	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}
static void dm_done(struct request *clone, int error, bool mapped) { int r = error; struct dm_rq_target_io *tio = clone->end_io_data; dm_request_endio_fn rq_end_io = NULL; if (tio->ti) { rq_end_io = tio->ti->type->rq_end_io; if (mapped && rq_end_io) r = rq_end_io(tio->ti, clone, error, &tio->info); } if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && !clone->q->limits.max_write_same_sectors)) disable_write_same(tio->md); if (r <= 0) /* The target wants to complete the I/O */ dm_end_request(clone, r); else if (r == DM_ENDIO_INCOMPLETE) /* The target will handle the I/O */ return; else if (r == DM_ENDIO_REQUEUE) /* The target wants to requeue the I/O */ dm_requeue_original_request(tio->md, tio->orig); else { DMWARN("unimplemented target endio return value: %d", r); BUG(); } }
/*
 * Kthread worker callback: map the target I/O's original request and,
 * on DM_MAPIO_REQUEUE, push the original request back onto the queue.
 */
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio, tio->orig, tio->md) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio->md, tio->orig);
}
static void dm_done(struct request *clone, blk_status_t error, bool mapped) { int r = DM_ENDIO_DONE; struct dm_rq_target_io *tio = clone->end_io_data; dm_request_endio_fn rq_end_io = NULL; if (tio->ti) { rq_end_io = tio->ti->type->rq_end_io; if (mapped && rq_end_io) r = rq_end_io(tio->ti, clone, error, &tio->info); } if (unlikely(error == BLK_STS_TARGET)) { if (req_op(clone) == REQ_OP_WRITE_SAME && !clone->q->limits.max_write_same_sectors) disable_write_same(tio->md); if (req_op(clone) == REQ_OP_WRITE_ZEROES && !clone->q->limits.max_write_zeroes_sectors) disable_write_zeroes(tio->md); } switch (r) { case DM_ENDIO_DONE: /* The target wants to complete the I/O */ dm_end_request(clone, error); break; case DM_ENDIO_INCOMPLETE: /* The target will handle the I/O */ return; case DM_ENDIO_REQUEUE: /* The target wants to requeue the I/O */ dm_requeue_original_request(tio, false); break; case DM_ENDIO_DELAY_REQUEUE: /* The target wants to requeue the I/O after a delay */ dm_requeue_original_request(tio, true); break; default: DMWARN("unimplemented target endio return value: %d", r); BUG(); } }
/* * Returns: * DM_MAPIO_* : the request has been processed as indicated * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued * < 0 : the request was completed due to failure */ static int map_request(struct dm_rq_target_io *tio) { int r; struct dm_target *ti = tio->ti; struct mapped_device *md = tio->md; struct request *rq = tio->orig; struct request *clone = NULL; blk_status_t ret; r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); check_again: switch (r) { case DM_MAPIO_SUBMITTED: /* The target has taken the I/O to submit by itself later */ break; case DM_MAPIO_REMAPPED: if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { /* -ENOMEM */ ti->type->release_clone_rq(clone); return DM_MAPIO_REQUEUE; } /* The target has remapped the I/O so dispatch it */ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), blk_rq_pos(rq)); ret = dm_dispatch_clone_request(clone, rq); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { blk_rq_unprep_clone(clone); tio->ti->type->release_clone_rq(clone); tio->clone = NULL; if (!rq->q->mq_ops) r = DM_MAPIO_DELAY_REQUEUE; else r = DM_MAPIO_REQUEUE; goto check_again; } break; case DM_MAPIO_REQUEUE: /* The target wants to requeue the I/O */ break; case DM_MAPIO_DELAY_REQUEUE: /* The target wants to requeue the I/O after a delay */ dm_requeue_original_request(tio, true); break; case DM_MAPIO_KILL: /* The target wants to complete the I/O */ dm_kill_unmapped_request(rq, BLK_STS_IOERR); break; default: DMWARN("unimplemented target map return value: %d", r); BUG(); } return r; }
/*
 * Map the original request @rq: either re-map an existing clone via the
 * target's map_rq hook, or have the target allocate and map a fresh clone
 * via clone_and_map_rq, then dispatch it.
 *
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
		       struct mapped_device *md)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct request *clone = NULL;

	if (tio->clone) {
		/* A clone was already prepared: map it directly. */
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
	} else {
		/* Ask the target to allocate and map a clone of the original. */
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r < 0) {
			/* The target wants to complete the I/O */
			dm_kill_unmapped_request(rq, r);
			return r;
		}
		if (r != DM_MAPIO_REMAPPED)
			return r;
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(md, tio->orig);
		break;
	default:
		if (r > 0) {
			/* Unknown positive return code from the target's map hook. */
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
		return r;
	}

	return 0;
}