/*
 * Complete a clone request: give the target a chance to post-process the
 * completion, then act on the (possibly rewritten) result code.
 *
 * @clone:  the cloned request that has finished at the lower layer
 * @error:  completion status from the block layer (0 or negative errno)
 * @mapped: true if the request was actually mapped to a target, i.e. the
 *          target's end-of-I/O hook should be consulted
 *
 * Legacy variant: errors are plain negative errnos, so a single value @r
 * carries either an errno (<= 0) or a DM_ENDIO_* directive (> 0).
 */
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		/*
		 * Only consult the target's optional rq_end_io hook when the
		 * request was really mapped; the hook may override @r with an
		 * errno or a DM_ENDIO_* directive.
		 */
		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	/*
	 * -EREMOTEIO on a WRITE SAME request whose queue advertises no
	 * WRITE SAME capacity means the device rejected the operation:
	 * disable WRITE SAME for this mapped device so it is not retried.
	 */
	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio->md, tio->orig);
	else {
		/* Unknown directive from the target is a kernel bug. */
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Complete a clone request: give the target a chance to post-process the
 * completion, then dispatch on the resulting DM_ENDIO_* directive.
 *
 * @clone:  the cloned request that has finished at the lower layer
 * @error:  block-layer completion status (blk_status_t, e.g. BLK_STS_TARGET)
 * @mapped: true if the request was actually mapped to a target, i.e. the
 *          target's end-of-I/O hook should be consulted
 *
 * Modern variant: the block-layer status (@error) and the target directive
 * (@r) are kept separate, unlike the legacy int-errno version.
 */
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		/*
		 * Only consult the target's optional rq_end_io hook when the
		 * request was really mapped; the hook may replace the default
		 * DM_ENDIO_DONE with another directive.
		 */
		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	/*
	 * BLK_STS_TARGET on an op the queue does not advertise support for
	 * means the device rejected it: disable that op on the mapped device
	 * so it is not issued again.
	 */
	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		/* Unknown directive from the target is a kernel bug. */
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}