/* * Decrements the number of outstanding ios that a bio has been * cloned into, completing the original io if necc. */ static void dec_pending(struct dm_io *io, int error) { if (error) io->error = error; if (atomic_dec_and_test(&io->io_count)) { if (end_io_acct(io)) /* nudge anyone waiting on suspend queue */ wake_up(&io->md->wait); bio_endio(io->bio, io->bio->bi_size, io->error); free_io(io->md, io); } }
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 *
 * When the last clone completes: if the target asked for the I/O to be
 * requeued (DM_ENDIO_REQUEUE) and a noflush suspend is in progress, the
 * original bio is parked on md->pushback instead of being completed;
 * otherwise the original bio is ended with the recorded error.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	/*
	 * NOTE(review): the io->error > 0 test presumably matches a
	 * previously stored DM_ENDIO_REQUEUE (a positive sentinel) --
	 * confirm against the DM_ENDIO_* definitions.
	 */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		/*
		 * A pushed-back bio must not be completed here: it will be
		 * resubmitted after resume, so only trace and end the bio
		 * when it is not being requeued.
		 */
		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);
			bio_endio(io->bio, io->bio->bi_size, io->error);
		}

		free_io(io->md, io);
	}
}