/*
 * Request function: remaps the bio that dm_merge_bvec built up.
 * Always returns 0 (the bio is consumed: remapped, deferred, or errored).
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;
	int rw = bio_data_dir(bio);
	int r;

	/*
	 * Barrier requests cannot be forwarded: we have no way to
	 * guarantee the targets handle (or can handle) them correctly,
	 * so fail them up front with -EOPNOTSUPP.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	/* Per-direction accounting for this request. */
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
	disk_stat_inc(dm_disk(md), ios[rw]);

	/*
	 * While suspended the io must be queued for later.  This is a
	 * loop (not an if) because a suspend may begin again between
	 * queue_io() failing to defer and us retaking the read lock.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		/* Readahead is best-effort; don't bother deferring it. */
		if (bio_rw(bio) == READA)
			goto bad;

		r = queue_io(md, bio);
		if (r < 0)
			goto bad;
		if (!r)
			return 0;	/* deferred successfully */

		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;

 bad:
	bio_io_error(bio, bio->bi_size);
	return 0;
}
/*
 * Request function: remaps the bio that dm_merge_bvec built up.
 * Always returns 0 (the bio is consumed: remapped, deferred, or errored).
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * FIX: there is no use in forwarding any barrier request since
	 * we can't guarantee it is (or can be) handled by the targets
	 * correctly.  Reject it here instead of passing it down.
	 * (Matches the other dm_request variant in this file.)
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	/* Per-direction io accounting. */
	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		/* Readahead is best-effort; error it rather than defer. */
		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}