/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	/* Number of bytes covered by nr_blocks blocks */
	unsigned int zero_bytes = nr_blocks << DMZ_BLOCK_SHIFT;
	unsigned int saved_size = bio->bi_iter.bi_size;

	/*
	 * zero_fill_bio() clears the entire remainder of the BIO, so
	 * temporarily shrink bi_size to cover only the blocks we want
	 * zeroed at the current position, then restore it.
	 */
	bio->bi_iter.bi_size = zero_bytes;
	zero_fill_bio(bio);
	bio->bi_iter.bi_size = saved_size;

	/* Step the BIO iterator past the blocks just cleared */
	bio_advance(bio, zero_bytes);
}
/*
 * Per-region BIO completion handler: on a failed READ, zero the buffer
 * so the caller never sees stale data, then drop this region's
 * reference on the parent io.
 */
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object, so the
	 * io/region must be fetched from the bio BEFORE bio_put() drops
	 * our reference. Do not reorder these calls.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	/* May complete the parent io and invoke its callback */
	dec_count(io, region, error);
}
/*
 * Per-region BIO completion handler: on a failed READ, zero the buffer
 * so the caller never sees stale data, then drop this region's
 * reference on the parent io.
 */
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 * Fetch io/region from the bio before dropping our reference;
	 * do not reorder these calls.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	/* May complete the parent io and invoke its callback */
	dec_count(io, region, error);
}
/*
 * Return zeros only on reads
 */
static int zero_map(struct dm_target *ti, struct bio *bio)
{
	int rw = bio_rw(bio);

	if (rw == READA) {
		/* readahead of null bytes only wastes buffer cache */
		return -EIO;
	}

	if (rw == READ)
		zero_fill_bio(bio);
	/* else WRITE: writes get silently dropped */

	bio_endio(bio, 0);

	/* accepted bio, don't make new request */
	return DM_MAPIO_SUBMITTED;
}
/*
 * Return zeros only on reads
 */
static int zero_map(struct dm_target *ti, struct bio *bio,
		    union map_info *map_context)
{
	int rw = bio_rw(bio);

	if (rw == READA) {
		/* readahead of null bytes only wastes buffer cache */
		return -EIO;
	}

	if (rw == READ)
		zero_fill_bio(bio);
	/* else WRITE: writes get silently dropped */

	bio_endio(bio, bio->bi_size, 0);

	/* accepted bio, don't make new request */
	return 0;
}
/*
 * Return zeros only on reads
 */
static int zero_map(struct dm_target *ti, struct bio *bio)
{
	unsigned int op = bio_op(bio);

	if (op == REQ_OP_READ) {
		if (bio->bi_rw & REQ_RAHEAD) {
			/* readahead of null bytes only wastes buffer cache */
			return -EIO;
		}
		zero_fill_bio(bio);
	} else if (op != REQ_OP_WRITE) {
		/* reject anything other than plain reads and writes */
		return -EIO;
	}
	/* REQ_OP_WRITE: writes get silently dropped */

	bio_endio(bio);

	/* accepted bio, don't make new request */
	return DM_MAPIO_SUBMITTED;
}
/*
 * Process a read BIO.
 *
 * NOTE(review): this definition is truncated in the visible chunk —
 * the closing of the function (presumably a final return) lies beyond
 * this view; only the visible portion is annotated here.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	/* First block of this BIO, relative to the start of its chunk */
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Read into unmapped chunks need only zeroing the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		/*
		 * Sequential zones are only readable up to the write
		 * pointer; random zones are readable anywhere.
		 */
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_read_bio(dmz, rzone, bio,
						  chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zeroout the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}