int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);
out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);

	return ret;
}
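The loop above consumes the BIO one exposed page at a time: each pblk_rb_write_entry_user() copy is followed by bio_advance(), so bio_data() always points at the next unwritten page. Below is a minimal sketch of that consume-then-advance pattern in isolation; consume_segment() is a hypothetical placeholder, not a pblk or block-layer function.

/* Sketch only: walk a bio segment by segment, advancing as data is consumed. */
static void consume_bio_segments(struct bio *bio)
{
	while (bio->bi_iter.bi_size) {
		void *data = bio_data(bio);		/* start of the current segment */
		unsigned int len = bio_cur_bytes(bio);	/* bytes left in that segment */

		consume_segment(data, len);		/* hypothetical per-segment work */

		/* Shrink bi_iter.bi_size and move the sector/bvec cursor forward. */
		bio_advance(bio, len);
	}
}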
/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	/* Consume the completed bytes from the front of the bio. */
	bio_advance(bio, nbytes);

	/* Don't actually finish the bio if it's part of a flush sequence. */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}
/*
 * Issue a BIO to a zone. The BIO may only partially process the
 * original target BIO.
 */
static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
			       struct bio *bio, sector_t chunk_block,
			       unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector;
	struct bio *clone;

	/* BIO remap sector */
	sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);

	/* If the read is not partial, there is no need to clone the BIO */
	if (nr_blocks == dmz_bio_blocks(bio)) {
		/* Setup and submit the BIO */
		bio->bi_iter.bi_sector = sector;
		atomic_inc(&bioctx->ref);
		generic_make_request(bio);
		return 0;
	}

	/* Partial BIO: we need to clone the BIO */
	clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	/* Setup the clone */
	clone->bi_iter.bi_sector = sector;
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_read_bio_end_io;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	/* Submit the clone */
	atomic_inc(&bioctx->ref);
	generic_make_request(clone);

	return 0;
}
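For context, a hedged sketch of the caller side of a function shaped like dmz_submit_read_bio(): because each partial submission advances the original BIO past the cloned bytes, the caller only needs to walk a block cursor until the request is exhausted. map_next_extent() and submit_extent() are hypothetical placeholders for the zone-mapping and submission steps, not dm-zoned functions.

/* Sketch only: drive a bio through a submit helper that advances it per extent. */
static int handle_bio_sketch(struct bio *bio, sector_t block, unsigned int nr_blocks)
{
	while (nr_blocks) {
		/* Hypothetical: how many blocks the next extent can take (>= 1). */
		unsigned int count = map_next_extent(block, nr_blocks);
		int ret;

		/* Hypothetical submit step; it calls bio_advance() internally,
		 * just as dmz_submit_read_bio() does for a partial read. */
		ret = submit_extent(bio, block, count);
		if (ret)
			return ret;

		block += count;
		nr_blocks -= count;
	}

	return 0;
}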