/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
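For context: a make_request function like dm_request() only runs once it has been installed on the device's queue. A minimal wiring sketch, assuming the same 2.6-era API (blk_queue_make_request() and the queuedata back-pointer); the helper name dm_queue_init_sketch is hypothetical, not dm's actual code:

/* Hypothetical wiring sketch (2.6-era API): install dm_request() on the
 * md's queue and set queuedata so dm_request() can recover the md via
 * q->queuedata, as in the function above. */
static int dm_queue_init_sketch(struct mapped_device *md)
{
	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		return -ENOMEM;

	md->queue->queuedata = md;	/* read back as q->queuedata above */
	blk_queue_make_request(md->queue, dm_request);
	return 0;
}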
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);

	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*
 * Purpose: split a bio request into several clones.
 * Parameters: md - the mapped device; bio - the bio to split.
 * Return value: none.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;		/* starting sector of the request */
	/* bio_sectors() shows the data length is always a multiple of 512 bytes */
	ci.sector_count = bio_sectors(bio);	/* data length, in sectors */
	ci.idx = bio->bi_idx;			/* current index into the bio_vec array */

	start_io_acct(ci.io);

	while (ci.sector_count)			/* dispatch the individual clones */
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/**
 * axon_ram_make_request - make_request() method for block device
 * @queue, @bio: see blk_queue_make_request()
 */
static void axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
	unsigned long phys_mem, phys_end;
	void *user_mem;
	struct bio_vec *vec;
	unsigned int transferred;
	unsigned short idx;

	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
	phys_end = bank->io_addr + bank->size;
	transferred = 0;

	bio_for_each_segment(vec, bio, idx) {
		/* reject any segment that would run past the bank */
		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
			bio_io_error(bio);
			return;
		}

		user_mem = page_address(vec->bv_page) + vec->bv_offset;
		if (bio_data_dir(bio) == READ)
			memcpy(user_mem, (void *) phys_mem, vec->bv_len);
		else
			memcpy((void *) phys_mem, user_mem, vec->bv_len);

		phys_mem += vec->bv_len;
		transferred += vec->bv_len;
	}

	bio_endio(bio, 0);
}
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't
			 * generic_make_request, just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}

	if (failit) {
		struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);

		bio_set_dev(b, conf->rdev->bdev);
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	generic_make_request(bio);
	return true;
}
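Note how the failure path works: rather than erroring the bio immediately, faulty_make_request() submits a clone to the real device and chains the original behind it via bi_private. The I/O therefore actually hits the disk; only when the clone completes does its end_io callback, faulty_fail() (shown below), propagate an error to the original bio.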
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static void faulty_fail(struct bio *bio)
{
	struct bio *b = bio->bi_private;

	b->bi_iter.bi_size = bio->bi_iter.bi_size;
	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	bio_put(bio);

	bio_io_error(b);
}
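These snippets span several kernel generations, which is why bio_io_error() appears with different argument counts. For orientation, the oldest and the current definitions are sketched below (roughly 2.6.x and 4.13+ respectively; intermediate kernels went through bio_endio(bio, error) and bio->bi_error stages in between, so treat the exact version boundaries as approximate):

/* ~2.6.x: completion carried a byte count and an errno explicitly. */
#define bio_io_error(bio, bytes)  bio_endio((bio), (bytes), -EIO)

/* 4.13 and later: the error travels in bio->bi_status instead. */
static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}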
static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		/* rqd now belongs to the in-flight I/O and is freed on
		 * completion, not here, so skip mempool_free() below. */
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell)
{
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, &bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}
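The __cell_release() helper is not shown above. A minimal sketch of what it plausibly does in this era of dm-bio-prison is below: move the cell's holder bio plus all detained bios onto the caller's list, then return the cell to its pool. The field names (list, holder, bios, prison->cell_pool) are assumptions for illustration, not a quote of the real source:

/* Sketch, assuming a cell holds one holder bio plus a bio_list of
 * detained bios and lives in a mempool owned by the prison.
 * Must be called with prison->lock held. */
static void __cell_release(struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	hlist_del(&cell->list);			/* unhash the cell */

	if (inmates) {
		bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}

	mempool_free(cell, cell->prison->cell_pool);
}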
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		/* NULL means the logical range is busy with in-flight I/O;
		 * keep retrying, yielding the CPU between attempts. An
		 * ERR_PTR (allocation failure) exits the loop immediately. */
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
		/* A discard that also carries REQ_PREFLUSH falls through,
		 * so the flush is handled by the read/write path below. */
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}
/* static void stackbd_make_request(struct request_queue *q, struct bio *bio) */
static int stackbd_make_request(struct request_queue *q, struct bio *bio)
{
	printk("stackbd: make request %-5s block %-12llu #pages %-4hu total-size "
			"%-10u\n", bio_data_dir(bio) == WRITE ? "write" : "read",
			(unsigned long long) bio->bi_sector, bio->bi_vcnt, bio->bi_size);

//	printk("<%p> Make request %s %s %s\n", bio,
//			bio->bi_rw & REQ_SYNC ? "SYNC" : "",
//			bio->bi_rw & REQ_FLUSH ? "FLUSH" : "",
//			bio->bi_rw & REQ_NOIDLE ? "NOIDLE" : "");

	/* Must be taken: both exit paths release this lock. */
	spin_lock_irq(&stackbd.lock);
	if (!stackbd.bdev_raw) {
		printk("stackbd: Request before bdev_raw is ready, aborting\n");
		goto abort;
	}
	if (!stackbd.is_active) {
		printk("stackbd: Device not active yet, aborting\n");
		goto abort;
	}

	bio_list_add(&stackbd.bio_list, bio);
	wake_up(&req_event);
	spin_unlock_irq(&stackbd.lock);

	/* FIXME:VER return; */
	return 0;

abort:
	spin_unlock_irq(&stackbd.lock);
	printk("<%p> Abort request\n\n", bio);
	bio_io_error(bio);
	/* FIXME:VER return; */
	return 0;
}
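stackbd defers each bio to a worker thread instead of remapping it inline. That thread is not part of the snippet above; below is a minimal consumer sketch under the same locking scheme, draining the list that stackbd_make_request() fills. The function name and the exact remap step are assumptions for illustration, not the driver's code:

/* Hypothetical consumer sketch: pop bios queued by stackbd_make_request()
 * and forward them to the underlying device. */
static int stackbd_thread_sketch(void *data)
{
	struct bio *bio;

	while (!kthread_should_stop()) {
		wait_event_interruptible(req_event,
				kthread_should_stop() ||
				!bio_list_empty(&stackbd.bio_list));

		spin_lock_irq(&stackbd.lock);
		bio = bio_list_pop(&stackbd.bio_list);
		spin_unlock_irq(&stackbd.lock);

		if (!bio)
			continue;

		/* Remap to the lower device and resubmit (pre-3.14 era,
		 * matching the bi_sector/bi_size fields used above). */
		bio->bi_bdev = stackbd.bdev_raw;
		generic_make_request(bio);
	}
	return 0;
}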