static int deadline_merge(struct request_queue *q, struct request **req,
			  struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio_end_sector(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_epoch_entry *e = bio->bi_private;
	struct drbd_conf *mdev = e->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error)
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)e->sector);
	if (!error && !uptodate) {
		dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
				is_write ? "write" : "read",
				(unsigned long long)e->sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &e->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&e->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(e);
		else
			drbd_endio_read_sec_final(e);
	}
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}
/**
 * axon_ram_make_request - make_request() method for block device
 * @queue, @bio: see blk_queue_make_request()
 */
static void axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
	unsigned long phys_mem, phys_end;
	void *user_mem;
	struct bio_vec *vec;
	unsigned int transfered;
	unsigned short idx;

	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
	phys_end = bank->io_addr + bank->size;
	transfered = 0;

	bio_for_each_segment(vec, bio, idx) {
		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
			bio_io_error(bio);
			return;
		}

		user_mem = page_address(vec->bv_page) + vec->bv_offset;
		if (bio_data_dir(bio) == READ)
			memcpy(user_mem, (void *) phys_mem, vec->bv_len);
		else
			memcpy((void *) phys_mem, user_mem, vec->bv_len);

		phys_mem += vec->bv_len;
		transfered += vec->bv_len;
	}
	bio_endio(bio, 0);
}
static int rrpc_end_io(struct nvm_rq *rqd, int error)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return 0;

	rrpc_unlock_rq(rrpc, rqd);
	bio_put(rqd->bio);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
	if (rqd->metadata)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);

	return 0;
}
static void _dec_pending(struct iostash_bio *io)
{
	if (atomic_dec_and_test(&io->io_pending)) {
		struct hdd_info *hdd = io->hdd;
		struct ssd_info *ssd = io->ssd;
		struct bio *base_bio = io->base_bio;
		int error = io->error;

#ifdef SCE_AWT
		if (bio_data_dir(base_bio) != READ) {
			sce_put4write(hdd->lun, io->psn, io->nr_sctr,
				      io->ssd_werr | io->error);
			gctx.st_awt++;
		}
#endif
		mempool_free(io, hdd->io_pool);

#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
		(void) error;
		bio_endio(base_bio);
#else
		bio_endio(base_bio, error);
#endif

		atomic_dec(&hdd->io_pending);

		BUG_ON(NULL == ssd);
		atomic_dec(&ssd->nr_ref);
	}
}
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->mdev;
	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;

	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;
	if (s & RQ_LOCAL_PENDING)
		return;

	if (req->master_bio) {
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		if (!hlist_unhashed(&req->collision))
			hlist_del(&req->collision);
		else
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		_drbd_end_io_acct(mdev, req);

		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}
static int vrd_make_request(request_queue_t *q, struct bio *bio)
{
	vrd_device *pdevice;
	char *pVHDDData;
	char *pBuffer;
	struct bio_vec *bvec;
	int i;

	if (((bio->bi_sector * VRD_SECTOR_SIZE) + bio->bi_size) > VRD_SIZE)
		goto fail;

	pdevice = (vrd_device *) bio->bi_bdev->bd_disk->private_data;
	pVHDDData = pdevice->data + (bio->bi_sector * VRD_SECTOR_SIZE);

	bio_for_each_segment(bvec, bio, i) {
		pBuffer = kmap(bvec->bv_page) + bvec->bv_offset;

		switch (bio_data_dir(bio)) {
		case READA:
		case READ:
			memcpy(pBuffer, pVHDDData, bvec->bv_len);
			break;
		case WRITE:
			memcpy(pVHDDData, pBuffer, bvec->bv_len);
			break;
		default:
			kunmap(bvec->bv_page);
			goto fail;
		}

		kunmap(bvec->bv_page);
		pVHDDData += bvec->bv_len;
	}
static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->w.mdev = mdev;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_sector;
	req->i.size = bio_src->bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
		      struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
static int mem_block_no_elevator_request_fn(request_queue_t *q, struct bio *bio)
{
	int status = 0, i = 0;
	struct bio_vec *bvec = NULL;

	bio_for_each_segment(bvec, bio, i) {
		char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);

		switch (bio_data_dir(bio)) {
		case WRITE:
			memcpy(g_mem_buf + (bio->bi_sector << 9), buffer,
			       bio_cur_sectors(bio) << 9);
			status = 0;
			break;
		case READ:
			memcpy(buffer, g_mem_buf + (bio->bi_sector << 9),
			       bio_cur_sectors(bio) << 9);
			status = 0;
			break;
		default:
			Log("[Error] Unknown operation.");
			status = -EIO;
			break;
		}

		bio_endio(bio, bio->bi_size, status);
		__bio_kunmap_atomic(bio, KM_USER0);
	}
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	sector_t sector = bio->bi_sector;
	bool do_sync;
	int do_sync_req = 0;

	Nand_OS_LOCK();

	/* Do each segment independently. */
	bio_for_each_segment(bvec, bio, i) {	/* this is a macro */
		char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);	/* luowl */
#if 1
		if (1) {	/* (nand_page_size_get() == 2048) */
			do_sync = (bio_rw_flagged(bio, BIO_RW_SYNCIO) &&
				   bio_data_dir(bio) == WRITE);
			if (do_sync) {
				/* printk("detect do write sync\n"); */
				do_sync_req++;
			}
		}
#endif
		sbull_transfer(dev, sector, bio_cur_bytes(bio) >> 9, buffer,
			       bio_data_dir(bio) == WRITE);
		sector += bio_cur_bytes(bio) >> 9;
		__bio_kunmap_atomic(bio, KM_USER0);
	}
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}
/**
 * bio_integrity_enabled - Check whether integrity can be passed
 * @bio: bio to check
 *
 * Description: Determines whether bio_integrity_prep() can be called
 * on this bio or not. bio data direction and target device must be
 * set prior to calling. The function honors the write_generate and
 * read_verify flags in sysfs.
 */
int bio_integrity_enabled(struct bio *bio)
{
	/* Already protected? */
	if (bio_integrity(bio))
		return 0;

	return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
}
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't generic_make_request,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);

		bio_set_dev(b, conf->rdev->bdev);
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	generic_make_request(bio);
	return true;
}
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev,
				struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}
static int deadline_merge(request_queue_t *q, struct request **req,
			  struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;

	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (error)
		dev_warn(DEV, "p %s: error=%d\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read", error);
	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}
static void pmem_make_request(struct request_queue *q, struct bio *bio)
{
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter)
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
			     bio_data_dir(bio), iter.bi_sector);
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
}
static inline struct request *start_ordered(struct request_queue *q,
					    struct request *rq)
{
	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	rq->cmd_flags = 0;
	rq_init(q, rq);
	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
		rq->cmd_flags |= REQ_RW;
	if (q->ordered & QUEUE_ORDERED_FUA)
		rq->cmd_flags |= REQ_FUA;
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	init_request_from_bio(rq, q->orig_bar_rq->bio);
	rq->end_io = bar_end_io;

	/*
	 * Queue ordered sequence. As we stack them at the head, we
	 * need to queue in reverse order. Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets inbetween ordered sequence. If this request is
	 * an empty barrier, we don't need to do a postflush ever since
	 * there will be no data written between the pre and post flush.
	 * Hence a single flush will suffice.
	 */
	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
	else
		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
	else
		rq = NULL;

	return rq;
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		page_endio(page, bio_data_dir(bio), bio->bi_error);
	}

	bio_put(bio);
}
void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
static enum promote_result should_promote(struct smq_policy *mq,
					  struct entry *hs_e, struct bio *bio,
					  bool fast_promote)
{
	if (bio_data_dir(bio) == WRITE) {
		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
			return PROMOTE_TEMPORARY;
		else
			return maybe_promote(hs_e->level >= mq->write_promote_level);
	} else
		return maybe_promote(hs_e->level >= mq->read_promote_level);
}
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio);
	unsigned int sectors = bio_sectors(bio);
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	if (bio_has_data(bio) && offset + sectors > get_capacity(zv->zv_disk)) {
		printk(KERN_INFO
		    "%s: bad access: block=%llu, count=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)sectors);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio->bi_rw & VDEV_REQ_DISCARD) {
			error = zvol_discard(bio);
			goto out2;
		}

		error = zvol_write(bio);
	} else
		error = zvol_read(bio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	const int rw = bio_data_dir(req->master_bio);
	int cpu;

	cpu = part_stat_lock();
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
	/* The macro invocations above want the cpu argument; I do not like
	 * the compiler warning about cpu only assigned but never used... */
	(void) cpu;
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}
void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
/*
 * xlg_make_request - build and service the bio directly, since this
 * driver does not use a request queue.
 */
static int xlg_make_request(request_queue_t *q, struct bio *bio)
{
	int ret = 0;
	struct bio_vec *bv = NULL;
	int i = 0;
	sector_t sector = bio->bi_sector & ~7;

	bio_for_each_segment(bv, bio, i) {
		ret = xcache_xfer(bv, sector, bio_data_dir(bio));
		if (ret)
			break;
		sector += 8;
	}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}