/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
        int i;
        struct bio_vec *bvec;
        sector_t sector = bio->bi_sector;
        bool do_sync;
        int do_sync_req = 0;

        Nand_OS_LOCK();
        /* Do each segment independently. */
        bio_for_each_segment(bvec, bio, i) {    /* bio_for_each_segment is a macro */
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
#if 1
                if (1) {        /* originally: nand_page_size_get() == 2048 */
                        do_sync = (bio_rw_flagged(bio, BIO_RW_SYNCIO) &&
                                   bio_data_dir(bio) == WRITE);
                        if (do_sync) {
                                /* printk("detect do write sync\n"); */
                                do_sync_req++;
                        }
                }
#endif
                sbull_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
                               buffer, bio_data_dir(bio) == WRITE);
                sector += bio_cur_bytes(bio) >> 9;
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        /* Nand_OS_UNLOCK() is assumed to be the counterpart of Nand_OS_LOCK()
         * above; the full driver presumably also acts on do_sync_req here. */
        Nand_OS_UNLOCK();
        return 0;
}
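Each of these variants delegates the actual copy to a per-device transfer helper. For reference, here is a minimal sketch of such a helper, modeled on LDD3's sbull_transfer(); dev->data and dev->size are assumed fields of the device structure, and 512-byte kernel sectors are assumed.

/*
 * Sketch of the transfer helper the loops above call, modeled on LDD3's
 * sbull_transfer(). dev->data and dev->size are assumed fields; 512-byte
 * kernel sectors are assumed.
 */
static void sbull_transfer(struct sbull_dev *dev, sector_t sector,
                           unsigned long nsect, char *buffer, int write)
{
        unsigned long offset = sector * 512;
        unsigned long nbytes = nsect * 512;

        if ((offset + nbytes) > dev->size) {
                printk(KERN_NOTICE "Beyond-end write (%lu %lu)\n",
                       offset, nbytes);
                return;
        }
        if (write)
                memcpy(dev->data + offset, buffer, nbytes);
        else
                memcpy(buffer, dev->data + offset, nbytes);
}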
static int mem_block_no_elevator_request_fn(request_queue_t *q, struct bio *bio)
{
        int status = 0, i = 0;
        struct bio_vec *bvec = NULL;
        sector_t sector = bio->bi_sector;

        bio_for_each_segment(bvec, bio, i) {
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
                unsigned int len = bvec->bv_len;

                switch (bio_data_dir(bio)) {
                case WRITE:
                        memcpy(g_mem_buf + (sector << 9), buffer, len);
                        break;
                case READ:
                        memcpy(buffer, g_mem_buf + (sector << 9), len);
                        break;
                default:
                        Log("[Error] Unknown operation.");
                        status = -EIO;
                        break;
                }
                /* advance by this segment; a fixed bio->bi_sector offset
                 * would copy every segment to the same place */
                sector += len >> 9;
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        /*
         * Complete the bio once, after all segments have been copied;
         * calling bio_endio() inside the loop would finish the whole
         * bio on the first segment.
         */
        bio_endio(bio, bio->bi_size, status);
        return status;
}
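A function like this only sees bios if the driver registers it as the queue's make_request function, bypassing the elevator entirely. A minimal sketch of that registration with the old-kernel API this snippet targets; g_queue is a hypothetical name and error handling is trimmed.

/* Sketch: registering a bio-based (no-elevator) queue on old kernels.
 * g_queue is a hypothetical global; error handling is trimmed. */
static struct request_queue *g_queue;

static int __init mem_block_init(void)
{
        g_queue = blk_alloc_queue(GFP_KERNEL);
        if (!g_queue)
                return -ENOMEM;
        /* bios are now handed straight to our function */
        blk_queue_make_request(g_queue, mem_block_no_elevator_request_fn);
        return 0;
}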
/*
 * Transfer a single BIO.
 */
static int vmem_disk_xfer_bio(struct vmem_disk_dev *dev, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        bio_for_each_segment(bvec, bio, iter) {
                char *buffer = __bio_kmap_atomic(bio, iter);

                vmem_disk_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
                                   buffer, bio_data_dir(bio) == WRITE);
                sector += bio_cur_bytes(bio) >> 9;
                __bio_kunmap_atomic(buffer);
        }
        return 0;
}
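On the kernels this targets (~3.13+), __bio_kmap_atomic(bio, iter) is just a macro over kmap_atomic(), and bio_cur_bytes(bio) reads the bio's own iterator, which bio_for_each_segment() does not advance, so it keeps returning the first segment's length. Below is a sketch of the same loop open-coded, using the iterated bvec's bv_len, which stays correct when segment sizes differ; the vmem_disk types and helper from the snippet above are assumed.

/* Sketch: the loop above without the __bio_kmap_atomic() wrapper.
 * bvec.bv_len is the current segment's length; bio_cur_bytes(bio)
 * would not advance across segments. */
static void vmem_disk_xfer_bio_open_coded(struct vmem_disk_dev *dev,
                                          struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        bio_for_each_segment(bvec, bio, iter) {
                /* map this segment's page and add its intra-page offset */
                char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;

                vmem_disk_transfer(dev, sector, bvec.bv_len >> 9, buffer,
                                   bio_data_dir(bio) == WRITE);
                sector += bvec.bv_len >> 9;
                kunmap_atomic(buffer);
        }
}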
/*
 * Used for transferring a single bio to a sector. Calls
 * osurd_transfer() for the actual transfer to the RAM disk.
 */
static int osurd_xfer_bio(struct osurd_dev *dev, struct bio *bio)
{
        int i;
        struct bio_vec *bvec;
        sector_t sector = bio->bi_sector;

        bio_for_each_segment(bvec, bio, i) {
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);

                osurd_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
                               buffer, bio_data_dir(bio) == WRITE);
                sector += bio_cur_bytes(bio) >> 9;
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        return 0;
}
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
        int i;
        struct bio_vec *bvec;
        sector_t sector = bio->bi_sector;

        /* Do each segment independently. */
        bio_for_each_segment(bvec, bio, i) {
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);

                sbull_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
                               buffer, bio_data_dir(bio) == WRITE);
                sector += bio_cur_bytes(bio) >> 9;
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        return 0;    /* Always "succeed" */
}
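The caller that drives these xfer_bio helpers is the driver's make_request function, which completes the bio after the transfer. A sketch of the LDD3-era caller, using the old three-argument bio_endio() of pre-2.6.24 kernels:

/* Sketch of the LDD3-era caller: the queue's make_request function
 * hands the bio to sbull_xfer_bio() and then completes it with the
 * old three-argument bio_endio(). */
static int sbull_make_request(request_queue_t *q, struct bio *bio)
{
        struct sbull_dev *dev = q->queuedata;
        int status;

        status = sbull_xfer_bio(dev, bio);
        bio_endio(bio, bio->bi_size, status);
        return 0;
}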
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        /* Do each segment independently. */
        bio_for_each_segment(bvec, bio, iter) {
                char *buffer = __bio_kmap_atomic(bio, iter);

                sbull_transfer(dev, sector,
                               bytes_to_sectors_checked(bio_cur_bytes(bio)),
                               buffer, bio_data_dir(bio) == WRITE);
                sector += bytes_to_sectors_checked(bio_cur_bytes(bio));
                __bio_kunmap_atomic(buffer);    /* takes the mapped address */
        }
        return 0;    /* Always "succeed" */
}
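bytes_to_sectors_checked() is not a kernel API but a helper local to this ported sbull; its exact body is assumed here. A plausible sketch, with 512-byte kernel sectors assumed:

/* Sketch of the driver-local helper used above (assumed, not a kernel
 * API): convert a byte count to 512-byte sectors, complaining if the
 * count is not sector-aligned. */
static unsigned long bytes_to_sectors_checked(unsigned long bytes)
{
        if (bytes % 512)
                printk(KERN_ERR "byte count %lu not sector-aligned\n",
                       bytes);
        return bytes / 512;
}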
static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
{
        int i;
        struct bio_vec *bvec;
        sector_t sector = bio->bi_sector;    /* pre-bvec_iter field, matching
                                              * the iteration style below */

        bio_for_each_segment(bvec, bio, i) {
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
                unsigned len = bvec->bv_len >> SECTOR_SHIFT;

                simdisk_transfer(dev, sector, len, buffer,
                                 bio_data_dir(bio) == WRITE);
                sector += len;
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        return 0;
}
static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
{
        struct simdisk *dev = q->queuedata;
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        bio_for_each_segment(bvec, bio, iter) {
                char *buffer = __bio_kmap_atomic(bio, iter);
                unsigned len = bvec.bv_len >> SECTOR_SHIFT;

                simdisk_transfer(dev, sector, len, buffer,
                                 bio_data_dir(bio) == WRITE);
                sector += len;
                __bio_kunmap_atomic(buffer);
        }

        bio_endio(bio);
        return BLK_QC_T_NONE;
}
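For completeness, a sketch of how a driver of this era (~4.x) wires such a function into its queue; blk_alloc_queue() and blk_queue_make_request() were the API of the time, dev->queue is an assumed field, and q->queuedata matches the snippet above.

/* Sketch: wiring a bio-based queue on ~4.x kernels; dev->queue is an
 * assumed field and error handling is trimmed. */
static int simdisk_setup_queue(struct simdisk *dev)
{
        dev->queue = blk_alloc_queue(GFP_KERNEL);
        if (!dev->queue)
                return -ENOMEM;
        blk_queue_make_request(dev->queue, simdisk_make_request);
        dev->queue->queuedata = dev;    /* recovered via q->queuedata above */
        return 0;
}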
static int brd_make_request(struct request_queue *q, struct bio *bio)
{
        struct brd_device *brd = q->queuedata;
        int i, status = 0;
        struct bio_vec *bvec;
        sector_t sector = bio->bi_sector;

        /* spin_lock(&brd->brd_lock); */

        /* Do each segment independently. */
        bio_for_each_segment(bvec, bio, i) {
                char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);

                brd_transfer(brd, sector, bio_cur_sectors(bio), buffer,
                             bio_data_dir(bio) == WRITE);
                sector += bio_cur_sectors(bio);
                __bio_kunmap_atomic(bio, KM_USER0);
        }
        /* complete the bio; the three-argument bio_endio() is assumed,
         * matching the KM_USER0-era API used above */
        bio_endio(bio, bio->bi_size, status);
        return 0;
}