Example #1
File: dm.c  Project: wxlong/Test
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
	}
}
Example #2
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't generic_make_request,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);

		bio_set_dev(b, conf->rdev->bdev);
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	generic_make_request(bio);
	return true;
}
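
Example #2 is a compact instance of the clone-and-redirect pattern that recurs throughout these examples: clone the incoming bio, point the clone at another block device, stash the parent in bi_private, install a completion handler, and hand the clone to the block layer with generic_make_request(). Below is a minimal hedged sketch of that pattern, assuming a roughly 4.14-5.8 era kernel where bio_clone_fast(), bio_set_dev() and generic_make_request() all coexist; redirect_bio() and redirect_end_io() are illustrative names, not taken from any of the projects quoted here.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical completion handler: propagate the clone's status to the parent. */
static void redirect_end_io(struct bio *clone)
{
	struct bio *parent = clone->bi_private;

	parent->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(parent);
}

/* Clone @bio, point the clone at @target_bdev and submit it. */
static void redirect_bio(struct bio *bio, struct block_device *target_bdev,
			 struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone) {
		bio_io_error(bio);		/* fail the original if cloning fails */
		return;
	}

	bio_set_dev(clone, target_bdev);	/* send the clone to the other device */
	clone->bi_private = bio;		/* remember the parent for completion */
	clone->bi_end_io = redirect_end_io;

	generic_make_request(clone);		/* hand the clone to the block layer */
}

As in Example #2, the clone's end_io is responsible for finishing the original bio; the parent must not be completed until the clone has.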
Example #3
/*
 * Issue a BIO to a zone. The BIO may only partially process the
 * original target BIO.
 */
static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
			       struct bio *bio, sector_t chunk_block,
			       unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector;
	struct bio *clone;

	/* BIO remap sector */
	sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);

	/* If the read is not partial, there is no need to clone the BIO */
	if (nr_blocks == dmz_bio_blocks(bio)) {
		/* Setup and submit the BIO */
		bio->bi_iter.bi_sector = sector;
		atomic_inc(&bioctx->ref);
		generic_make_request(bio);
		return 0;
	}

	/* Partial BIO: we need to clone the BIO */
	clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	/* Setup the clone */
	clone->bi_iter.bi_sector = sector;
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_read_bio_end_io;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	/* Submit the clone */
	atomic_inc(&bioctx->ref);
	generic_make_request(clone);

	return 0;
}
Example #4
File: bio.c  Project: 020gzh/linux
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}
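
Example #4 is the consumer half of the bio_set rescuer: bios that cannot be submitted safely from the current context are parked on bs->rescue_list and resubmitted later from the rescue workqueue, which breaks the allocation/submission recursion that generic_make_request() could otherwise deadlock on. For context, here is a hedged sketch of the producer side, loosely modeled on punt_bios_to_rescuer() in the same file but simplified to a single bio; it is not the verbatim kernel code.

static void punt_bio_to_rescuer(struct bio_set *bs, struct bio *bio)
{
	/* Park the bio instead of submitting it from this context */
	spin_lock(&bs->rescue_lock);
	bio_list_add(&bs->rescue_list, bio);
	spin_unlock(&bs->rescue_lock);

	/* Wake the rescue_work handler shown above (bio_alloc_rescue) */
	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}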
Example #5
File: dm.c  Project: 3sOx/asuswrt-merlin
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
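
Examples #1 and #5 (and #9 below) only show the caller's side of the device-mapper contract: the target's map callback either takes ownership of the clone (DM_MAPIO_SUBMITTED, i.e. 0), asks for a requeue (DM_MAPIO_REQUEUE), or returns DM_MAPIO_REMAPPED so that __map_bio() dispatches the remapped clone with generic_make_request(). For reference, a hedged sketch of the target side, loosely modeled on the dm-linear map callback of the same era; field and struct names follow that era's struct bio and dm-linear, so treat it as an illustration rather than the exact upstream source.

/* Per-target private data, as a linear-style target of that era keeps it. */
struct linear_c {
	struct dm_dev *dev;	/* backing device */
	sector_t start;		/* start sector on the backing device */
};

static int linear_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct linear_c *lc = ti->private;

	/* Redirect the bio to the backing device at the linear offset */
	bio->bi_bdev = lc->dev->bdev;
	bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);

	/* Tell __map_bio() to dispatch the remapped clone itself */
	return DM_MAPIO_REMAPPED;
}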
Example #6
File: stackbd-orig.c  Project: rosrez/drv2
static void stackbd_io_fn(struct bio *bio)
{
//    printk("stackdb: Mapping sector: %llu -> %llu, dev: %s -> %s\n",
//            bio->bi_sector,
//            lba != EMPTY_REAL_LBA ? lba : bio->bi_sector,
//            bio->bi_bdev->bd_disk->disk_name,
//            bdev_raw->bd_disk->disk_name);
//
//    if (lba != EMPTY_REAL_LBA)
//        bio->bi_sector = lba;
    bio->bi_bdev = stackbd.bdev_raw;

    trace_block_bio_remap(bdev_get_queue(stackbd.bdev_raw), bio,
            bio->bi_bdev->bd_dev, bio->bi_sector);

    /* No need to call bio_endio() */
    generic_make_request(bio);
}
Example #7
static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
Example #8
static int multipath_make_request (request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	if (bio_data_dir(bio)==WRITE) {
		disk_stat_inc(mddev->gendisk, writes);
		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(mddev->gendisk, reads);
		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
	}

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, bio->bi_size, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
Example #9
/**ltl
 * Function: map a bio request onto the target device
 * Parameters: ti	-> target device
 *		clone-> bio request
 *		tio	-> target io request object
 * Return value:
 * Notes:
 */
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	/* Map the request onto the target device */
	r = ti->type->map(ti, clone, &tio->info); /* calls the linear_map function */
	if (r > 0) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);
		/* Submit the request to the block layer (insert it into the I/O scheduler queue) */
		generic_make_request(clone);
	}

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, r);
		bio_put(clone);
	}
}
Example #10
File: swsusp.c  Project: xricson/knoppix
static int bdev_write_page(struct block_device *bdev, long pos, void *buf)
{
#if 0
	struct buffer_head *bh;
	BUG_ON (pos%PAGE_SIZE);
	bh = __bread(bdev, pos/PAGE_SIZE, PAGE_SIZE);
	if (!bh || (!bh->b_data)) {
		return -1;
	}
	memcpy(bh->b_data, buf, PAGE_SIZE);	/* FIXME: may need kmap() */
	BUG_ON(!buffer_uptodate(bh));
	generic_make_request(WRITE, bh);
	if (!buffer_uptodate(bh))
		printk(KERN_CRIT "%sWarning %s: Fixing swap signatures unsuccessful...\n", name_resume, resume_file);
	wait_on_buffer(bh);
	brelse(bh);
	return 0;
#endif
	printk(KERN_CRIT "%sWarning %s: Fixing swap signatures unimplemented...\n", name_resume, resume_file);
	return 0;
}
Example #11
static void _io_worker(struct work_struct *work)
#endif
{
	struct iostash_bio *io = container_of(work, struct iostash_bio, work);
	struct hdd_info *hdd = io->hdd;
	struct bio *base_bio = io->base_bio;
	struct bio *clone4ssd;
	struct bio *clone4hdd;
	void *hddendfunc;
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
	blk_qc_t ret = BLK_QC_T_NONE;
#endif
	_inc_pending(io);
	do {
		if (bio_data_dir(base_bio) == READ) {	/* Read handling */
			/* First trial */
			if (io->error == 0) {
				clone4ssd = BIO_CLONEBS(base_bio, hdd->bs);

				if (!clone4ssd) {
					io->error = -ENOMEM;
					break;
				}

				/* _clone_init() may fail when SSD became offline */
				if (_clone_init(io, clone4ssd, 1, _endio4read) == 0) {
					_inc_pending(io);
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
					ret = generic_make_request(clone4ssd);
#else
					generic_make_request(clone4ssd);
#endif
					break;
				}

				/* when bio cannot be initialized for SSD for some reason flow to HDD */
				bio_put(clone4ssd);
				sce_put4read(hdd->lun, io->psn, io->nr_sctr);
			}
			hddendfunc = _endio4read;
		} else {	/* Write handling */

			hddendfunc = _endio4write;

			/* create a request to SSD */
			clone4ssd = BIO_CLONEBS(base_bio, hdd->bs);
			if (clone4ssd) {
				if (_clone_init(io, clone4ssd, 1, _endio4write)
				    == 0) {
					_inc_pending(io);
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
					ret = generic_make_request(clone4ssd);
#else
					generic_make_request(clone4ssd);
#endif
				}
			}
		}

		/* I/O handling for HDD */
		clone4hdd = BIO_CLONEBS(base_bio, hdd->bs);
		if (!clone4hdd) {
			io->error = -ENOMEM;
			break;
		}

		/* clone_init() will never fail for HDD */
		_clone_init(io, clone4hdd, 0, hddendfunc);

		/* Call HDD */
		_inc_pending(io);
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
		ret = (*hdd->org_mapreq) (hdd->request_q, clone4hdd);
#else
		(*hdd->org_mapreq) (hdd->request_q, clone4hdd);
#endif
	} while (0);

	_dec_pending(io);
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
	return ret;
#endif
}