Example #1
/**
 * Create a copy of a write bio.
 */
struct bio* bio_deep_clone(struct bio *bio, gfp_t gfp_mask)
{
	uint size;
	struct bio *clone;

	ASSERT(bio);
	ASSERT(op_is_write(bio_op(bio)));
	ASSERT(!bio->bi_next);

	if (bio_has_data(bio))
		size = bio->bi_iter.bi_size;
	else
		size = 0;

	clone = bio_alloc_with_pages(size, bio->bi_bdev, gfp_mask);
	if (!clone)
		return NULL;

	clone->bi_opf = bio->bi_opf;
	clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	if (size == 0) {
		/* This is for discard IOs. */
		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
	} else {
		bio_copy_data(clone, bio);
	}
	return clone;
}
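The snippet relies on bio_alloc_with_pages(), which is a project-local helper rather than a mainline kernel API. A minimal sketch of what such a helper might look like for this kernel generation; the name comes from the caller above, but the details (rounding up to whole pages, backing the bio with freshly allocated pages) are assumptions:

/*
 * Hypothetical sketch, not mainline: allocate a bio and back it with
 * enough newly allocated pages to hold @size bytes.
 */
static struct bio *bio_alloc_with_pages(uint size, struct block_device *bdev,
					gfp_t gfp_mask)
{
	uint nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct bio *bio;
	uint i;

	bio = bio_alloc(gfp_mask, nr_pages ? nr_pages : 1);
	if (!bio)
		return NULL;
	bio->bi_bdev = bdev;

	for (i = 0; i < nr_pages; i++) {
		uint len = min_t(uint, size, PAGE_SIZE);
		struct page *page = alloc_page(gfp_mask);

		if (!page || bio_add_page(bio, page, len, 0) != len) {
			if (page)
				__free_page(page);
			bio_free_pages(bio);	/* free pages added so far */
			bio_put(bio);
			return NULL;
		}
		size -= len;
	}
	return bio;
}

For the discard case (size == 0) this returns an empty bio, which matches the caller setting bi_size by hand afterwards.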
Example #2
File: mpage.c  Project: Vhacker1995/linux
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		page_endio(page, bio_op(bio), bio->bi_error);
	}

	bio_put(bio);
}
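For context, mpage_end_io() is installed on the submit side of fs/mpage.c. A condensed sketch of that wiring from the same kernel era (the real function also guards against reads past end of device, omitted here):

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;	/* runs once when the whole bio completes */
	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);
	return NULL;	/* callers use this to reset their bio pointer */
}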
Example #3
File: blocklayout.c  Project: mdamt/linux
static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}
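Always returning NULL is what makes the submit-and-reset idiom work: a caller can submit whatever bio is pending and clear its pointer in one statement. A hypothetical illustration of that pattern:

/*
 * Hypothetical caller helper: submit the pending bio (bl_submit_bio()
 * tolerates NULL) and reset the caller's pointer in a single statement.
 */
static void example_flush_pending(struct bio **biop)
{
	*biop = bl_submit_bio(*biop);
}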
Example #4
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	/* make earlier writes durable before starting on this bio */
	if (bio->bi_opf & REQ_PREFLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	/* honor FUA by flushing again after the data is written */
	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
Example #5
File: dm-zero.c  Project: CadeLaRen/linux
/*
 * Return zeros only on reads
 */
static int zero_map(struct dm_target *ti, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bio->bi_rw & REQ_RAHEAD) {
			/* readahead of null bytes only wastes buffer cache */
			return -EIO;
		}
		zero_fill_bio(bio);
		break;
	case REQ_OP_WRITE:
		/* writes get silently dropped */
		break;
	default:
		return -EIO;
	}

	bio_endio(bio);

	/* accepted bio, don't make new request */
	return DM_MAPIO_SUBMITTED;
}
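zero_map() is hooked into device-mapper through a target_type. A sketch of the registration, following drivers/md/dm-zero.c (zero_ctr is the trivial constructor defined alongside it in that file; the version numbers here are illustrative):

static struct target_type zero_target = {
	.name    = "zero",
	.version = {1, 1, 0},
	.module  = THIS_MODULE,
	.ctr     = zero_ctr,	/* trivial constructor from dm-zero.c */
	.map     = zero_map,
};

static int __init dm_zero_init(void)
{
	int r = dm_register_target(&zero_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}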
Example #6
File: pblk-init.c  Project: mdamt/linux
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		/* a discard that also carries REQ_PREFLUSH falls through
		 * so the flush is still serviced by pblk_rw_io() below */
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}
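Several of the examples above combine the two halves of bi_opf: bio_op() extracts the REQ_OP_* code from the low bits, while flag bits such as REQ_PREFLUSH and REQ_FUA are tested on bi_opf directly. A minimal sketch of that dispatch pattern (the example_handle_* helpers are hypothetical):

static void example_dispatch(struct bio *bio)
{
	/* flags live in the upper bits of bi_opf and are tested directly */
	if (bio->bi_opf & REQ_PREFLUSH)
		example_handle_flush(bio);	/* hypothetical */

	/* the operation code comes from bio_op() */
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		example_handle_rw(bio);		/* hypothetical */
		break;
	case REQ_OP_DISCARD:
		example_handle_discard(bio);	/* hypothetical */
		break;
	default:
		bio_io_error(bio);
		break;
	}
}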