Example #1
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

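	/* Only a bio that carries a payload has physical segments to map. */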
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}
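Every call site in these listings uses bio_has_data() to decide whether a bio actually carries a payload before touching its pages or segments. As a rough sketch only (the exact set of excluded operations varies between kernel versions), the helper from include/linux/bio.h reduces to a non-zero byte count plus an operation type that really transfers data:

static inline bool bio_has_data(struct bio *bio)
{
	/*
	 * Sketch: a bio carries data only if it has a non-zero byte count
	 * and is not a data-less operation; newer kernels also exclude
	 * REQ_OP_WRITE_ZEROES here.
	 */
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE)
		return true;

	return false;
}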
Example #2
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

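	/*
	 * A bio without a payload (e.g. a discard) contributes no segment
	 * of its own, so it never prevents the merge.
	 */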
	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
Example #3
/**
 * Create a copy of a write bio.
 */
struct bio* bio_deep_clone(struct bio *bio, gfp_t gfp_mask)
{
	uint size;
	struct bio *clone;

	ASSERT(bio);
	ASSERT(op_is_write(bio_op(bio)));
	ASSERT(!bio->bi_next);

	if (bio_has_data(bio))
		size = bio->bi_iter.bi_size;
	else
		size = 0;

	clone = bio_alloc_with_pages(size, bio->bi_bdev, gfp_mask);
	if (!clone)
		return NULL;

	clone->bi_opf = bio->bi_opf;
	clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	if (size == 0) {
		/* This is for discard IOs. */
		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
	} else {
		bio_copy_data(clone, bio);
	}
	return clone;
}
Example #4
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
Example #5
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

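	/* A flush-only bio carries no payload; there is nothing to copy
	 * into the write buffer.
	 */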
	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);
	return ret;
}
Example #6
File: zvol.c Project: koplover/zfs
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio);
	unsigned int sectors = bio_sectors(bio);
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

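	/* Range-check only bios that actually carry a payload. */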
	if (bio_has_data(bio) && offset + sectors >
	    get_capacity(zv->zv_disk)) {
		printk(KERN_INFO
		    "%s: bad access: block=%llu, count=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)sectors);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio->bi_rw & VDEV_REQ_DISCARD) {
			error = zvol_discard(bio);
			goto out2;
		}

		error = zvol_write(bio);
	} else
		error = zvol_read(bio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
Example #7
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = cmd + 1;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
Example #8
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}
Example #9
File: zvol.c Project: alek-p/zfs
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	uio_t uio;
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = BIO_BI_SIZE(bio);
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
	    zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)uio.uio_loffset,
		    (long unsigned)uio.uio_resid);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			error = zvol_discard(bio);
			goto out2;
		}

		/*
		 * Some requests are just for flush and nothing else.
		 */
		if (uio.uio_resid == 0) {
			if (bio_is_flush(bio))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			goto out2;
		}

		error = zvol_write(zv, &uio,
		    bio_is_flush(bio) || bio_is_fua(bio) ||
		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
	} else
		error = zvol_read(zv, &uio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}