Example #1
static
int  
xixfs_raw_submit(
	struct block_device *bdev, 
	sector_t startsector, 
	int32 size, 
	int32 sectorsize, 
	PXIX_BUF xbuf,
	int32 rw)
{
	struct bio  * bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);
		
	bio->bi_sector = startsector;
	bio->bi_bdev = bdev;
	bio_add_page(bio, xbuf->xix_page, size, xbuf->xixcore_buffer.xcb_offset);
	bio->bi_private = (void *)xbuf;
	bio->bi_end_io = end_bio_xbuf_io_async;
	bio->bi_rw = rw;
	
	bio_get(bio);
	submit_bio(rw, bio);
	
	if(bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}
Example #2
int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
		void *data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	/*
	 * We always submit one sector at a time, so bio_add_page must not fail.
	 */
	if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
			 offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
		BUG();

	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE))
		return -EIO;
	return 0;
}
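The helper above only works because its completion callback clears BIO_UPTODATE on failure and then signals the on-stack completion. That callback is not part of this listing; a minimal sketch of what hfsplus_end_io_sync could look like, assuming the same two-argument bi_end_io signature used throughout these examples, is:

/* Hypothetical completion handler: mark the bio as failed on error and
 * wake the caller blocked in wait_for_completion(). */
static void hfsplus_end_io_sync(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	complete(bio->bi_private);
}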
Example #3
static void atodb_endio(struct bio *bio, int error)
{
	struct drbd_atodb_wait *wc = bio->bi_private;
	struct drbd_conf *mdev = wc->mdev;
	struct page *page;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?! */
	if (!error && !uptodate)
		error = -EIO;

	drbd_chk_io_error(mdev, error, TRUE);
	if (error && wc->error == 0)
		wc->error = error;

	if (atomic_dec_and_test(&wc->count))
		complete(&wc->io_done);

	page = bio->bi_io_vec[0].bv_page;
	put_page(page);
	bio_put(bio);
	mdev->bm_writ_cnt++;
	put_ldev(mdev);
}
Example #4
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
Example #5
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		if (bio_flagged(bio, BIO_BAIO)) {
			struct ba_iocb *baiocb =
				(struct ba_iocb *)bio->bi_private2;
		       	BUG_ON(!PageBaio(page));
			ClearPageBaio(page);
			if (!uptodate)
				baiocb->io_error = -EIO;
			baiocb->result += bvec->bv_len;
			baiocb_put(baiocb);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #6
File: dm.c Project: wxlong/Test
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
Example #7
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
Example #8
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_epoch_entry *e = bio->bi_private;
	struct drbd_conf *mdev = e->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error)
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)e->sector);
	if (!error && !uptodate) {
		dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
				is_write ? "write" : "read",
				(unsigned long long)e->sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &e->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&e->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(e);
		else
			drbd_endio_read_sec_final(e);
	}
}
Example #9
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question. Does not wait.
 */
int blkdev_issue_discard(struct block_device *bdev,
			 sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!q->prepare_discard_fn)
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 0);
		if (!bio)
			return -ENOMEM;

		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;

		bio->bi_sector = sector;

		if (nr_sects > q->max_hw_sectors) {
			bio->bi_size = q->max_hw_sectors << 9;
			nr_sects -= q->max_hw_sectors;
			sector += q->max_hw_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		bio_get(bio);
		submit_bio(DISCARD_BARRIER, bio);

		/* Check if it failed immediately */
		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
}
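A hedged usage sketch for the discard helper above; bdev, start and nr_sects are placeholders introduced here for illustration, and -EOPNOTSUPP is treated as non-fatal since many devices simply do not implement discard:

/* Hypothetical caller: discard a sector range, tolerating missing support. */
int err = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
if (err && err != -EOPNOTSUPP)
	printk(KERN_WARNING "discard of %llu sectors failed: %d\n",
	       (unsigned long long)nr_sects, err);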
Example #10
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
Example #11
static void chromeos_invalidate_kernel_endio(struct bio *bio, int err)
{
	const char *mode = ((bio->bi_rw & REQ_WRITE) ? "write" : "read");
	if (err)
		chromeos_set_need_recovery();

	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		DMERR("invalidate_kernel: %s not supported", mode);
		chromeos_set_need_recovery();
	} else if (!bio_flagged(bio, BIO_UPTODATE)) {
		DMERR("invalidate_kernel: %s not up to date", mode);
		chromeos_set_need_recovery();
	} else {
		DMERR("invalidate_kernel: partition header %s completed", mode);
	}

	complete(bio->bi_private);
}
Example #12
STATIC int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= DRBD_REQ_FUA | DRBD_REQ_FLUSH;
	rw |= DRBD_REQ_UNPLUG | DRBD_REQ_SYNC;

#ifndef REQ_FLUSH
	/* < 2.6.36, "barrier" semantic may fail with EOPNOTSUPP */
 retry:
#endif
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	trace_drbd_bio(mdev, "Md", bio, 0, NULL);

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

#ifndef REQ_FLUSH
	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely((bio->bi_rw & DRBD_REQ_HARDBARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~DRBD_REQ_HARDBARRIER;
		bio_put(bio);
		goto retry;
	}
#endif
 out:
	bio_put(bio);
	return ok;
}
Example #13
void probe_block_bio_queue(void *data, struct request_queue *q, struct bio *bio)
{
	trace_mark_tp(block, bio_queue, block_bio_queue,
		probe_block_bio_queue,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
}
Example #14
/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
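For context, the doc comment above requires a matching blk_rq_unmap_user() while still in process context. A rough, hypothetical call sequence around blk_rq_map_user() could look like the following; q, rq, ubuf and len are invented for illustration, and the BLOCK_PC command setup on the request is omitted:

struct request *rq;
int err;

rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq)
	return -ENOMEM;
/* cmd_type / cmd setup for the BLOCK_PC command is omitted here */

err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (!err) {
	blk_execute_rq(q, NULL, rq, 0);		/* submit and wait */
	err = blk_rq_unmap_user(rq->bio);	/* still in process context */
}
blk_put_request(rq);
return err;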
Example #15
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_private = &wait;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be copied
	 * from rq->sector.
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	ret = 0;
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
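A short, hypothetical caller for the flush helper above; per the doc comment the error_sector argument is optional, so passing NULL would also be acceptable:

/* Hypothetical caller: flush the write cache and report where it failed. */
sector_t bad_sector;
int err = blkdev_issue_flush(bdev, &bad_sector);
if (err == -EOPNOTSUPP)
	printk(KERN_INFO "device does not support cache flushes\n");
else if (err)
	printk(KERN_ERR "flush failed near sector %llu\n",
	       (unsigned long long)bad_sector);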
Example #16
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
				     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
Example #17
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
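A hedged sketch of how the iovec variant above might be fed; q, rq, the user pointers and lengths are placeholders, and a matching blk_rq_unmap_user(rq->bio) is still required once the request completes:

/* Hypothetical two-segment mapping of user memory into rq. */
struct sg_iovec iov[2];
int ret;

iov[0].iov_base = ubuf0;
iov[0].iov_len  = len0;
iov[1].iov_base = ubuf1;
iov[1].iov_len  = len1;

ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len0 + len1, GFP_KERNEL);
if (!ret && !bio_flagged(rq->bio, BIO_USER_MAPPED))
	pr_debug("bounce buffer in use, data will be copied\n");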
Example #18
/*
 * hfsplus_submit_bio - Perform block I/O
 * @sb: super block of volume for I/O
 * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
 * @buf: buffer for I/O
 * @data: output pointer for location of requested data
 * @rw: direction of I/O
 *
 * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
 * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
 * @data will return a pointer to the start of the requested sector,
 * which may not be the same location as @buf.
 *
 * If @sector is not aligned to the bdev logical block size it will
 * be rounded down. For writes this means that @buf should contain data
 * that starts at the rounded-down address. As long as the data was
 * read using hfsplus_submit_bio() and the same buffer is used things
 * will work correctly.
 */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
                       void *buf, void **data, int rw)
{
    DECLARE_COMPLETION_ONSTACK(wait);
    struct bio *bio;
    int ret = 0;
    u64 io_size;
    loff_t start;
    int offset;

    /*
     * Align sector to hardware sector size and find offset. We
     * assume that io_size is a power of two, which _should_
     * be true.
     */
    io_size = hfsplus_min_io_size(sb);
    start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
    offset = start & (io_size - 1);
    sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

    bio = bio_alloc(GFP_NOIO, 1);
    bio->bi_sector = sector;
    bio->bi_bdev = sb->s_bdev;
    bio->bi_end_io = hfsplus_end_io_sync;
    bio->bi_private = &wait;

    if (!(rw & WRITE) && data)
        *data = (u8 *)buf + offset;

    while (io_size > 0) {
        unsigned int page_offset = offset_in_page(buf);
        unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
                                 io_size);

        ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
        if (ret != len) {
            ret = -EIO;
            goto out;
        }
        io_size -= len;
        buf = (u8 *)buf + len;
    }

    submit_bio(rw, bio);
    wait_for_completion(&wait);

    if (!bio_flagged(bio, BIO_UPTODATE))
        ret = -EIO;

out:
    bio_put(bio);
    return ret < 0 ? ret : 0;
}
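To illustrate the @data contract described in the comment above, a hypothetical single-sector read might look as follows; buf must span a full hfsplus_min_io_size(sb) unit, and out is an invented destination buffer:

/* Hypothetical read: data ends up pointing at the requested 512-byte
 * sector somewhere inside buf. */
void *data;
int err = hfsplus_submit_bio(sb, sector, buf, &data, READ);
if (!err)
	memcpy(out, data, HFSPLUS_SECTOR_SIZE);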
Example #19
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
Example #20
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
Example #21
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}
Example #22
/* bi_end_io callback; newer kernels dropped the error argument */
#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
static void _endio4read(struct bio *clone)
#else
static void _endio4read(struct bio *clone, int error)
#endif
{
	struct iostash_bio *io = clone->bi_private;
	struct hdd_info *hdd = io->hdd;
	int ssd_online_to_be = 0;
	DBG("Got end_io (%lu) %p s=%lu l=%u base_bio=%p base_bio s=%lu l=%u.",
		clone->bi_rw, clone, BIO_SECTOR(clone), bio_sectors(clone), io->base_bio,
		BIO_SECTOR(io->base_bio), bio_sectors(io->base_bio));

	do {
#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
		const int error = clone->bi_error;
#else
		if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		{
			ERR("cloned bio not UPTODATE.");
			error = -EIO;
			ssd_online_to_be = 1;	/* because this error does not mean SSD failure */
		}
#endif
		io->error = error;

		/* if this bio is for SSD: common case */
		if (clone->bi_bdev != io->base_bio->bi_bdev) {
			DBG("SSD cloned bio endio.");
			if (unlikely(error)) {	/* Error handling */
				ERR("iostash: SSD read error: error = %d, sctr = %ld :::\n",
				     error, io->psn);
				io->ssd->online = ssd_online_to_be;

				_inc_pending(io);	/* to prevent io from releasing */
				_io_queue(io);
				break;
			}

			sce_put4read(hdd->lun, io->psn, io->nr_sctr);
			gctx.st_cread++;
			break;
		}
		DBG("iostash: Retried HDD read return = %d, sctr = %ld :::\n",
		       error, io->psn);
		_dec_pending(io);

	} while (0);

	bio_put(clone);
	_dec_pending(io);
}
Example #23
void probe_block_remap(void *data, struct request_queue *q, struct bio *bio,
		       dev_t dev, sector_t from)
{
	trace_mark_tp(block, remap, block_remap,
		probe_block_remap,
		"device_from %lu sector_from %llu device_to %lu "
		"size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long)bio->bi_bdev->bd_dev,
		(unsigned long long)from,
		(unsigned long)dev,
		bio->bi_size, bio->bi_rw,
		!bio_flagged(bio, BIO_UPTODATE));
}
Example #24
/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 * @bs:		bio_set this bio was allocated from
 *
 * Description: Used to free the integrity portion of a bio. Usually
 * called from bio_free().
 */
void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip == NULL);

	/* A cloned bio doesn't own the integrity metadata */
	if (!bio_flagged(bio, BIO_CLONED) && bip->bip_buf != NULL)
		kfree(bip->bip_buf);

	mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
	mempool_free(bip, bs->bio_integrity_pool);

	bio->bi_integrity = NULL;
}
Example #25
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				       dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
Example #26
/**
 * __blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
 */
int __blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
Example #27
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
		if (!iov[i].iov_len)
			return -EINVAL;
	}

	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
Example #28
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&q->queue_flags);

	if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
			bio->bi_vcnt < queue_max_segments(q))
		bio->bi_phys_segments = bio->bi_vcnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
				no_sg_merge);
		bio->bi_next = nxt;
	}

	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
Example #29
static void ata_io_complete(struct bio *bio, int error)
{
	struct aoereq *rq;
	struct aoedev *d;
	struct sk_buff *skb;
	struct aoe_hdr *aoe;
	struct aoe_atahdr *ata;
	int len;
	unsigned int bytes = 0;

	if (!error)
		bytes = bio->bi_io_vec[0].bv_len;

	rq = bio->bi_private;
	d = rq->d;
	skb = rq->skb;

	aoe = (struct aoe_hdr *) skb_mac_header(skb);
	ata = (struct aoe_atahdr *) aoe->data;

	len = sizeof *aoe + sizeof *ata;
	if (bio_flagged(bio, BIO_UPTODATE)) {
		if (bio_data_dir(bio) == READ)
			len += bytes;
		ata->scnt = 0;
		ata->cmdstat = ATA_DRDY;
		ata->errfeat = 0;
		// should increment lba here, too
	} else {
		printk(KERN_ERR "I/O error %d on %s\n", error, d->kobj.name);
		ata->cmdstat = ATA_ERR | ATA_DF;
		ata->errfeat = ATA_UNC | ATA_ABORTED;
	}

	bio_put(bio);
	rq->skb = NULL;
	atomic_dec(&d->busy);

	skb_trim(skb, len);
	skb_queue_tail(&skb_outq, skb);

	wake_up(&ktwaitq);
}
Example #30
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (error)
		dev_warn(DEV, "p %s: error=%d\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read", error);
	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}