Example #1
0
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256 pages, allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * To avoid degrading performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
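The locking policy described in the comment above generalizes beyond dm-crypt. A minimal sketch of the same try-nonblocking-first, then-serialize-and-block pattern, using hypothetical pool_alloc_page()/pool_free_page() helpers and a pool_lock mutex in place of the mempool and cc->bio_alloc_lock:

static DEFINE_MUTEX(pool_lock);

static int alloc_pages_no_deadlock(struct page **pages, unsigned int nr)
{
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_NOWARN;
	unsigned int i;

retry:
	/* Only blocking callers take the mutex. */
	if (gfp_mask & __GFP_WAIT)
		mutex_lock(&pool_lock);

	for (i = 0; i < nr; i++) {
		pages[i] = pool_alloc_page(gfp_mask);
		if (!pages[i]) {
			/* Return everything so another caller can make progress. */
			while (i--)
				pool_free_page(pages[i]);
			if (gfp_mask & __GFP_WAIT) {
				mutex_unlock(&pool_lock);
				return -ENOMEM;
			}
			/* Fall back to blocking allocation under the mutex. */
			gfp_mask = GFP_NOIO | __GFP_NOWARN;
			goto retry;
		}
	}

	if (gfp_mask & __GFP_WAIT)
		mutex_unlock(&pool_lock);
	return 0;
}

Because the whole blocking pass runs under the mutex, two callers can never each hold half of the pool while waiting for the other half, which is exactly the deadlock the comment describes.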
Example #2
0
#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
static void _endio4read(struct bio *clone)
#else
static void _endio4read(struct bio *clone, int error)
#endif
{
	struct iostash_bio *io = clone->bi_private;
	struct hdd_info *hdd = io->hdd;
	int ssd_online_to_be = 0;
	DBG("Got end_io (%lu) %p s=%lu l=%u base_bio=%p base_bio s=%lu l=%u.",
		clone->bi_rw, clone, BIO_SECTOR(clone), bio_sectors(clone), io->base_bio,
		BIO_SECTOR(io->base_bio), bio_sectors(io->base_bio));

	do {
#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
		const int error = clone->bi_error;
#else
		if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		{
			ERR("cloned bio not UPTODATE.");
			error = -EIO;
			ssd_online_to_be = 1;	/* because this error does not mean SSD failure */
		}
#endif
		io->error = error;

		/* if this bio is for SSD: common case */
		if (clone->bi_bdev != io->base_bio->bi_bdev) {
			DBG("SSD cloned bio endio.");
			if (unlikely(error)) {	/* Error handling */
				ERR("iostash: SSD read error: error = %d, sctr = %ld :::\n",
				     error, io->psn);
				io->ssd->online = ssd_online_to_be;

				_inc_pending(io);	/* to prevent io from releasing */
				_io_queue(io);
				break;
			}

			sce_put4read(hdd->lun, io->psn, io->nr_sctr);
			gctx.st_cread++;
			break;
		}
		DBG("iostash: Retried HDD read return = %d, sctr = %ld :::\n",
		       error, io->psn);
		_dec_pending(io);

	} while (0);

	bio_put(clone);
	_dec_pending(io);
}
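The version split above can be collected into one helper so the completion body reads the same on both sides. A sketch only, reusing the KERNEL_VERSION(4,2,0) threshold the driver already checks; this helper is not part of the iostash sources:

static inline int compat_bio_error(struct bio *bio, int error)
{
#if KERNEL_VERSION(4,2,0) <= LINUX_VERSION_CODE
	/* The error is carried in the bio itself. */
	return bio->bi_error;
#else
	/* Older kernels pass the error in, but a cleared BIO_UPTODATE
	 * with error == 0 still means the I/O failed. */
	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		return -EIO;
	return error;
#endif
}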
Example #3
0
/**
 * toi_end_bio - bio completion function.
 * @bio: bio that has completed.
 * @err: Error value. Yes, like end_swap_bio_read, we ignore it.
 *
 * Function called by the block driver from interrupt context when I/O is
 * completed. If we were writing the page, we want to free it and will have
 * set bio->bi_private to the parameter we should use in telling the page
 * allocation accounting code what the page was allocated for. If we're
 * reading the page, it will be in the singly linked list made from
 * page->private pointers.
 **/
static void toi_end_bio(struct bio *bio, int err)
{
	struct page *page = bio->bi_io_vec[0].bv_page;
	unsigned long private = (unsigned long) bio->bi_private;

	BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));

	unlock_page(page);
	bio_put(bio);

	if (waiting_on == page)
		waiting_on = NULL;

	put_page(page);

	/*
	 * bi_private was captured before the first bio_put() so the bio is
	 * never dereferenced after a reference has been dropped; the second
	 * put balances the extra reference taken at submission time (see
	 * the bio_get() pattern in Example #7).
	 */
	if (private)
		toi__free_page((int) private, page);

	bio_put(bio);

	atomic_dec(&toi_io_in_progress);
	atomic_inc(&toi_io_done);

	wake_up(&num_in_progress_wait);
}
Example #4
0
/**
 * Free all of the bio's pages and then call bio_put().
 */
void bio_put_with_pages(struct bio *bio)
{
	struct bio_vec *bv;
	int i;
	ASSERT(bio);

	bio_for_each_segment_all(bv, bio, i) {
		if (bv->bv_page) {
			free_page_dec(bv->bv_page);
			bv->bv_page = NULL;
		}
	}
	ASSERT(atomic_read(&bio->__bi_cnt) == 1);
	bio_put(bio);
}
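A hedged usage sketch for the helper above: build a bio that owns its pages and release both with a single call. alloc_page_inc() is assumed to be the allocation-side counterpart of the free_page_dec() accounting helper; it is not shown in this example.

static struct bio *alloc_bio_with_pages(unsigned int nr_pages)
{
	struct bio *bio;
	unsigned int i;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = alloc_page_inc(GFP_NOIO);

		if (!page || !bio_add_page(bio, page, PAGE_SIZE, 0)) {
			if (page)
				free_page_dec(page);
			/* Frees the pages added so far and drops the bio. */
			bio_put_with_pages(bio);
			return NULL;
		}
	}
	return bio;
}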
Example #5
0
/*
 * sbs_bio_callback - callback invoked when the bio completes.
 */
static int sbs_bio_callback(struct bio *bio, unsigned int bytes_done, int error)
{
	struct sbs_bio_context *tmp_bio_context = bio->bi_private;
	/* just for test */
	klog(WARN, "step in function sbs_bio_callback.\n");
	spin_lock(&tmp_bio_context->lock);
	tmp_bio_context->error |= error;
	tmp_bio_context->bytes_done += bytes_done;
	spin_unlock(&tmp_bio_context->lock);
	dec_io_counter( );
	sbs_context_put(tmp_bio_context);
	bio_put(bio);
	printk(KERN_NOTICE "%s, pid: %d\n", __func__, current->pid);
	return 0;
}
Example #6
0
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}
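The loop above walks bios that were chained through bi_private. A sketch of how a deferred-completion path could push bios onto such a list; this mirrors the idea only and is not a verbatim copy of ext4_end_bio():

static void defer_bio_to_io_end(ext4_io_end_t *io_end, struct bio *bio)
{
	/*
	 * Push the bio onto the head of the singly linked list rooted at
	 * io_end->bio; the old head becomes this bio's bi_private.  xchg()
	 * keeps concurrent completions from losing entries.
	 */
	bio->bi_private = xchg(&io_end->bio, bio);
}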
Example #7
0
/**
 *	submit - submit BIO request.
 *	@rw:	READ or WRITE.
 *	@bdev:	block device to read from or write to.
 *	@sector:	sector on @bdev holding the page.
 *	@page:	page we're reading or writing.
 *	@bio_chain: list of pending bios (for async reading)
 *
 *	Straight from the textbook - allocate and initialize the bio.
 *	If we're reading, make sure the page is marked as dirty.
 *	Then submit it and, if @bio_chain == NULL, wait.
 */
static int submit(int rw, struct block_device *bdev, sector_t sector,
		struct page *page, struct bio **bio_chain)
{
	const int bio_rw = rw | REQ_SYNC;
	struct bio *bio;

	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = end_swap_bio_read;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
			(unsigned long long)sector);
		bio_put(bio);
		return -EFAULT;
	}

	lock_page(page);
	bio_get(bio);

	if (bio_chain == NULL) {
		submit_bio(bio_rw, bio);
		wait_on_page_locked(page);
		if (rw == READ)
			bio_set_pages_dirty(bio);
		bio_put(bio);
	} else {
		if (rw == READ)
			get_page(page);	/* These pages are freed later */
		bio->bi_private = *bio_chain;
		*bio_chain = bio;
		submit_bio(bio_rw, bio);
	}
	return 0;
}
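When @bio_chain is non-NULL, submit() links the bios through bi_private and leaves them pending, so somebody must later walk the chain and wait. A sketch in the spirit of the swap-image code's chain draining, assuming the chain is only used for reads (as the comment above says), so each page carries the extra reference taken in submit():

static int wait_on_bio_chain(struct bio **bio_chain)
{
	struct bio *bio;
	struct bio *next_bio;
	int ret = 0;

	if (bio_chain == NULL || *bio_chain == NULL)
		return 0;

	for (bio = *bio_chain; bio; bio = next_bio) {
		struct page *page = bio->bi_io_vec[0].bv_page;

		next_bio = bio->bi_private;
		/* end_swap_bio_read() unlocks the page when the I/O is done. */
		wait_on_page_locked(page);
		if (!PageUptodate(page) || PageError(page))
			ret = -EIO;
		put_page(page);		/* drop the get_page() from submit() */
		bio_put(bio);		/* drop the bio_get() from submit() */
	}
	*bio_chain = NULL;
	return ret;
}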
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
		void *buf, void **data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret = 0;
	unsigned int io_size;
	loff_t start;
	int offset;

	io_size = hfsplus_min_io_size(sb);
	start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
	offset = start & (io_size - 1);
	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	if (!(rw & WRITE) && data)
		*data = (u8 *)buf + offset;

	while (io_size > 0) {
		unsigned int page_offset = offset_in_page(buf);
		unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
					 io_size);

		ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
		if (ret != len) {
			ret = -EIO;
			goto out;
		}
		io_size -= len;
		buf = (u8 *)buf + len;
	}

	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

out:
	bio_put(bio);
	return ret < 0 ? ret : 0;
}
Example #9
0
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                                      unsigned *out_of_pages)
{
    struct crypt_config *cc = io->cc;
    struct bio *clone;
    unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
    unsigned i, len;
    struct page *page;

    clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
    if (!clone)
        return NULL;

    clone_init(io, clone);
    *out_of_pages = 0;

    for (i = 0; i < nr_iovecs; i++) {
        page = mempool_alloc(cc->page_pool, gfp_mask);
        if (!page) {
            *out_of_pages = 1;
            break;
        }

        /*
         * If additional pages cannot be allocated without waiting,
         * return a partially-allocated bio.  The caller will then try
         * to allocate more bios while submitting this partial bio.
         */
        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

        len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

        if (!bio_add_page(clone, page, len, 0)) {
            mempool_free(page, cc->page_pool);
            break;
        }

        size -= len;
    }

    if (!clone->bi_size) {
        bio_put(clone);
        return NULL;
    }

    return clone;
}
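A hedged sketch of the caller pattern the comment above describes: keep requesting the remaining bytes, submit whatever partial clone comes back, and pause briefly when the page pool ran dry so in-flight bios can return their pages to the mempool. submit_clone() is a hypothetical stand-in for the dm-crypt code that encrypts and issues the clone.

static int crypt_write_all(struct dm_crypt_io *io, unsigned size)
{
	struct bio *clone;
	unsigned out_of_pages;

	while (size) {
		clone = crypt_alloc_buffer(io, size, &out_of_pages);
		if (!clone)
			return -ENOMEM;

		size -= clone->bi_size;
		submit_clone(io, clone);

		/* Let outstanding bios complete and refill the mempool. */
		if (out_of_pages && size)
			congestion_wait(BLK_RW_ASYNC, HZ / 100);
	}
	return 0;
}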
Example #10
0
static int vbc_blk_access(struct page *page, sector_t sector, bool is_read)
{
	struct block_device *bdev;
	struct bio *bio;
	int err, rq;
	fmode_t devmode = is_read ? FMODE_READ : FMODE_WRITE;

	bdev = vbc_blk_get_device(config.phandle, devmode);
	if (IS_ERR(bdev)) {
		pr_err("could not open block dev\n");
		return PTR_ERR(bdev);
	}

	/* map the sector to page */
	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		err = -ENOMEM;
		goto unwind_bdev;
	}
	bio->bi_bdev	= bdev;
	bio->bi_sector	= sector;
	bio->bi_vcnt	= 1;
	bio->bi_idx	= 0;
	bio->bi_size	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_page	= page;
	bio->bi_io_vec[0].bv_len	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_offset	= 0;

	/* submit bio */
	rq = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
	if (!is_read)
		rq |= REQ_WRITE;

	vbc_blk_submit_bio(bio, rq);

	/* vbc_blk_endio passes up any error in bi_private */
	err = (int)(long)bio->bi_private;
	bio_put(bio);

unwind_bdev:
	if (!is_read) {
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
	}
	blkdev_put(bdev, devmode);

	return err;
}
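The helpers vbc_blk_access() relies on are not shown here. A hedged reconstruction of the interface the comment describes, where the end_io hands the error back through bi_private and vbc_blk_submit_bio() waits for it; these are sketches, not the original implementations:

static void vbc_blk_endio(struct bio *bio, int error)
{
	struct completion *done = bio->bi_private;

	/* Hand the error back through bi_private, as the caller expects. */
	bio->bi_private = (void *)(long)error;
	complete(done);
}

static void vbc_blk_submit_bio(struct bio *bio, int rq)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_private = &done;
	bio->bi_end_io = vbc_blk_endio;
	submit_bio(rq, bio);
	wait_for_completion(&done);
}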
Example #11
0
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}
Example #13
0
/*
 * Completion handler for a single-page bio-based read.
 *
 * mpage_end_io_read() would also do, but it's static.
 */
static void
end_bio_single_page_read(struct bio *bio, int err UNUSED_ARG)
{
	struct page *page;

	page = bio->bi_io_vec[0].bv_page;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		SetPageUptodate(page);
	} else {
		ClearPageUptodate(page);
		SetPageError(page);
	}
	unlock_page(page);
	bio_put(bio);
}
Example #14
0
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question. Does not wait.
 */
int blkdev_issue_discard(struct block_device *bdev,
			 sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!q->prepare_discard_fn)
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 0);
		if (!bio)
			return -ENOMEM;

		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;

		bio->bi_sector = sector;

		if (nr_sects > q->max_hw_sectors) {
			bio->bi_size = q->max_hw_sectors << 9;
			nr_sects -= q->max_hw_sectors;
			sector += q->max_hw_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		bio_get(bio);
		submit_bio(DISCARD_BARRIER, bio);

		/* Check if it failed immediately */
		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
}
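blkdev_discard_end_io() is referenced above but not shown. A minimal sketch consistent with the two flag checks the loop performs after submit_bio() (BIO_EOPNOTSUPP and BIO_UPTODATE); this is a reconstruction, not a quote of the original callback:

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		/* Record why the discard failed for the submitter to inspect. */
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	/* Drops the reference from bio_alloc(); the submitter's bio_get()
	 * keeps the bio alive for the flag checks above. */
	bio_put(bio);
}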
Example #15
0
/**
 * __blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
 */
int __blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
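bio_end_flush() is installed above but not shown. A sketch consistent with how the caller uses it: signal the completion stored in bi_private and drop the bio's own reference (the caller's bio_get() keeps the bio valid for the BIO_UPTODATE check afterwards). A reconstruction, not a quote:

static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}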
Example #16
0
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
Example #17
0
/**
 * This is synchronous, so we use a struct completion to block
 * until the request finishes. The completion is signalled by the callback
 * finish_bio_sync().
 */
static int issue_bio_sync(KrDevice* dev, struct page* page, int sector, int rw)
{
    struct completion event;
    struct bio* bio = kr_create_bio(dev, page, sector);
    if (!bio)
        return -KR_ENOMEM;

    bio->bi_end_io = finish_bio_sync;
    bio->bi_private = &event;

    init_completion(&event);
    submit_bio(rw | REQ_SYNC, bio);
    wait_for_completion(&event);

    bio_put(bio);
    return 0;
}
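finish_bio_sync() is not shown in this example. Given how issue_bio_sync() uses it, a minimal sketch is just a completion signal; note that, like the caller, it ignores the I/O status:

static void finish_bio_sync(struct bio *bio, int error)
{
	struct completion *event = bio->bi_private;

	/* Wake the submitter; error is deliberately ignored, as above. */
	complete(event);
}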
Example #18
0
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}
Example #19
0
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
Example #20
0
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io_end->inode));

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}
Example #21
0
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	bio_put(bio);
}
Example #22
0
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
    struct bio *bio;
    struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

    trace_f2fs_submit_page_bio(page, fio);
    f2fs_trace_ios(fio, 0);

    /* Allocate a new bio */
    bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

    if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
        bio_put(bio);
        return -EFAULT;
    }

    submit_bio(fio->rw, bio);
    return 0;
}
Example #23
0
static void mpage_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
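The open-coded reverse walk above predates the bio_for_each_segment_all() iterator used in Example #4. On kernels that provide the same three-argument form, the completion can be written as a forward walk; a sketch, not a drop-in replacement for every kernel this code targets:

static void mpage_end_io_write_iter(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!uptodate && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		end_page_writeback(page);
	}
	bio_put(bio);
}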
Example #24
0
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}
Example #26
0
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
Example #27
0
static void ata_io_complete(struct bio *bio, int error)
{
	struct aoereq *rq;
	struct aoedev *d;
	struct sk_buff *skb;
	struct aoe_hdr *aoe;
	struct aoe_atahdr *ata;
	int len;
	unsigned int bytes = 0;

	if (!error)
		bytes = bio->bi_io_vec[0].bv_len;

	rq = bio->bi_private;
	d = rq->d;
	skb = rq->skb;

	aoe = (struct aoe_hdr *) skb_mac_header(skb);
	ata = (struct aoe_atahdr *) aoe->data;

	len = sizeof *aoe + sizeof *ata;
	if (bio_flagged(bio, BIO_UPTODATE)) {
		if (bio_data_dir(bio) == READ)
			len += bytes;
		ata->scnt = 0;
		ata->cmdstat = ATA_DRDY;
		ata->errfeat = 0;
		/* should increment lba here, too */
	} else {
		printk(KERN_ERR "I/O error %d on %s\n", error, d->kobj.name);
		ata->cmdstat = ATA_ERR | ATA_DF;
		ata->errfeat = ATA_UNC | ATA_ABORTED;
	}

	bio_put(bio);
	rq->skb = NULL;
	atomic_dec(&d->busy);

	skb_trim(skb, len);
	skb_queue_tail(&skb_outq, skb);

	wake_up(&ktwaitq);
}
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}
Example #29
0
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_private = &wait;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be copied
	 * from rq->sector.
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	ret = 0;
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (error)
		dev_warn(DEV, "p %s: error=%d\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read", error);
	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}