Example #1
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
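The completion callback referenced above is not shown. A minimal sketch, assuming the two-argument bi_end_io signature of this kernel generation; the body is reconstructed from how sync_request() uses bi_private and BIO_UPTODATE, so treat it as an illustration rather than the original:

/* Sketch: wake the waiter; sync_request() re-checks BIO_UPTODATE itself. */
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}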
Example #2
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));
#ifdef CONFIG_FRONTSWAP
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}
#endif
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}
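end_swap_bio_read is not included here. A sketch of what the read-side completion plausibly does in this era, based on the page-lock and uptodate conventions visible in swap_readpage() (the upstream version also logs the failing device and sector, omitted for brevity):

/* Sketch: reconstructed, not the original. */
static void end_swap_bio_read(struct bio *bio, int err)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		SetPageError(page);
		ClearPageUptodate(page);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);	/* swap_readpage() left the page locked */
	bio_put(bio);		/* drop the reference from get_swap_bio() */
}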
Example #3
static int submit(int rw, pgoff_t page_off, void *page)
{
	int error = 0;
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;
	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = resume_bdev;
	bio->bi_end_io = end_io;

	if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) {
		printk("swsusp: ERROR: adding page to bio at %ld\n",page_off);
		error = -EFAULT;
		goto Done;
	}

	atomic_set(&io_done, 1);
	submit_bio(rw | (1 << BIO_RW_SYNC), bio);
	while (atomic_read(&io_done))
		yield();
	if (rw == READ)
		bio_set_pages_dirty(bio);
 Done:
	bio_put(bio);
	return error;
}
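Both end_io and the io_done flag live elsewhere in this file. A sketch consistent with the busy-wait in submit(), assuming the two-argument bi_end_io signature (kernels of this vintage also had a three-argument variant, so this is an assumption):

static atomic_t io_done;	/* 1 while a request is in flight */

/* Sketch: a failed read of the image is unrecoverable here, hence the panic. */
static void end_io(struct bio *bio, int err)
{
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		panic("I/O error reading memory image");
	atomic_set(&io_done, 0);
}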
Example #4
static ssize_t __ext4_read_buff(struct super_block *sb, size_t offset, void *buff, size_t size)
{
	ssize_t ret, start_blk;
	struct bio *bio;

	start_blk = offset / SECT_SIZE;

	bio = bio_alloc();
	if (!bio)
		return -ENOMEM;

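	/* Round the transfer up to whole sectors covering the requested byte range. */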
	bio->size = (offset % SECT_SIZE + size + SECT_SIZE - 1) & ~(SECT_SIZE - 1);
	bio->data = malloc(bio->size);
	if (!bio->data) {
		ret = -ENOMEM;
		goto L1;
	}

	bio->bdev = sb->s_bdev;
	bio->sect = start_blk;
	submit_bio(READ, bio);

	memcpy(buff, bio->data + offset % SECT_SIZE, size);
	ret = size;

	free(bio->data);
L1:
	bio_free(bio);
	return ret;
}
Example #5
int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
		void *data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	/*
	 * We always submit one sector at a time, so bio_add_page must not fail.
	 */
	if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
			 offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
		BUG();

	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE)) {
		bio_put(bio);
		return -EIO;
	}
	bio_put(bio);
	return 0;
}
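A plausible sketch of hfsplus_end_io_sync under the same two-argument signature assumption: on error it clears BIO_UPTODATE so the bio_flagged() check above fails, then wakes the waiter.

static void hfsplus_end_io_sync(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	complete(bio->bi_private);
}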
Example #6
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOBUFS;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = sector * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC | REQ_META, bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}
	p = kmap(page);
	gfs2_sb_in(sdp, p);
	kunmap(page);
	__free_page(page);
	return gfs2_check_sb(sdp, silent);
}
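end_bio_io_page is what lets wait_on_page_locked() above double as I/O synchronization. A sketch, again assuming the two-argument bi_end_io signature:

static void end_bio_io_page(struct bio *bio, int error)
{
	struct page *page = bio->bi_private;

	if (!error)
		SetPageUptodate(page);
	else
		printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
	unlock_page(page);	/* wakes gfs2_read_super() */
}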
Example #7
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	trace_swap_out(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}
Example #8
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
#ifdef CONFIG_FRONTSWAP
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
#endif
	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}
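The write-side completion is the mirror image of the read side; a sketch under the same assumptions (a failed swap write re-dirties the page so the data is not lost):

/* Sketch: reconstructed, not the original. */
static void end_swap_bio_write(struct bio *bio, int err)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		SetPageError(page);
		set_page_dirty(page);	/* keep the data; writeback will retry */
	}
	end_page_writeback(page);
	bio_put(bio);
}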
Example #9
static int chromeos_invalidate_kernel_submit(struct bio *bio,
					     struct block_device *bdev,
					     int rw, struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	bio->bi_private = &wait;
	bio->bi_end_io = chromeos_invalidate_kernel_endio;
	bio->bi_bdev = bdev;

	bio->bi_sector = 0;
	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = 512;
	bio->bi_rw = rw;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = 512;
	bio->bi_io_vec[0].bv_offset = 0;

	submit_bio(rw, bio);
	/* Wait up to 2 seconds for completion or fail. */
	if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)))
		return -1;
	return 0;
}
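chromeos_invalidate_kernel_endio is not shown. Given that the submitter only checks for a timeout, never BIO_UPTODATE, a minimal sketch needs nothing more than:

static void chromeos_invalidate_kernel_endio(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}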
Example #10
/**
 * submit - submit BIO request
 * @writing: READ or WRITE.
 * @dev: The block device we're using.
 * @first_block: The first sector we're using.
 * @page: The page being used for I/O.
 * @free_group: If writing, the group that was used in allocating the page
 * 	and which will be used in freeing the page from the completion
 * 	routine.
 *
 * Based on Patrick Mochell's pmdisk code from long ago: "Straight from the
 * textbook - allocate and initialize the bio. If we're writing, make sure
 * the page is marked as dirty. Then submit it and carry on."
 *
 * If we're just testing the speed of our own code, we fake having done all
 * the hard work and all toi_end_bio immediately.
 **/
static int submit(int writing, struct block_device *dev, sector_t first_block,
		struct page *page, int free_group)
{
	struct bio *bio = NULL;
	int cur_outstanding_io, result;

	/*
	 * Shouldn't throttle if reading - can deadlock in the single
	 * threaded case as pages are only freed when we use the
	 * readahead.
	 */
	if (writing) {
		result = throttle_if_needed(MEMORY_ONLY | THROTTLE_WAIT);
		if (result)
			return result;
	}

	while (!bio) {
		bio = bio_alloc(TOI_ATOMIC_GFP, 1);
		if (!bio) {
			set_free_mem_throttle();
			do_bio_wait(1);
		}
	}

	bio->bi_bdev = dev;
	bio->bi_sector = first_block;
	bio->bi_private = (void *) ((unsigned long) free_group);
	bio->bi_end_io = toi_end_bio;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_DEBUG "ERROR: adding page to bio at %lld\n",
				(unsigned long long) first_block);
		bio_put(bio);
		return -EFAULT;
	}

	bio_get(bio);

	cur_outstanding_io = atomic_add_return(1, &toi_io_in_progress);
	if (writing) {
		if (cur_outstanding_io > max_outstanding_writes)
			max_outstanding_writes = cur_outstanding_io;
	} else {
		if (cur_outstanding_io > max_outstanding_reads)
			max_outstanding_reads = cur_outstanding_io;
	}

	if (unlikely(test_action_state(TOI_TEST_FILTER_SPEED))) {
		/* Fake having done the hard work */
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		toi_end_bio(bio, 0);
	} else
		submit_bio(writing | (1 << BIO_RW_SYNCIO) |
				(1 << BIO_RW_UNPLUG), bio);

	return 0;
}
Example #11
static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}
Example #12
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	atomic_inc(&cb->pending_bios);
	bio->bi_end_io = mpage_end_io;
	submit_bio(rw, bio);
	return NULL;
}
Example #13
struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io_read;
	if (rw == WRITE)
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}
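For reference, a simplified sketch of the read-side completion this wrapper selects; the real helper of this era walks the bvec array in reverse with prefetching, which is omitted here, so treat the loop below as illustrative:

static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bio->bi_io_vec[i].bv_page;

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);	/* page was locked for read */
	}
	bio_put(bio);
}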
Example #14
static int block_read(const char *user_dev_path, /* Path to rpmb device */
		char *read_buff, /* User buffer */
		size_t size) /* Size of data to read (in bytes) */
{
	int i = 0, index = 0;
	int err;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_READ, block_read);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
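		/* Set to 0 because the addr is part of the RPMB data frame */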
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		memcpy(read_buff + index, bio_buff, 512);
		index += 512;
	}

	err = size;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);

	return err;
}
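emmc_rpmb_bio_complete is shared by block_read() above and block_write() later in this collection; since block_read() waits synchronously and checks BIO_UPTODATE itself after waking, a sketch only has to signal the completion:

static void emmc_rpmb_bio_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}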
Example #15
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
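A sketch of the batch bookkeeping assumed above: bb.done starts at 1, each submitted bio adds one, and each completion drops one, so the waiter (which drops the initial count) sleeps only if bios are still in flight. Whether -EOPNOTSUPP is filtered varies by kernel version; it is shown here as one plausible choice.

struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && err != -EOPNOTSUPP)
		bb->error = err;	/* remember the last error seen */
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}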
Example #16
static void vbc_blk_submit_bio(struct bio *bio, int rq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	bio->bi_end_io	= vbc_blk_endio;
	bio->bi_private = &wait;
	submit_bio(rq, bio);
	wait_for_completion(&wait);
}
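vbc_blk_endio presumably just signals the waiter; if callers needed the error code it would have to be stashed alongside the completion. A minimal sketch:

static void vbc_blk_endio(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}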
Example #17
STATIC int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= DRBD_REQ_FUA | DRBD_REQ_FLUSH;
	rw |= DRBD_REQ_UNPLUG | DRBD_REQ_SYNC;

#ifndef REQ_FLUSH
	/* < 2.6.36, "barrier" semantic may fail with EOPNOTSUPP */
 retry:
#endif
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	trace_drbd_bio(mdev, "Md", bio, 0, NULL);

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

#ifndef REQ_FLUSH
	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely((bio->bi_rw & DRBD_REQ_HARDBARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~DRBD_REQ_HARDBARRIER;
		bio_put(bio);
		goto retry;
	}
#endif
 out:
	bio_put(bio);
	return ok;
}
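drbd_md_io_complete pairs with the struct drbd_md_io set up above; a sketch that records the error and signals the event the submitter waits on:

static void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io = bio->bi_private;

	md_io->error = error;
	complete(&md_io->event);
}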
Example #18
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}
Example #19
static int issue_bio_async(KrDevice* dev, struct page* page, int sector, int rw)
{
    struct bio* bio = kr_create_bio(dev, page, sector);

    if (!bio)
        return -KR_ENOMEM;

    bio->bi_end_io = finish_bio_async;
    submit_bio(rw, bio);
    return 0;
}
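finish_bio_async and kr_create_bio belong to this driver's private API and are not shown. Since nothing waits on the bio, a plausible sketch (hypothetical, like the names around it) only releases the reference:

static void finish_bio_async(struct bio *bio, int err)
{
	/* Hypothetical sketch: a real driver would also propagate err
	 * into its own bookkeeping before dropping the bio. */
	bio_put(bio);
}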
Example #20
static void mmc_panic_erase(void)
{
	int i = 0;
	int err;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		return;
	}
	err = blkdev_get(bdev, FMODE_WRITE);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		return;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < bdev->bd_part->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (bdev->bd_part->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);

	return;
}
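mmc_bio_complete, reused by the apanic variants in Examples #27 and #29, only needs to wake the waiting context:

static void mmc_bio_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}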
Example #21
static int block_write(const char *user_dev_path, /* Path to rpmb device node */
		const char *write_buff, /* buffer to write to rpmb */
		size_t size, /* size of data to write (in bytes) */
		int flags) /* REQ_META flags for Reliable writes */
{
	int i = 0, index = 0;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_WRITE, block_write);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		/* Copy data from user buffer to bio buffer */
		memcpy(bio_buff, write_buff + index, 512);
		index += 512;

		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		/* Set to 0 because the addr is part of RPMB data frame */
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(WRITE | flags, &bio);
		wait_for_completion(&complete);
	}

	blkdev_put(bdev, FMODE_WRITE);

	return 0;
}
Example #22
/*
 * hfsplus_submit_bio - Perform block I/O
 * @sb: super block of volume for I/O
 * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
 * @buf: buffer for I/O
 * @data: output pointer for location of requested data
 * @rw: direction of I/O
 *
 * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
 * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
 * @data will return a pointer to the start of the requested sector,
 * which may not be the same location as @buf.
 *
 * If @sector is not aligned to the bdev logical block size it will
 * be rounded down. For writes this means that @buf should contain data
 * that starts at the rounded-down address. As long as the data was
 * read using hfsplus_submit_bio() and the same buffer is used things
 * will work correctly.
 */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
                       void *buf, void **data, int rw)
{
    DECLARE_COMPLETION_ONSTACK(wait);
    struct bio *bio;
    int ret = 0;
    u64 io_size;
    loff_t start;
    int offset;

    /*
     * Align sector to hardware sector size and find offset. We
     * assume that io_size is a power of two, which _should_
     * be true.
     */
    io_size = hfsplus_min_io_size(sb);
    start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
    offset = start & (io_size - 1);
    sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

    bio = bio_alloc(GFP_NOIO, 1);
    bio->bi_sector = sector;
    bio->bi_bdev = sb->s_bdev;
    bio->bi_end_io = hfsplus_end_io_sync;
    bio->bi_private = &wait;

    if (!(rw & WRITE) && data)
        *data = (u8 *)buf + offset;

    while (io_size > 0) {
        unsigned int page_offset = offset_in_page(buf);
        unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
                                 io_size);

        ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
        if (ret != len) {
            ret = -EIO;
            goto out;
        }
        io_size -= len;
        buf = (u8 *)buf + len;
    }

    submit_bio(rw, bio);
    wait_for_completion(&wait);

    if (!bio_flagged(bio, BIO_UPTODATE))
        ret = -EIO;

out:
    bio_put(bio);
    return ret < 0 ? ret : 0;
}
Example #23
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
Example #24
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		if (!bio) {
			printk(KERN_WARNING "%s: %s() failed\n", __FILE__, __func__);
			BUG();
		}
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
Example #25
static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lc->logdev->bdev;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}
Example #26
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}
Example #27
static void mmc_panic_erase(void)
{
	int i = 0;
	dev_t devid;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	devid = MKDEV(ctx->mmchd->major, ctx->mmchd->first_minor +
		ctx->mmchd->partno);
	bdev = open_by_devnum(devid, FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "apanic: open device failed with %ld\n",
			PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->mmchd->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (ctx->mmchd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->mmchd->nr_sects - i) * 512;
			bio.bi_size = (ctx->mmchd->nr_sects - i) * 512;
			i = ctx->mmchd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #28
static int ext4_check_fs_type(const char *bdev_name)
{
	struct ext4_super_block *e4_sb;
	struct bio *bio;
	char buff[EXT4_SUPER_BLK_SIZE];
	struct block_device *bdev;
	uint32_t fc, frc, fi;
	int ret;

	bdev = bdev_get(bdev_name);
	if (NULL == bdev) {
		DPRINT("bdev %s not found!\n", bdev_name);
		return -ENODEV;
	}

	bio = bio_alloc();
	if (!bio)
		return -ENOMEM;

	bio->bdev = bdev;
	bio->sect = 1024 / SECT_SIZE;
	bio->size = sizeof(buff);
	bio->data = buff;
	submit_bio(READ, bio);
	// TODO: check flags here
	bio_free(bio);

	e4_sb = (struct ext4_super_block *)buff;

	if (0xEF53 != e4_sb->s_magic) {
		DPRINT("%s is not \"ext4\" fs!\n", bdev_name);
		return -EINVAL;
	}

	fc = e4_sb->s_feature_compat;
	fi = e4_sb->s_feature_incompat;
	frc = e4_sb->s_feature_ro_compat;


	ret = ck_ext4_feature(fc, frc, fi);

#ifdef CONFIG_DEBUG
	extern void ext_sb_list(struct ext4_super_block * esb);
	if (ret == 0) {
		ext_sb_list(e4_sb);
	}
#endif

	return ret;
}
Example #29
static void mmc_panic_erase(void)
{
	int i = 0;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev;

	if (!ctx->hd || !ctx->mmc_panic_ops)
		goto out_err;

	dev = part_to_dev(ctx->hd);
	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->hd->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (ctx->hd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->hd->nr_sects - i) * 512;
			bio.bi_size = (ctx->hd->nr_sects - i) * 512;
			i = ctx->hd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #30
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
    struct f2fs_io_info *fio = &io->fio;

    if (!io->bio)
        return;

    if (is_read_io(fio->rw))
        trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
    else
        trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

    submit_bio(fio->rw, io->bio);
    io->bio = NULL;
}