Example no. 1
static int test_do_bio_alloc(int num)
{
	tbiop = bio_alloc(GFP_KERNEL, num);
	if (tbiop == NULL) {
		printk("tbio: bio_alloc failed\n");
		return -1;
	}
	bio_put(tbiop);

	return 0;
}
Example no. 2
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
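The allocate-then-halve fallback above also shows up in Examples 4, 5 and 6 below. As a hedged, generic sketch of that pattern (the helper name bio_alloc_fallback() is hypothetical, and it assumes a kernel in which bio_alloc() can return NULL), it looks like this:

static struct bio *bio_alloc_fallback(gfp_t gfp, int nr_vecs)
{
	struct bio *bio = bio_alloc(gfp, nr_vecs);

	/*
	 * A task in memory-reclaim context (PF_MEMALLOC) must not wait
	 * for more memory, so retry with progressively fewer vecs
	 * instead of giving up outright.
	 */
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp, nr_vecs);
	}
	return bio;
}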
Example no. 3
/*
 * hfsplus_submit_bio - Perform block I/O
 * @sb: super block of volume for I/O
 * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
 * @buf: buffer for I/O
 * @data: output pointer for location of requested data
 * @rw: direction of I/O
 *
 * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
 * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
 * @data will return a pointer to the start of the requested sector,
 * which may not be the same location as @buf.
 *
 * If @sector is not aligned to the bdev logical block size it will
 * be rounded down. For writes this means that @buf should contain data
 * that starts at the rounded-down address. As long as the data was
 * read using hfsplus_submit_bio() and the same buffer is used things
 * will work correctly.
 */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
                       void *buf, void **data, int rw)
{
    DECLARE_COMPLETION_ONSTACK(wait);
    struct bio *bio;
    int ret = 0;
    u64 io_size;
    loff_t start;
    int offset;

    /*
     * Align sector to hardware sector size and find offset. We
     * assume that io_size is a power of two, which _should_
     * be true.
     */
    io_size = hfsplus_min_io_size(sb);
    start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
    offset = start & (io_size - 1);
    sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

    bio = bio_alloc(GFP_NOIO, 1);
    bio->bi_sector = sector;
    bio->bi_bdev = sb->s_bdev;
    bio->bi_end_io = hfsplus_end_io_sync;
    bio->bi_private = &wait;

    if (!(rw & WRITE) && data)
        *data = (u8 *)buf + offset;

    while (io_size > 0) {
        unsigned int page_offset = offset_in_page(buf);
        unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
                                 io_size);

        ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
        if (ret != len) {
            ret = -EIO;
            goto out;
        }
        io_size -= len;
        buf = (u8 *)buf + len;
    }

    submit_bio(rw, bio);
    wait_for_completion(&wait);

    if (!bio_flagged(bio, BIO_UPTODATE))
        ret = -EIO;

out:
    bio_put(bio);
    return ret < 0 ? ret : 0;
}
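The kernel-doc above spells out the buffer-sizing and alignment rules. A hedged caller sketch (hfsplus_read_sector_example() is a hypothetical name, not part of the HFS+ sources) could look like the following; the caller owns buf, must keep it alive while using *data, and frees it afterwards:

static int hfsplus_read_sector_example(struct super_block *sb,
				       sector_t sector, void **data)
{
	/* the buffer must cover a full hfsplus_min_io_size(sb) unit */
	void *buf = kmalloc(hfsplus_min_io_size(sb), GFP_NOIO);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* on success, *data points at the requested sector inside buf */
	ret = hfsplus_submit_bio(sb, sector, buf, data, READ);
	if (ret)
		kfree(buf);
	return ret;
}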
Example no. 4
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
Example no. 5
static struct bio *
mpage_alloc(struct block_device *bdev,
            sector_t first_sector, int nr_vecs,
            gfp_t gfp_flags)
{
    struct bio *bio;

    /* Restrict the given (page cache) mask for slab allocations */
    gfp_flags &= GFP_KERNEL;
    bio = bio_alloc(gfp_flags, nr_vecs);

    if (bio == NULL && (current->flags & PF_MEMALLOC)) {
        while (!bio && (nr_vecs /= 2))
            bio = bio_alloc(gfp_flags, nr_vecs);
    }

    if (bio) {
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = first_sector;
    }
    return bio;
}
Example no. 6
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
Example no. 7
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio;
	int ret;

	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_size(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	while (sector < end_sector) {

		bio = bio_alloc(gfp_mask, 0);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		ret = submit_bio_wait(bio);
		bio_put(bio);

		if (ret)
			return ret;

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();

	}

	return 0;
}
Example no. 8
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = ex->ee_block;
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* bi_sector is in 512-byte units, pblk in fs blocks */
		bio->bi_sector = pblk << (inode->i_sb->s_blocksize_bits - 9);
		err = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (err != inode->i_sb->s_blocksize) {
			/* bio_add_page() returns the byte count it added */
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		/* advance to the next block of the extent */
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
Example no. 9
static int ext4_check_fs_type(const char *bdev_name)
{
	struct ext4_super_block *e4_sb;
	struct bio *bio;
	char buff[EXT4_SUPER_BLK_SIZE];
	struct block_device *bdev;
	uint32_t fc, frc, fi;
	int ret;

	bdev = bdev_get(bdev_name);
	if (NULL == bdev) {
		DPRINT("bdev %s not found!\n", bdev_name);
		return -ENODEV;
	}

	bio = bio_alloc();
	if (!bio)
		return -ENOMEM;

	bio->bdev = bdev;
	bio->sect = 1024 / SECT_SIZE;
	bio->size = sizeof(buff);
	bio->data = buff;
	submit_bio(READ, bio);
	// TODO: check flags here
	bio_free(bio);

	e4_sb = (struct ext4_super_block *)buff;

	if (0xEF53 != e4_sb->s_magic) {
		DPRINT("%s is not \"ext4\" fs!\n", bdev_name);
		return -EINVAL;
	}

	fc = e4_sb->s_feature_compat;
	fi = e4_sb->s_feature_incompat;
	frc = e4_sb->s_feature_ro_compat;


	ret = ck_ext4_feature(fc, frc, fi);

#ifdef CONFIG_DEBUG
	extern void ext_sb_list(struct ext4_super_block * esb);
	if (ret == 0) {
		ext_sb_list(e4_sb);
	}
#endif

	return ret;
}
Example no. 10
static struct flat_binder_object* bio_alloc_obj(struct binder_io* bio) {
  struct flat_binder_object* obj;

  obj = bio_alloc(bio, sizeof(*obj));

  if (obj && bio->offs_avail) {
    bio->offs_avail--;
    *bio->offs++ = ((char*) obj) - ((char*) bio->data0);
    return obj;
  }

  bio->flags |= BIO_F_OVERFLOW;
  return NULL;
}
Example no. 11
static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lc->logdev->bdev;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}
Example no. 12
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= REQ_HARDBARRIER;
	rw |= REQ_UNPLUG | REQ_SYNC;

 retry:
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~REQ_HARDBARRIER;
		bio_put(bio);
		goto retry;
	}
 out:
	bio_put(bio);
	return ok;
}
Example no. 13
static struct bio* kr_create_bio(KrDevice* dev, struct page* page, int sector)
{
    struct bio* bio = bio_alloc(GFP_NOIO, 1);

    if (!bio)
        return NULL;

    /* setup bio. */
    bio->bi_bdev = dev->bdev;
    bio->bi_sector = sector;

    bio_add_page(bio, page, KR_BLOCK_SIZE, 0);

    return bio;
}
Example no. 14
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
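Example 14 leans on a struct bio_batch and a bio_batch_end_io() completion callback that are not shown. A hedged reconstruction, consistent with the done/error/wait fields used above and assuming the era in which the end_io callback takes only the bio and reads bio->bi_error (not copied from blk-lib.c):

struct bio_batch {
	atomic_t		done;	/* starts at 1, +1 per submitted bio */
	int			error;	/* first meaningful error seen */
	struct completion	*wait;	/* completed on the last decrement */
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}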
Example no. 15
static int vbc_blk_access(struct page *page, sector_t sector, bool is_read)
{
	struct block_device *bdev;
	struct bio *bio;
	int err, rq;
	fmode_t devmode = is_read ? FMODE_READ : FMODE_WRITE;

	bdev = vbc_blk_get_device(config.phandle, devmode);
	if (IS_ERR(bdev)) {
		pr_err("could not open block dev\n");
		return PTR_ERR(bdev);
	}

	/* map the sector to page */
	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		err = -ENOMEM;
		goto unwind_bdev;
	}
	bio->bi_bdev	= bdev;
	bio->bi_sector	= sector;
	bio->bi_vcnt	= 1;
	bio->bi_idx	= 0;
	bio->bi_size	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_page	= page;
	bio->bi_io_vec[0].bv_len	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_offset	= 0;

	/* submit bio */
	rq = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
	if (!is_read)
		rq |= REQ_WRITE;

	vbc_blk_submit_bio(bio, rq);

	/* vbc_blk_endio passes up any error in bi_private */
	err = (int)bio->bi_private;
	bio_put(bio);

unwind_bdev:
	if (!is_read) {
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
	}
	blkdev_put(bdev, devmode);

	return err;
}
Example no. 16
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
	}
	return bio;
}
Example no. 17
void bio_put_ref(struct binder_io* bio, uint32_t handle) {
  struct flat_binder_object* obj;

  if (handle)
    obj = bio_alloc_obj(bio);
  else
    obj = bio_alloc(bio, sizeof(*obj));

  if (!obj)
    return;

  obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
  obj->type = BINDER_TYPE_HANDLE;
  obj->handle = handle;
  obj->cookie = 0;
}
Example no. 18
/**
 * __blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
 */
int __blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
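A hedged usage sketch for the signature above (flush_backing_device_example() is a hypothetical name): flush a filesystem's backing device and report the error location if the driver filled it in.

static int flush_backing_device_example(struct super_block *sb)
{
	sector_t error_sector = 0;
	int ret;

	ret = __blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, &error_sector);
	if (ret == -EIO)
		printk(KERN_ERR "flush failed near sector %llu\n",
		       (unsigned long long)error_sector);
	return ret;
}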
Example no. 19
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question. Does not wait.
 */
int blkdev_issue_discard(struct block_device *bdev,
			 sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!q->prepare_discard_fn)
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 0);
		if (!bio)
			return -ENOMEM;

		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;

		bio->bi_sector = sector;

		if (nr_sects > q->max_hw_sectors) {
			bio->bi_size = q->max_hw_sectors << 9;
			nr_sects -= q->max_hw_sectors;
			sector += q->max_hw_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		bio_get(bio);
		submit_bio(DISCARD_BARRIER, bio);

		/* Check if it failed immediately */
		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
}
Example no. 20
void bio_put_ref(struct binder_io *bio, void *ptr)
{
    struct binder_object *obj;

    if (ptr)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->pointer = ptr;
    obj->cookie = 0;
}
Example no. 21
int vecio(int rw, struct block_device *dev, loff_t offset, unsigned vecs, struct bio_vec *vec,
	bio_end_io_t endio, void *info)
{
	BUG_ON(vecs > bio_get_nr_vecs(dev));
	struct bio *bio = bio_alloc(GFP_NOIO, vecs);
	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = dev;
	bio->bi_sector = offset >> 9;
	bio->bi_end_io = endio;
	bio->bi_private = info;
	bio->bi_vcnt = vecs;
	memcpy(bio->bi_io_vec, vec, sizeof(*vec) * vecs);
	while (vecs--)
		bio->bi_size += bio->bi_io_vec[vecs].bv_len;
	submit_bio(rw, bio);
	return 0;
}
Example no. 22
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}
Example no. 23
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_private = &wait;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be copied
	 * from rq->sector.
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	ret = 0;
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
Example no. 24
File: dm.c  Project: wxlong/Test
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	memcpy(clone->bi_io_vec, bv, sizeof(*bv));

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
Example no. 25
/**
 * blkmtd_add_page - add a page to the current BIO
 * @bio: bio to add to (NULL to alloc initial bio)
 * @blkdev: block device
 * @page: page to add
 * @pagecnt: pages left to add
 *
 * Adds a page to the current bio, allocating it if necessary. If it cannot be
 * added, the current bio is written out and a new one is allocated. Returns
 * the new bio to add or NULL on error
 */
static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev,
                                   struct page *page, int pagecnt)
{

retry:
    if(!bio) {
        bio = bio_alloc(GFP_KERNEL, pagecnt);
        if(!bio)
            return NULL;
        bio->bi_sector = page->index << (PAGE_SHIFT-9);
        bio->bi_bdev = blkdev;
    }

    if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
        blkmtd_write_out(bio);
        bio = NULL;
        goto retry;
    }
    return bio;
}
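The kernel-doc above defines a small contract: pass NULL on the first call, feed back the returned bio on every later call, and flush whatever is left at the end. A hedged caller sketch (blkmtd_write_pages_example() is a hypothetical name; blkmtd_write_out() is the same flush helper the snippet already calls):

static int blkmtd_write_pages_example(struct block_device *blkdev,
				      struct page **pages, int pagecnt)
{
	struct bio *bio = NULL;
	int i;

	/* pages are assumed to be index-consecutive on the device */
	for (i = 0; i < pagecnt; i++) {
		bio = blkmtd_add_page(bio, blkdev, pages[i], pagecnt - i);
		if (!bio)
			return -ENOMEM;
	}
	/* write out whatever is still queued in the last bio */
	if (bio)
		blkmtd_write_out(bio);
	return 0;
}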
Example no. 26
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}
Example no. 27
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOBUFS;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_NOFS, 1);
	if (unlikely(!bio)) {
		__free_page(page);
		return -ENOBUFS;
	}

	bio->bi_sector = sector * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC, bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}
	p = kmap(page);
	gfs2_sb_in(&sdp->sd_sb, p);
	kunmap(page);
	__free_page(page);
	return 0;
}
Example no. 28
static int test_bio_alloc(void)
{
	int res = 0;
	res = test_do_bio_alloc(2);
	if(res < 0){
		printk("can not alloc bio for %d\n",2);
		return -1;
	}

	res = test_do_bio_alloc(8);
	if(res < 0){
		printk("can not alloc bio for %d\n",8);
		return -1;
	}
	res = test_do_bio_alloc(32);
	if(res < 0){
		printk("can not alloc bio for %d\n",32);
		return -1;
	}
	res = test_do_bio_alloc(96);
	if(res < 0){
		printk("can not alloc bio for %d\n",96);
		return -1;
	}
	res = test_do_bio_alloc(BIO_MAX_PAGES);
	if(res < 0){
		printk("can not alloc bio for %d\n",BIO_MAX_PAGES);
		return -1;
	}


	tbiop = bio_alloc(GFP_KERNEL , BIO_MAX_PAGES);
	if(tbiop == NULL ) {
		printk("tbio: bio_alloc failed\n");
		return -1;
	}

	return 0;
}
Example no. 29
static int test_bio_alloc(void)
{
	if (test_do_bio_alloc(2) < 0) {
		prk_err("can not alloc bio for %d", 2);
		return -1;
	}

	if (test_do_bio_alloc(8) < 0) {
		prk_err("can not alloc bio for %d", 8);
		return -1;
	}

	if (test_do_bio_alloc(32) < 0) {
		prk_err("can not alloc bio for %d", 32);
		return -1;
	}

	if (test_do_bio_alloc(96) < 0) {
		prk_err("can not alloc bio for %d", 96);
		return -1;
	}

	if (test_do_bio_alloc(BIO_MAX_PAGES) < 0) {
		prk_err("can not alloc bio for %d", BIO_MAX_PAGES);
		return -1;
	}

	tbiop = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
	if (tbiop == NULL) {
		prk_err("bio_alloc failed");
		return -1;
	}

	tbiop->bi_bdev = tbio_dev.bdev;
	tbiop->bi_sector = 0;

	return 0;
}
Example no. 30
/* read one page from the block device */
static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
{
    struct bio *bio;
    struct completion event;
    int err = -ENOMEM;

    if(PageUptodate(page)) {
        DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index);
        unlock_page(page);
        return 0;
    }

    ClearPageUptodate(page);
    ClearPageError(page);

    bio = bio_alloc(GFP_KERNEL, 1);
    if(bio) {
        init_completion(&event);
        bio->bi_bdev = dev->blkdev;
        bio->bi_sector = page->index << (PAGE_SHIFT-9);
        bio->bi_private = &event;
        bio->bi_end_io = bi_read_complete;
        if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
            submit_bio(READ_SYNC, bio);
            wait_for_completion(&event);
            err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
            bio_put(bio);
        }
    }

    if(err)
        SetPageError(page);
    else
        SetPageUptodate(page);
    flush_dcache_page(page);
    unlock_page(page);
    return err;
}