Example #1
0
/*
 * Module init: sanity-check that the configured block device(s) can be
 * opened exclusively, then release them immediately.
 *
 * The success path deliberately returns non-zero so the one-shot test
 * module is unloaded right after running.  Fix from review: the error
 * path computed an errno in `err` but then returned a hard-coded -1,
 * discarding the real cause; it now returns `err`.
 */
static int __init test_init(void)
{
	dev_t dev;
	int err = 0;
	struct block_device *bdev;

	LOGe("BUILD_DATE %s\n", BUILD_DATE);

	/* Test exclusive open/close by path, if a path was configured. */
	if (path_) {
		bdev = blkdev_get_by_path(
			path_, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	/* Test exclusive open/close by device number, if both were given. */
	if (major_ != UINT_MAX && minor_ != UINT_MAX) {
		dev = MKDEV(major_, minor_);
		bdev = blkdev_get_by_dev(
			dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}
	LOGn("succeeded.\n");
	return -1; /* intentional: fail the load so the module unloads */

error0:
	LOGn("failed %d.\n", err);
	return err; /* was -1, which hid the actual errno from the loader */
}
Example #2
0
/*
 * Opens the backing block device and initializes the dedup block
 * structures, exactly once.  Later calls are no-ops.
 *
 * Returns 0 on success (or if already initialized), -1 on failure.
 */
int dedup_calc(void)
{
	int init_failed;

	/* Already initialized: nothing to do. */
	if (!need_to_init)
		return 0;

	/* Grab a reference to the backing block device. */
	dedup_bdev = get_our_bdev();
	if (!dedup_bdev) {
		printk(KERN_ERR "get bdev failed.\n");
		return -1;
	}

	/* Clamp the configured block count to the supported maximum. */
	if (blocks_count > BLOCKS_MAX_COUNT)
		blocks_count = BLOCKS_MAX_COUNT;

	printk(KERN_ERR "blocks count = %ld (max = %ld)\n", blocks_count, BLOCKS_MAX_COUNT);
	printk(KERN_ERR "each block logical size is (%ld)\n", dedup_get_block_size());

	/* Build the block bookkeeping; the device is dropped either way. */
	init_failed = dedup_init_blocks();
	blkdev_put(dedup_bdev, FMODE_READ|FMODE_WRITE);
	dedup_bdev = NULL;

	if (init_failed)
		return -1;

	need_to_init = 0;
	printk(KERN_ERR "blocks init done!\n");
	return 0;
}
Example #3
0
/*
 * Module exit: stop accepting AoE packets, detach the device list
 * under the lock, then for each device wait until it is idle before
 * releasing its block device and kobject.  Finally stop the worker
 * thread and purge the skb queues.
 */
static __exit void kvblade_module_exit(void)
{
	struct aoedev *d, *nd;

	printk("Testing exiting\n");
	dev_remove_pack(&pt);	/* no new packets arrive after this */
	spin_lock(&lock);
	d = devlist;
	devlist = NULL;		/* detach list so nothing else walks it */
	spin_unlock(&lock);
	for (; d; d=nd) {
		nd = d->next;
		/* poll until in-flight requests on this device drain */
		while (atomic_read(&d->busy))
			msleep(100);
		blkdev_put(d->blkdev, FMODE_READ|FMODE_WRITE);
		
		kobject_del(&d->kobj);
		kobject_put(&d->kobj);	/* drop our reference */
	}
	kthread_stop(task);
	wait_for_completion(&ktrendez);	/* wait for thread rendezvous */
	skb_queue_purge(&skb_outq);
	skb_queue_purge(&skb_inq);
	
	kobject_del(&kvblade_kobj);
	kobject_put(&kvblade_kobj);
}
Example #4
0
/*
 * Read the hibernation image back from swap into the snapshot.
 *
 * Expects resume_bdev to have been opened earlier (e.g. by
 * swsusp_check()); drops it with blkdev_put() before returning.
 * Returns 0 on success or a negative error code.
 */
int swsusp_read(void)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	if (IS_ERR(resume_bdev)) {
		pr_debug("swsusp: block device not initialised\n");
		return PTR_ERR(resume_bdev);
	}

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	/* First snapshot page receives the image header. */
	error = snapshot_write_next(&snapshot, PAGE_SIZE);
	if (error < PAGE_SIZE)
		/* NOTE(review): this early return leaves resume_bdev open —
		 * presumably the caller closes it via swsusp_close(); confirm. */
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, swsusp_header.image);
	if (!error)
		error = swap_read_page(&handle, header);
	if (!error)
		error = load_image(&handle, &snapshot, header->pages - 1);
	release_swap_reader(&handle);

	blkdev_put(resume_bdev);

	if (!error)
		pr_debug("swsusp: Reading resume file was successful\n");
	else
		pr_debug("swsusp: Error %d resuming\n", error);
	return error;
}
Example #5
0
int swsusp_check(void)
{
	int error;

	resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
	if (!IS_ERR(resume_bdev)) {
		set_blocksize(resume_bdev, PAGE_SIZE);
		memset(&swsusp_header, 0, sizeof(swsusp_header));
		if ((error = bio_read_page(0, &swsusp_header)))
			return error;
		if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
			memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
			/* Reset swap signature now */
			error = bio_write_page(0, &swsusp_header);
		} else {
			return -EINVAL;
		}
		if (error)
			blkdev_put(resume_bdev);
		else
			pr_debug("swsusp: Signature found, resuming\n");
	} else {
		error = PTR_ERR(resume_bdev);
	}

	if (error)
		pr_debug("swsusp: Error %d check for resume file\n", error);

	return error;
}
Example #6
0
/*
 * /proc read callback: reads block 0 of BDEV_NAME and prints its first
 * byte.
 *
 * Fixes from review: the block device was leaked when kmalloc() or the
 * disk read failed, and the "succeeded" message was printed even on
 * the error path.  All exits now run through goto-chain cleanup.
 */
int test3(char *buf, char **start, off_t offset, int count, int *eof,
        void *data)
{
    struct block_device *bdev;
    int ret = 0;
    void *bytes;

    bdev = blkdev_get_by_path(BDEV_NAME, BDEV_MODE,  NULL);
    if (IS_ERR(bdev))
    {
	    printk("Cannot open block device for CF\n");
	    return -EFAULT;
    }

    bytes = kmalloc(PAGE_SIZE, GFP_KERNEL);
    if (!bytes) {
	    ret = -ENOMEM;
	    goto out_blkdev;
    }

    if ((ret = bd_read_block_from_disk(bdev, 0, bytes)))
	    goto out_free;

    printk(KERN_INFO "First byte is %c\n", ((char*)bytes)[0]);
    printk(KERN_INFO "BDEV test 3 succeeded\n");

out_free:
    kfree(bytes);
out_blkdev:
    blkdev_put(bdev, BDEV_MODE);
    return ret;
}
Example #7
0
/*
 * Used for debug, to see if we read the right block.
 *
 * Fix from review: the block-device reference obtained from
 * get_our_bdev() was leaked when kmalloc() of the data buffer failed;
 * it is now released on that path as well.
 */
void print_block(int block_num)
{
	char *curr_data;
	size_t block_size;

	// Get BDEV
	dedup_bdev = get_our_bdev();
	if(!dedup_bdev) {
		printk("get bdev failed.\n");
		return;
	}

	// Read block
	block_size = dedup_get_block_size();
	curr_data = (char *)kmalloc(block_size, GFP_KERNEL);
	if (!curr_data) {
		printk("Failed to kmalloc buf to store block data.\n");
		goto out_put;
	}

	read_block(curr_data, block_size, block_num);

	// Print block
	printk("block no.%d: \"%s\"\n", block_num, curr_data);

	kfree(curr_data);
out_put:
	// Release the device reference on every path
	blkdev_put(dedup_bdev, FMODE_READ|FMODE_WRITE);
	dedup_bdev = NULL;
}
/*
 * Remove all inodes in the system for a device, delete the
 * partitions and make device unusable by setting its size to zero.
 */
void dasd_destroy_partitions(struct dasd_block *block)
{
	/* The two structs have 168/176 byte on 31/64 bit. */
	struct blkpg_partition bpart;
	struct blkpg_ioctl_arg barg;
	struct block_device *bdev;

	/*
	 * Get the bdev pointer from the device structure and clear
	 * device->bdev to lower the offline open_count limit again.
	 */
	bdev = block->bdev;
	block->bdev = NULL;

	/*
	 * See fs/partition/check.c:delete_partition
	 * Can't call delete_partitions directly. Use ioctl.
	 * The ioctl also does locking and invalidation.
	 */
	memset(&bpart, 0, sizeof(struct blkpg_partition));
	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
	barg.data = (void __force __user *) &bpart;
	barg.op = BLKPG_DEL_PARTITION;
	/* Walk partitions from the highest minor down to 1; minor 0 is
	 * the whole device and is not a deletable partition. */
	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);

	invalidate_partition(block->gdp, 0);
	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
	blkdev_put(bdev, FMODE_READ);
	set_capacity(block->gdp, 0);	/* size 0 makes the device unusable */
}
Example #9
0
File: dev.c  Project: AK101111/linux
/*
 * Release a pnfs_block_dev: recurse into composite devices; for a leaf
 * device, unregister any persistent-reservation key and drop the bdev
 * reference.
 */
static void
bl_free_device(struct pnfs_block_dev *dev)
{
	int child;

	if (!dev->nr_children) {
		/* Leaf device: undo the PR registration before closing. */
		if (dev->pr_registered) {
			const struct pr_ops *pr =
				dev->bdev->bd_disk->fops->pr_ops;

			if (pr->pr_register(dev->bdev, dev->pr_key, 0, false))
				pr_err("failed to unregister PR key.\n");
		}

		if (dev->bdev)
			blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
		return;
	}

	/* Composite device: free every child, then the array itself. */
	for (child = 0; child < dev->nr_children; child++)
		bl_free_device(&dev->children[child]);
	kfree(dev->children);
}
Example #10
0
/* assumes ctl_mtx is held */
/*
 * Detach and close an SSD cache device: unhook it from the cache
 * engine, take it offline, wait for in-flight references to drain,
 * then release the underlying block device.
 */
static void _unload_ssd(struct ssd_info * ssd)
{
	if (NULL == ssd)
		return;

	/* first remove it from the set of cache devices, no more
	 * requests will be queued to this device beyond this point */
	if (ssd->cdev) {
		sce_rmcdev(ssd->cdev);
		ssd->cdev = NULL;
	}

	/* make offline and quiesce requests already in flight */
	if (ssd->online) {
		ssd->online = 0;
		list_del_rcu(&ssd->list);
		wmb();
		synchronize_rcu(); /* wait for references to quiesce */
		while(atomic_read(&ssd->nr_ref))
			schedule();
		gctx.nr_ssd--;
	}

	if (ssd->bdev) {
		/* blkdev_put() taking a mode exists since 2.6.38; the
		 * 2.6.28+ equivalent is close_bdev_exclusive(). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
		blkdev_put(ssd->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
		close_bdev_exclusive(ssd->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
#else
		ERR("Kernel version < 2.6.28 currently not supported.\n");
#endif
		ssd->bdev = NULL;
	}
}
/*
 * Read `size` bytes from the RPMB block device at user_dev_path into
 * read_buff, one 512-byte sector at a time via synchronous stack bios.
 *
 * Returns `size` on success, -ENODEV if the device cannot be opened,
 * or -EIO when a bio completes with an error.
 *
 * NOTE(review): bi_sector stays 0 on every iteration — presumably the
 * target address travels inside the RPMB data frame, as the comment in
 * block_write() states; confirm.
 * NOTE(review): the final memcpy always copies a full 512 bytes, so
 * read_buff is assumed to hold a multiple of 512 bytes — confirm with
 * callers.
 */
static int block_read(const char *user_dev_path, /* Path to rpmb device */
		char *read_buff, /* User buffer */
		size_t size) /* Size of data to read (in bytes) */
{
	int i = 0, index = 0;
	int err;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	/* block_read itself is passed as the holder cookie. */
	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_READ, block_read);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	/* Last sector index covered by `size` bytes. */
	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		/* Build a one-segment synchronous READ bio on the stack. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		/* Copy this sector out of the shared bounce buffer. */
		memcpy(read_buff + index, bio_buff, 512);
		index += 512;
	}

	err = size;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);

	return err;
}
Example #12
0
/*
 * Release the resume block device opened earlier, if it was
 * successfully initialised.
 */
void swsusp_close(void)
{
	if (!IS_ERR(resume_bdev)) {
		blkdev_put(resume_bdev);
		return;
	}

	pr_debug("swsusp: block device not initialised\n");
}
Example #13
0
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	struct block_device *bdev = d->dm_dev.bdev;

	if (bdev) {
		bd_release_from_disk(bdev, dm_disk(md));
		blkdev_put(bdev);
		d->dm_dev.bdev = NULL;
	}
}
Example #14
0
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d)
{
	struct block_device *bdev = d->bdev;

	if (bdev) {
		bd_release(bdev);
		blkdev_put(bdev);
		d->bdev = NULL;
	}
}
Example #15
0
/*
 * File release hook: drop the cached sector and the block device taken
 * at open time, then free the per-open private data.
 */
int release_cmdline(struct inode *i, struct file *f)
{
    struct cmdline_priv *p = f->private_data;

    put_dev_sector(p->sect);
    blkdev_put(p->bdev, f->f_mode);
    kfree(p);
    return 0;
}
Example #16
0
/*
 * Close the hibernation image device with the same mode it was opened
 * with, if it was successfully initialised.
 */
void swsusp_close(fmode_t mode)
{
	if (!IS_ERR(hib_resume_bdev)) {
		blkdev_put(hib_resume_bdev, mode);
		return;
	}

	pr_debug("PM: Image device not initialised\n");
}
Example #17
0
File: raw.c  Project: sarnobat/knoppix
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	down(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (bdev) {
		err = blkdev_get(bdev, filp->f_mode, 0, BDEV_RAW);
		if (err)
			goto out;
		/* Pin the bdev inode while the device is held open. */
		igrab(bdev->bd_inode);
		err = bd_claim(bdev, raw_open);
		if (err) {
			/* claim failed: undo the blkdev_get above */
			blkdev_put(bdev, BDEV_RAW);
			goto out;
		}
		err = set_blocksize(bdev, bdev_hardsect_size(bdev));
		if (err) {
			/* undo both the claim and the get */
			bd_release(bdev);
			blkdev_put(bdev, BDEV_RAW);
			goto out;
		}
		filp->f_flags |= O_DIRECT;
		/* First opener redirects the raw inode's page cache to
		 * the block device's address space. */
		if (++raw_devices[minor].inuse == 1)
			filp->f_dentry->d_inode->i_mapping =
				bdev->bd_inode->i_mapping;
	}
	filp->private_data = bdev;
out:
	up(&raw_mutex);
	return err;
}
/* Tear down an iblock backstore: close the exclusively-held block
 * device, free the bio set, then the device structure itself. */
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
/* Tear down an iblock backstore attached to a se_device: close the
 * exclusively-held block device, free the bio set, then the struct. */
static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
Example #20
0
/*
 * Kill the dumpfs superblock: free the per-fs info, run the generic
 * shutdown, then drop the exclusive block-device reference with the
 * mode it was opened with.
 */
static void
dumpfs_kill_sb(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	kfree(sb->s_fs_info);
	generic_shutdown_super(sb);
	blkdev_put(bdev, mode | FMODE_EXCL);
}
/*
 * Zero every sector of the panic partition.  Looks the device up by
 * path, opens it for writing, then issues synchronous zero-filled
 * WRITE bios (one page at a time, a smaller tail bio at the end)
 * until the whole partition is covered.
 *
 * NOTE(review): neither the success path nor the blkdev_get() failure
 * path calls bdput() on the reference returned by lookup_bdev() —
 * confirm whether blkdev_get()/blkdev_put() consume that reference in
 * this kernel version.
 */
static void mmc_panic_erase(void)
{
	int i = 0;
	int err;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		return;
	}
	err = blkdev_get(bdev, FMODE_WRITE);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		return;
	}
	/* ctx->bounce: PAGE_SIZE scratch buffer, zeroed before writing. */
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < bdev->bd_part->nr_sects) {
		/* Build a one-segment synchronous write bio on the stack. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		/* Full page (8 sectors) when possible, otherwise the tail. */
		if (bdev->bd_part->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);

	return;
}
/*
 * Write `size` bytes from write_buff to the RPMB block device at
 * user_dev_path, one 512-byte sector at a time via synchronous stack
 * bios.  bi_sector stays 0 because the target address travels inside
 * the RPMB data frame.
 *
 * Fix from review: the sibling block_read() checks BIO_UPTODATE after
 * each bio, but this function ignored I/O failures and always returned
 * 0.  It now fails fast with -EIO, still returning 0 on success.
 */
static int block_write(const char *user_dev_path, /* Path to rpmb device node */
		const char *write_buff, /* buffer to write to rpmb */
		size_t size, /* size of data to write (in bytes) */
		int flags) /* REQ_META flags for Reliable writes */
{
	int i = 0, index = 0;
	int err = 0;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path,
				  FMODE_WRITE, block_write);

	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		      user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);

	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		/* Copy data from user buffer to bio buffer */
		memcpy(bio_buff, write_buff + index, 512);
		index += 512;

		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		/* Set to 0 because the addr is part of RPMB data frame */
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(WRITE | flags, &bio);
		wait_for_completion(&complete);
		/* Fail fast on I/O error, mirroring block_read(). */
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			break;
		}
	}

	blkdev_put(bdev, FMODE_WRITE);

	return err;
}
Example #23
0
File: rd.c  Project: nhanh0/hah
/*
 * Release hook for /dev/initrd: when the last user closes it, free the
 * initrd memory; always drop the bdev reference taken at open time.
 */
static int initrd_release(struct inode *inode,struct file *file)
{
	extern void free_initrd_mem(unsigned long, unsigned long);

	lock_kernel();
	initrd_users--;
	if (initrd_users == 0) {
		free_initrd_mem(initrd_start, initrd_end);
		initrd_start = 0;
	}
	unlock_kernel();
	blkdev_put(inode->i_bdev, BDEV_FILE);
	return 0;
}
Example #24
0
/*
 * Erase the apanic partition by writing zero-filled synchronous bios
 * across all of its sectors.  The partition's dev_t is used to open
 * the block device.
 */
static void mmc_panic_erase(void)
{
	int i = 0;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(drv_ctx.hd);

	if (!ctx->hd || !ctx->mmc_panic_ops)
		goto out_err;

	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		goto out_err;
	}
	/* ctx->bounce: PAGE_SIZE scratch buffer, zeroed before writing. */
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->hd->nr_sects) {
		/* One-segment synchronous write bio built on the stack. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		/* Full page (8 sectors) when possible, else the tail. */
		if (ctx->hd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->hd->nr_sects - i) * 512;
			bio.bi_size = (ctx->hd->nr_sects - i) * 512;
			i = ctx->hd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #25
0
/*
 * Erase the apanic partition by writing zero-filled synchronous bios
 * across all of its sectors.  The device is opened by the dev_t built
 * from the MMC disk's major/minor and partition number.
 */
static void mmc_panic_erase(void)
{
	int i = 0;
	dev_t devid;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	devid = MKDEV(ctx->mmchd->major, ctx->mmchd->first_minor +
		ctx->mmchd->partno);
	bdev = open_by_devnum(devid, FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "apanic: open device failed with %ld\n",
			PTR_ERR(bdev));
		goto out_err;
	}
	/* ctx->bounce: PAGE_SIZE scratch buffer, zeroed before writing. */
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < ctx->mmchd->nr_sects) {
		/* One-segment synchronous write bio built on the stack. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		/* Full page (8 sectors) when possible, else the tail. */
		if (ctx->mmchd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->mmchd->nr_sects - i) * 512;
			bio.bi_size = (ctx->mmchd->nr_sects - i) * 512;
			i = ctx->mmchd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #26
0
/*
 * Module teardown: remove the gendisk and request queue, drop the
 * exclusive reference on the target device, then unregister the block
 * major and free the driver state.
 */
static void pt_exit(void)
{
	struct gendisk *gd = passthrough->gd;

	if (gd) {
		del_gendisk(gd);
		put_disk(gd);
	}
	if (passthrough->queue)
		blk_cleanup_queue(passthrough->queue);

	blkdev_put(passthrough->target_dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	unregister_blkdev(passthrough->major, "passthrough");
	kfree(passthrough);
}
Example #27
0
File: dev.c  Project: AK101111/linux
/*
 * Set up a SCSI volume for a pNFS block layout: open the device
 * (trying the dm-mpath udev path first, then the plain udev path),
 * record its size and mapping, and register the layout's
 * persistent-reservation key with the device.
 *
 * Returns 0 on success or a negative errno; on failure after the open
 * succeeded, the bdev reference is dropped before returning.
 */
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct block_device *bdev;
	const struct pr_ops *ops;
	int error;

	if (!bl_validate_designator(v))
		return -EINVAL;

	/* Prefer the multipath device node; fall back to the plain one. */
	bdev = bl_open_dm_mpath_udev_path(v);
	if (IS_ERR(bdev))
		bdev = bl_open_udev_path(v);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	d->bdev = bdev;

	d->len = i_size_read(d->bdev->bd_inode);
	d->map = bl_map_simple;
	d->pr_key = v->scsi.pr_key;

	pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
		d->bdev->bd_disk->disk_name, d->pr_key);

	/* The device must support persistent reservations. */
	ops = d->bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: block device %s does not support reservations.",
				d->bdev->bd_disk->disk_name);
		error = -EINVAL;
		goto out_blkdev_put;
	}

	error = ops->pr_register(d->bdev, 0, d->pr_key, true);
	if (error) {
		pr_err("pNFS: failed to register key for block device %s.",
				d->bdev->bd_disk->disk_name);
		goto out_blkdev_put;
	}

	/* Remembered so bl_free_device() unregisters the key on teardown. */
	d->pr_registered = true;
	return 0;

out_blkdev_put:
	blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE);
	return error;
}
Example #28
0
/*
 * Synchronously read or write one SECTOR_SIZE sector of the configured
 * block device, using `page` as the data buffer.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the completion status is smuggled back through
 * bio->bi_private and truncated with an (int) cast — fine for small
 * negative errnos, but confirm vbc_blk_endio really stores a plain
 * error code there.
 */
static int vbc_blk_access(struct page *page, sector_t sector, bool is_read)
{
	struct block_device *bdev;
	struct bio *bio;
	int err, rq;
	fmode_t devmode = is_read ? FMODE_READ : FMODE_WRITE;

	bdev = vbc_blk_get_device(config.phandle, devmode);
	if (IS_ERR(bdev)) {
		pr_err("could not open block dev\n");
		return PTR_ERR(bdev);
	}

	/* map the sector to page */
	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		err = -ENOMEM;
		goto unwind_bdev;
	}
	bio->bi_bdev	= bdev;
	bio->bi_sector	= sector;
	bio->bi_vcnt	= 1;
	bio->bi_idx	= 0;
	bio->bi_size	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_page	= page;
	bio->bi_io_vec[0].bv_len	= SECTOR_SIZE;
	bio->bi_io_vec[0].bv_offset	= 0;

	/* submit bio */
	rq = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
	if (!is_read)
		rq |= REQ_WRITE;

	vbc_blk_submit_bio(bio, rq);

	/* vbc_blk_endio passes up any error in bi_private */
	err = (int)bio->bi_private;
	bio_put(bio);

unwind_bdev:
	/* For writes, flush and drop cached pages before closing. */
	if (!is_read) {
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
	}
	blkdev_put(bdev, devmode);

	return err;
}
Example #29
0
/*
 * Module teardown: stop the forwarding thread and release the lower
 * block device (if one was bound), then remove our gendisk, major
 * number and request queue.
 */
static void __exit stackbd_exit(void)
{
	printk("stackbd: exit\n");

	if (stackbd.is_active) {
		kthread_stop(stackbd.thread);
		blkdev_put(stackbd.bdev_raw, STACKBD_BDEV_MODE);
		bdput(stackbd.bdev_raw);
	}

	del_gendisk(stackbd.gd);
	put_disk(stackbd.gd);
	unregister_blkdev(major_num, STACKBD_NAME);
	blk_cleanup_queue(stackbd.queue);
}
/*
 * sysfs store handler: clear MMC write protection on the partition
 * named in `buf` (under /dev/block/).
 *
 * Fixes from review:
 *  - lookup_bdev() returns ERR_PTR() on failure, never NULL, so the
 *    old `if (!target)` check could not catch a lookup error and the
 *    subsequent dereference would crash; use IS_ERR().
 *  - when the device was already partitioned (no blkdev_get() needed)
 *    the reference returned by lookup_bdev() was leaked; it is now
 *    dropped with bdput().
 */
static ssize_t
mmc_protect_clear(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
{
  char *device_path;
  struct block_device *target;
  u32 start;
  u32 size;
  bool device_holding = false;
  struct mmc_card *card;

  card = get_mmc_card();
  if (!card) {
    return count;
  }

  device_path = kmalloc(PATH_MAX + count, GFP_KERNEL);
  if (!device_path) {
    return -ENOMEM;
  }

  snprintf(device_path, PATH_MAX, "/dev/block/%s", buf);
  target = lookup_bdev(device_path);
  /* lookup_bdev() reports failure via ERR_PTR(), not NULL */
  if (IS_ERR(target)) {
    kfree(device_path);
    return count;
  }

  if (!target->bd_part) {
    if (blkdev_get(target, FMODE_READ | FMODE_NDELAY, 0)) {
      kfree(device_path);
      return count;
    }
    device_holding = true;
  }

  start = (u32)target->bd_part->start_sect;
  size = (u32)target->bd_part->nr_sects;

  clear_write_protect(card, start, size);
  if (device_holding) {
    blkdev_put(target, FMODE_READ | FMODE_NDELAY);
  } else {
    bdput(target); /* drop the reference taken by lookup_bdev() */
  }
  kfree(device_path);

  return count;
}