/*
 * reset_store - sysfs "reset" attribute handler for a zram device.
 *
 * Parses @buf as a decimal u16; any non-zero value requests a reset.
 * Refuses to reset a device that is currently held open (bd_holders),
 * flushes pending I/O first, then performs the reset under init_lock.
 *
 * Returns @len on success, or a negative errno:
 *   -ENOMEM  bdget_disk() failed
 *   -EBUSY   device has active holders
 *   -EINVAL  the parsed value was zero
 *   other    kstrtou16() parse error
 */
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	/*
	 * bdget_disk() may fail; the original dereferenced bdev before
	 * its (too-late) NULL check and never dropped the reference.
	 */
	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);	/* drop the reference taken by bdget_disk() */

	down_write(&zram->init_lock);
	if (zram->init_done)
		__zram_reset_device(zram);
	up_write(&zram->init_lock);

	return len;

out:
	bdput(bdev);
	return ret;
}
/*
 * reset_store - sysfs "reset" attribute handler for an xzram device.
 *
 * Parses @buf as a decimal unsigned long; any non-zero value requests
 * a reset. Refuses to reset a device that is currently held open
 * (bd_holders) and flushes pending I/O before resetting.
 *
 * Returns @len on success, or a negative errno:
 *   -ENOMEM  bdget_disk() failed
 *   -EBUSY   device has active holders
 *   -EINVAL  the parsed value was zero
 *   other    strict_strtoul() parse error
 */
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned long do_reset;
	struct xzram *xzram;
	struct block_device *bdev;

	xzram = dev_to_xzram(dev);
	bdev = bdget_disk(xzram->disk, 0);
	/*
	 * bdget_disk() may fail; the original dereferenced bdev before
	 * its (too-late) NULL check and never dropped the reference.
	 */
	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = strict_strtoul(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);	/* drop the reference taken by bdget_disk() */

	if (xzram->init_done)
		xzram_reset_device(xzram);

	return len;

out:
	bdput(bdev);
	return ret;
}
/*
 * sba_release - release (close) handler for the sba device node.
 *
 * Drops one usage reference from the global sba_device; when the last
 * opener goes away, the backing block device is flushed so no dirty
 * buffers are left behind.
 *
 * NOTE(review): the usage counter is read-modify-written without any
 * visible locking here — presumably serialized elsewhere; confirm.
 *
 * Always returns 0.
 */
int sba_release(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(inode);

	sba_device.usage--;
	if (sba_device.usage == 0)
		fsync_bdev(bdev);

	return 0;
}
/*
 * vbc_blk_access - synchronously read or write one sector.
 * @page:    page backing the single-sector transfer buffer
 * @sector:  sector number on the target block device
 * @is_read: true to read into @page, false to write from @page
 *
 * Opens the block device named by config.phandle, builds a one-segment
 * bio covering SECTOR_SIZE bytes of @page, and submits it synchronously.
 * On writes, the device is flushed and its page cache invalidated before
 * the reference is dropped.
 *
 * Returns 0 on success or a negative errno (device open failure, -ENOMEM,
 * or the completion status reported by vbc_blk_endio via bi_private).
 */
static int vbc_blk_access(struct page *page, sector_t sector, bool is_read)
{
	struct block_device *bdev;
	struct bio *bio;
	int err, rq;
	fmode_t devmode = is_read ? FMODE_READ : FMODE_WRITE;

	bdev = vbc_blk_get_device(config.phandle, devmode);
	if (IS_ERR(bdev)) {
		pr_err("could not open block dev\n");
		return PTR_ERR(bdev);
	}

	/* map the sector to page */
	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		err = -ENOMEM;
		goto unwind_bdev;
	}
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = SECTOR_SIZE;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = SECTOR_SIZE;
	bio->bi_io_vec[0].bv_offset = 0;

	/* submit bio */
	rq = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
	if (!is_read)
		rq |= REQ_WRITE;
	vbc_blk_submit_bio(bio, rq);

	/*
	 * vbc_blk_endio stashes the completion status in bi_private.
	 * Go through (long) so the pointer-to-int narrowing is explicit;
	 * the original direct (int) cast truncates a pointer on LP64 and
	 * is a compile error/warning on 64-bit builds.
	 */
	err = (int)(long)bio->bi_private;
	bio_put(bio);

unwind_bdev:
	if (!is_read) {
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
	}
	blkdev_put(bdev, devmode);

	return err;
}
/*
 * reset_store - sysfs "reset" attribute handler for a zram device.
 *
 * Parses @buf as a decimal unsigned long; any non-zero value requests
 * a reset. Refuses to reset a device that is currently held open
 * (bd_holders), flushes pending I/O, then resets under init_lock.
 *
 * Returns @len on success, or a negative errno:
 *   -ENOMEM  bdget_disk() failed
 *   -EBUSY   device has active holders
 *   -EINVAL  the parsed value was zero
 *   other    kstrtoul() parse error
 */
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned long do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	/* kstrtoul replaces the long-deprecated strict_strtoul */
	ret = kstrtoul(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);	/* drop the reference taken by bdget_disk() */

	down_write(&zram->init_lock);
	if (zram->init_done)
		__zram_reset_device(zram);
	up_write(&zram->init_lock);

	return len;

out:
	bdput(bdev);
	return ret;
}