Example #1
File: dev.c Project: AK101111/linux
static int
bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct block_device *bdev;
	dev_t dev;

	dev = bl_resolve_deviceid(server, v, gfp_mask);
	if (!dev)
		return -EIO;

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
			MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
		return PTR_ERR(bdev);
	}
	d->bdev = bdev;

	d->len = i_size_read(d->bdev->bd_inode);
	d->map = bl_map_simple;

	printk(KERN_INFO "pNFS: using block device %s\n",
		d->bdev->bd_disk->disk_name);
	return 0;
}
Example #2
/*
 * Returns a pointer to the block device used for dedup access.
 * The device is looked up by name, configured in bdev_name.
 * If there is no such device, NULL is returned.
 */
struct block_device *get_our_bdev(void)
{
	struct block_device *bdev;

	/* lookup_bdev() reports failure with ERR_PTR(), not NULL */
	bdev = lookup_bdev(bdev_name ? bdev_name : DEDUP_BDEV_NAME);
	if (IS_ERR(bdev))
		return NULL;

	bdev = blkdev_get_by_dev(bdev->bd_dev, FMODE_READ | FMODE_WRITE, NULL);
	return IS_ERR(bdev) ? NULL : bdev;
}
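A minimal caller sketch (the function name dedup_bdev_example is hypothetical, not part of the project) showing the reference returned by get_our_bdev() being released again with blkdev_put() using the same mode flags:

static int dedup_bdev_example(void)
{
	struct block_device *bdev = get_our_bdev();

	if (!bdev)
		return -ENODEV;

	pr_info("dedup: using device %u:%u\n",
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	/* Drop the reference taken by blkdev_get_by_dev(). */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
	return 0;
}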
Example #3
static int simplefs_load_journal(struct super_block *sb, int devnum)
{
    struct journal_s *journal;
    char b[BDEVNAME_SIZE];
    dev_t dev;
    struct block_device *bdev;
    int hblock, blocksize, len;
    struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);

    dev = new_decode_dev(devnum);
    printk(KERN_INFO "Journal device is: %s\n", __bdevname(dev, b));

    bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
    if (IS_ERR(bdev))
        return 1;
    blocksize = sb->s_blocksize;
    hblock = bdev_logical_block_size(bdev);
    len = SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED;

    journal = jbd2_journal_init_dev(bdev, sb->s_bdev, 1, -1, blocksize);
    if (!journal) {
        printk(KERN_ERR "Can't load journal\n");
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
        return 1;
    }
    journal->j_private = sb;

    sfs_sb->journal = journal;

    return 0;
}
Example #4
static int __init test_init(void)
{
	dev_t dev;
	int err = 0;
	struct block_device *bdev;

	LOGe("BUILD_DATE %s\n", BUILD_DATE);

	if (path_) {
		bdev = blkdev_get_by_path(
			path_, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	if (major_ != UINT_MAX && minor_ != UINT_MAX) {
		dev = MKDEV(major_, minor_);
		bdev = blkdev_get_by_dev(
			dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}
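	/* Note: both the success and error paths return -1, apparently so
	 * this throwaway test module is never left loaded after it runs. */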
	LOGn("succeeded.\n");
	return -1;

error0:
	LOGn("failed %d.\n", err);
	return -1;
}
Example #5
static void mmc_panic_erase(void)
{
	int i = 0;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(drv_ctx.hd);

	if (!ctx->hd || !ctx->mmc_panic_ops)
		goto out_err;

	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

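	/* Zero the whole panic partition in PAGE_SIZE (8-sector) chunks,
	 * reusing the single bounce page for every write. */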
	while (i < ctx->hd->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (ctx->hd->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (ctx->hd->nr_sects - i) * 512;
			bio.bi_size = (ctx->hd->nr_sects - i) * 512;
			i = ctx->hd->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
out_err:
	return;
}
Example #6
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	if (q && blk_queue_secdiscard(q))
		vbd->discard_secure = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}
Example #7
static dev_t find_devt_for_selftest(struct device *dev)
{
	int i, idx = 0;
	uint32_t count = 0;
	uint64_t size;
	uint64_t size_list[MAX_SCAN_PART];
	dev_t devt_list[MAX_SCAN_PART];
	dev_t devt_scan, devt;
	struct block_device *bdev;
	fmode_t fmode = FMODE_WRITE | FMODE_READ;

	do {
		for (i = 0; i < MAX_SCAN_PART; i++) {
			devt_scan = blk_lookup_devt("sda", i);
			bdev = blkdev_get_by_dev(devt_scan, fmode, NULL);
			if (IS_ERR(bdev))
				continue;

			size_list[idx] = (uint64_t)i_size_read(bdev->bd_inode);
			devt_list[idx] = devt_scan;
			idx++;
			blkdev_put(bdev, fmode);
		}

		if (!idx) {
			mdelay(100);
			count++;
			continue;
		}

		/* Pick the largest partition found. */
		for (i = 0; i < idx; i++) {
			if (i == 0) {
				size = size_list[i];
				devt = devt_list[i];
			} else if (size < size_list[i]) {
				size = size_list[i];
				devt = devt_list[i];
			}
		}

		return devt;
	} while (count < 100);

	dev_err(dev, "SCSI disk isn't initialized yet. It makes to fail FMP selftest\n");
	return (dev_t)0;
}
Example #8
static struct block_device *vbc_blk_get_device(phandle phandle, fmode_t devmode)
{
	struct device_node *dn;
	struct device *dev;

	dn = of_find_node_by_phandle(phandle);
	if (!dn)
		return ERR_PTR(-ENODEV);

	dev = bus_find_device(&platform_bus_type, NULL, dn, match_of_node);
	if (!dev)
		return ERR_PTR(-ENODEV);

	/*
	 * TODO(chrome-os-partner:16441): Search block_device from the dev
	 * struct we just found instead of hard-coding major and minor here.
	 */
	return blkdev_get_by_dev(MKDEV(MMC_BLOCK_MAJOR, 0), devmode, NULL);
}
Example #9
static dev_t find_devt_for_selftest(void)
{
	int i, idx = 0;
	uint64_t size;
	uint64_t size_list[MAX_SCAN_PART];
	dev_t devt_list[MAX_SCAN_PART];
	dev_t devt_scan, devt;
	struct block_device *bdev;
	fmode_t fmode = FMODE_WRITE | FMODE_READ;

	for (i = 0; i < MAX_SCAN_PART; i++) {
		devt_scan = blk_lookup_devt("sda", i);
		bdev = blkdev_get_by_dev(devt_scan, fmode, NULL);
		if (IS_ERR(bdev))
			continue;

		size_list[idx] = (uint64_t)i_size_read(bdev->bd_inode);
		devt_list[idx] = devt_scan;
		idx++;
		blkdev_put(bdev, fmode);
	}

	if (!idx)
		goto err;

	/* Pick the largest partition found. */
	for (i = 0; i < idx; i++) {
		if (i == 0) {
			size = size_list[i];
			devt = devt_list[i];
		} else if (size < size_list[i]) {
			size = size_list[i];
			devt = devt_list[i];
		}
	}

	return devt;

err:
	return (dev_t)0;
}
Example #10
/* Check if the root device is read-only (e.g. dm-verity is enabled).
 * This must be called after early kernel init, since then the rootdev
 * is available.
 */
static bool rootdev_readonly(void)
{
	bool rc;
	struct block_device *bdev;
	const fmode_t mode = FMODE_WRITE;

	bdev = blkdev_get_by_dev(ROOT_DEV, mode, NULL);
	if (IS_ERR(bdev)) {
		/* In this weird case, assume it is read-only. */
		pr_info("dev(%u,%u): FMODE_WRITE disallowed?!\n",
			MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
		return true;
	}

	rc = bdev_read_only(bdev);
	blkdev_put(bdev, mode);

	pr_info("dev(%u,%u): %s\n", MAJOR(ROOT_DEV), MINOR(ROOT_DEV),
		rc ? "read-only" : "writable");

	return rc;
}
Example #11
static void mmc_panic_notify_add(struct hd_struct *hd)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(hd);

	if (!ctx->mmc_panic_ops) {
		pr_err("apanic: found apanic partition, but apanic not "
				"initialized\n");
		return;
	}

	bdev = blkdev_get_by_dev(dev->devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n",
		       PTR_ERR(bdev));
		goto out;
	}

	ctx->hd = hd;
	page = virt_to_page(ctx->bounce);

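	/* Read the first page of the panic partition into the bounce buffer;
	 * it contains the panic header examined below. */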
	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);

	blkdev_put(bdev, FMODE_WRITE);	/* must match the blkdev_get_by_dev() mode */

	pr_err("apanic: Bound to mmc block device '%s(%u:%u)'\n",
	       dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));

	if (hdr->magic != PANIC_MAGIC) {
		pr_info("apanic: No panic data available\n");
		goto out;
	}

	if (hdr->version != PHDR_VERSION) {
		pr_info("apanic: Version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		goto out;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	pr_info("apanic: c(%u, %u) t(%u, %u) a(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length,
	       hdr->app_threads_offset, hdr->app_threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_console)
			pr_err("apanic: failed creating procfile\n");
		else {
			ctx->apanic_console->read_proc =
			    apanic_proc_read_mmc;
			ctx->apanic_console->write_proc =
			    apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data = (void *) 1;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_threads)
			pr_err("apanic: failed creating procfile\n");
		else {
			ctx->apanic_threads->read_proc =
			    apanic_proc_read_mmc;
			ctx->apanic_threads->write_proc =
			    apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data = (void *) 2;
		}
	}

	if (hdr->app_threads_length) {
		ctx->apanic_app_threads = create_proc_entry(
			"apanic_app_threads", S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_app_threads)
			pr_err("%s: failed creating procfile\n", __func__);
		else {
			ctx->apanic_app_threads->read_proc
					= apanic_proc_read_mmc;
			ctx->apanic_app_threads->write_proc = apanic_proc_write;
			ctx->apanic_app_threads->size = hdr->app_threads_length;
			ctx->apanic_app_threads->data = (void *) 3;
		}

	}

out:
	ctx->apanic_annotate = create_proc_entry("apanic_annotate",
				S_IFREG | S_IRUGO | S_IWUSR, NULL);
	if (!ctx->apanic_annotate)
		printk(KERN_ERR "%s: failed creating procfile\n", __func__);
	else {
		ctx->apanic_annotate->read_proc = apanic_proc_read_annotation;
		ctx->apanic_annotate->write_proc = apanic_proc_annotate;
		ctx->apanic_annotate->size = 0;
		ctx->apanic_annotate->data = NULL;
	}

	return;
}
Example #12
static int apanic_proc_read_mmc(char *buffer, char **start, off_t offset,
				int count, int *peof, void *dat)
{
	int i, index = 0;
	int ret;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	struct device *dev = part_to_dev(drv_ctx.hd);

	if (!ctx->hd || !ctx->mmc_panic_ops)
		return -EBUSY;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	switch ((int) dat) {
	case 1:		/* apanic_console */
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
	case 2:		/* apanic_threads */
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
	case 3:	/* apanic_app_threads */
		file_length = ctx->curr.app_threads_length;
		file_offset = ctx->curr.app_threads_offset;
		break;
	default:
		pr_err("Bad dat (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	if ((offset + count) > file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	bdev = blkdev_get_by_dev(dev->devt, FMODE_READ, NULL);
	if (IS_ERR(bdev)) {
		pr_err("apanic: open device failed with %ld\n", PTR_ERR(bdev));
		ret = PTR_ERR(bdev);
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);

	start_sect = (file_offset + offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

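	/* Copy the requested range one 512-byte sector at a time; the first
	 * and last sectors may be partial. */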
	for (i = start_sect; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			blkdev_put(bdev, FMODE_READ);
			ret = -EIO;
			goto out_err;
		}

		if ((i == start_sect)
		    && ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer, ctx->bounce + (file_offset + offset)
			       % 512, min((unsigned long) count,
					  (unsigned long)
					  (512 -
					   (file_offset + offset) % 512)));
			index += min((unsigned long) count, (unsigned long)
				     (512 - (file_offset + offset) % 512));
		} else if ((i == end_sect)
			   && ((file_offset + offset + count)
			       % 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce, (file_offset +
							     offset +
							     count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}
	blkdev_put(bdev, FMODE_READ);

	*start = (char *) count;

	if ((offset + count) == file_length)
		*peof = 1;

	mutex_unlock(&drv_mutex);
	return count;

out_err:
	mutex_unlock(&drv_mutex);
	return ret;
}
Example #13
static void fn_fstat64(l4fdx_srv_obj srv_obj, struct internal_request *r)
{
    struct kstat stat;
    struct l4fdx_result_t ret;
    struct l4fdx_client *c = srv_obj->client;
    struct l4x_fdx_srv_data *data = l4x_fdx_srv_get_srv_data(srv_obj);
    int err, fid = r->fstat.fid;
    unsigned long shm_addr = data->shm_base + r->fstat.shm_offset;

    if (   sizeof(struct l4fdx_stat_t) > data->shm_size
            || fid >= ARRAY_SIZE(c->files)
            || r->fstat.shm_offset + sizeof(struct l4fdx_stat_t) > data->shm_size) {
        ret.payload.ret = -EINVAL;
        goto out;
    }

    err = vfs_getattr(&c->files[fid]->f_path, &stat);

    if (!err) {
        struct l4fdx_stat_t *b;

        b = (struct l4fdx_stat_t *)shm_addr;

        // is this ok to munge this like this?
        if (S_ISBLK(stat.mode)) {
            struct block_device *bdev;
            bdev = blkdev_get_by_dev(stat.rdev, FMODE_READ, NULL);
            if (IS_ERR(bdev)) {
                err = PTR_ERR(bdev);
                goto out_err;
            }

            b->blksize = 512;
            b->blocks  = bdev->bd_part->nr_sects;
            b->size    = b->blocks * 512;

            if (0)
                pr_info("l4fdx: fs: %lld %lld %lld  %d:%d\n",
                        b->size, b->blocks, b->blksize,
                        MAJOR(stat.rdev), MINOR(stat.rdev));

            blkdev_put(bdev, FMODE_READ);
        } else {
            b->size    = stat.size;
            b->blocks  = stat.blocks;
            b->blksize = stat.blksize;
        }

        b->dev        = stat.dev;
        b->ino        = stat.ino;
        b->mode       = stat.mode;
        b->nlink      = stat.nlink;
        b->rdev       = stat.rdev;
        b->uid        = from_kuid(&init_user_ns, stat.uid);
        b->gid        = from_kgid(&init_user_ns, stat.gid);
        b->atime.sec  = stat.atime.tv_sec;
        b->atime.nsec = stat.atime.tv_nsec;
        b->mtime.sec  = stat.mtime.tv_sec;
        b->mtime.nsec = stat.mtime.tv_nsec;
        b->ctime.sec  = stat.ctime.tv_sec;
        b->ctime.nsec = stat.ctime.tv_nsec;
    }

out_err:
    ret.payload.ret = err;

out:
    ret.payload.fid = fid;
    res_event(srv_obj, &ret, r->client_req_id);

    kfree(r);
}
Example #14
int fips_fmp_init(struct device *dev)
{
	struct ufs_fmp_work *work;
	struct device_node *dev_node;
	struct platform_device *pdev_ufs;
	struct device *dev_ufs;
	struct ufs_hba *hba;
	struct Scsi_Host *host;
	struct inode *inode;
	struct scsi_device *sdev;
	struct super_block *sb;
	unsigned long blocksize;
	unsigned char blocksize_bits;

	sector_t self_test_block;
	fmode_t fmode = FMODE_WRITE | FMODE_READ;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		dev_err(dev, "Fail to alloc fmp work buffer\n");
		return -ENOMEM;
	}

	dev_node = of_find_compatible_node(NULL, NULL, "samsung,exynos-ufs");
	if (!dev_node) {
		dev_err(dev, "Fail to find exynos ufs device node\n");
		goto out;
	}

	pdev_ufs = of_find_device_by_node(dev_node);
	if (!pdev_ufs) {
		dev_err(dev, "Fail to find exynos ufs pdev\n");
		goto out;
	}

	dev_ufs = &pdev_ufs->dev;
	hba = dev_get_drvdata(dev_ufs);
	if (!hba) {
		dev_err(dev, "Fail to find hba from dev\n");
		goto out;
	}

	host = hba->host;
	sdev = to_scsi_device(dev_ufs);
	work->host = host;
	work->sdev = sdev;

	work->devt = find_devt_for_selftest(dev);
	if (!work->devt) {
		dev_err(dev, "Fail to find devt for self test\n");
		goto out;
	}

	work->bdev = blkdev_get_by_dev(work->devt, fmode, NULL);
	if (IS_ERR(work->bdev)) {
		dev_err(dev, "Fail to open block device\n");
		goto out;
	}
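	/* The self-test block sits SF_BLK_OFFSET filesystem blocks before the
	 * end of the device. */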
	inode = work->bdev->bd_inode;
	sb = inode->i_sb;
	blocksize = sb->s_blocksize;
	blocksize_bits = sb->s_blocksize_bits;
	self_test_block = (i_size_read(inode) - (blocksize * SF_BLK_OFFSET)) >> blocksize_bits;
	work->sector = self_test_block;

	dev_set_drvdata(dev, work);

	return 0;

out:
	kfree(work);

	return -ENODEV;
}
Example #15
static void mmc_panic_notify_add(struct raw_hd_struct *hd,
			struct raw_mmc_panic_ops *panic_ops)
{
	dev_t devid;
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	ctx->mmchd = hd;
	ctx->mmc_panic_ops = panic_ops;

	devid = MKDEV(hd->major, hd->first_minor + hd->partno);
	bdev = blkdev_get_by_dev(devid, FMODE_READ, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "apanic: open device failed with %ld\n",
			PTR_ERR(bdev));
		goto out_err;
	}
	page = virt_to_page(ctx->bounce);

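	/* Read the first page of the panic partition to fetch the panic
	 * header into the bounce buffer. */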
	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);

	blkdev_put(bdev, FMODE_READ);
	printk(KERN_ERR "apanic: Bound to mmc block device '%s(%d:%d)'\n",
		apanic_dev_name, hd->major, hd->first_minor + hd->partno);

	if (hdr->magic != PANIC_MAGIC) {
		printk(KERN_INFO "apanic: No panic data available\n");
		return;
	}

	if (hdr->version != PHDR_VERSION) {
		printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		return;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u) a(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length,
	       hdr->app_threads_offset, hdr->app_threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
						      S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_console)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_console->read_proc = apanic_proc_read_mmc;
			ctx->apanic_console->write_proc = apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data = (void *) 1;
			has_apanic_mmc_dump = 1;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
						       S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_threads)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_threads->read_proc = apanic_proc_read_mmc;
			ctx->apanic_threads->write_proc = apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data = (void *) 2;
		}
	}

	if (hdr->app_threads_length) {
		ctx->apanic_app_threads = create_proc_entry("apanic_app_threads",
						       S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_app_threads)
			printk(KERN_ERR "%s: failed creating procfile\n",
			       __func__);
		else {
			ctx->apanic_app_threads->read_proc = apanic_proc_read_mmc;
			ctx->apanic_app_threads->write_proc = apanic_proc_write;
			ctx->apanic_app_threads->size = hdr->app_threads_length;
			ctx->apanic_app_threads->data = (void *) 3;
		}
	}

	return;
out_err:
	ctx->mmchd = NULL;
	return;
}
Example #16
/* Replaces the first 8 bytes of a partition with DMVERROR */
static int chromeos_invalidate_kernel(struct block_device *root_bdev)
{
	int ret = 0;
	struct block_device *bdev;
	struct bio *bio;
	struct page *page;
	dev_t devt;
	fmode_t dev_mode;
	/* Ensure we do synchronous, unblocked I/O. We may also need
	 * sync_bdev() on completion, but it shouldn't be necessary.
	 */
	int rw = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;

	devt = get_boot_dev_from_root_dev(root_bdev);
	if (!devt) {
		devt = get_boot_dev();
		if (!devt)
			return -EINVAL;
	}

	/* First we open the device for reading. */
	dev_mode = FMODE_READ | FMODE_EXCL;
	bdev = blkdev_get_by_dev(devt, dev_mode, chromeos_invalidate_kernel);
	if (IS_ERR(bdev)) {
		DMERR("invalidate_kernel: could not open device for reading");
		dev_mode = 0;
		ret = -1;
		goto failed_to_read;
	}

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		ret = -1;
		goto failed_bio_alloc;
	}

	page = alloc_page(GFP_NOIO);
	if (!page) {
		ret = -ENOMEM;
		goto failed_to_alloc_page;
	}

	if (chromeos_invalidate_kernel_submit(bio, bdev, rw, page)) {
		ret = -1;
		goto failed_to_submit_read;
	}

	/* We have a page. Let's make sure it looks right. */
	if (memcmp("CHROMEOS", page_address(page), 8)) {
		DMERR("invalidate_kernel called on non-kernel partition");
		ret = -EINVAL;
		goto invalid_header;
	} else {
		DMERR("invalidate_kernel: found CHROMEOS kernel partition");
	}

	/* Stamp it and rewrite */
	memcpy(page_address(page), "DMVERROR", 8);

	/* The block dev was being changed on read. Let's reopen here. */
	blkdev_put(bdev, dev_mode);
	dev_mode = FMODE_WRITE | FMODE_EXCL;
	bdev = blkdev_get_by_dev(devt, dev_mode, chromeos_invalidate_kernel);
	if (IS_ERR(bdev)) {
		DMERR("invalidate_kernel: could not open device for reading");
		dev_mode = 0;
		ret = -1;
		goto failed_to_write;
	}

	rw |= REQ_WRITE;
	if (chromeos_invalidate_kernel_submit(bio, bdev, rw, page)) {
		ret = -1;
		goto failed_to_submit_write;
	}

	DMERR("invalidate_kernel: completed.");
	ret = 0;
failed_to_submit_write:
failed_to_write:
invalid_header:
	__free_page(page);
failed_to_submit_read:
	/* Technically, we'll leak a page with the pending bio, but
	 *  we're about to panic so it's safer to do the panic() we expect.
	 */
failed_to_alloc_page:
	bio_put(bio);
failed_bio_alloc:
	if (dev_mode)
		blkdev_put(bdev, dev_mode);
failed_to_read:
	return ret;
}