static struct ssd_info *_alloc_ssd(char *path) { struct ssd_info *ssd; struct block_device *const bdev = lookup_bdev(path); dev_t dev_t = 0; if (IS_ERR(bdev)) return NULL; DBG("bdev %p found for path %s.\n", bdev, path); dev_t = bdev->bd_dev; bdput(bdev); ssd = _search_ssd(dev_t); if (NULL != ssd) return NULL; /* EEXIST */ ssd = kzalloc(sizeof(*ssd), GFP_NOWAIT); if (unlikely(NULL == ssd)) return NULL; /* ENOMEM */ strcpy(ssd->path, path); ssd->dev_t = dev_t; atomic_set(&ssd->nr_ref, 0); INIT_LIST_HEAD(&ssd->list); DBG("Created ssd struct ptr=%p.\n", ssd); return ssd; }
/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct super_block *sb;
	struct block_device *bdev;
	char *devname;

	/* Copy the device path in from userspace. */
	devname = getname(special);
	if (IS_ERR(devname))
		return ERR_CAST(devname);

	/* Resolve the path to a block device; drop the name either way. */
	bdev = lookup_bdev(devname);
	putname(devname);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * Commands that write must not run against a frozen filesystem,
	 * so wait for a thaw in that case; reads take the superblock
	 * as-is.
	 */
	sb = quotactl_cmd_write(cmd) ? get_super_thawed(bdev)
				     : get_super(bdev);
	bdput(bdev);

	return sb ? sb : ERR_PTR(-ENODEV);
#else
	return ERR_PTR(-ENODEV);
#endif
}
/* * Will return a pointer to the block device, used for the dedup access. * The device is found by its name, configured in bdev_name. * If there is no such device, NULL will be returned. */ struct block_device* get_our_bdev(void) { struct block_device *bdev = lookup_bdev((bdev_name) ? bdev_name : DEDUP_BDEV_NAME); return ((bdev == NULL) ?NULL : blkdev_get_by_dev(bdev->bd_dev, FMODE_READ|FMODE_WRITE, NULL)); }
/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* Record the device number, then release the lookup reference. */
	*dev = bdev->bd_dev;
	bdput(bdev);

	return 0;
}
/*
 * Zero out the entire apanic partition (ctx->devpath), 512-byte sectors
 * at a time, using a single on-stack bio and a bounce page.  Used after
 * a panic dump has been consumed.  Errors are logged and the function
 * simply returns; there is no status to report.
 */
static void mmc_panic_erase(void)
{
	int i = 0;
	int err;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	/* Resolve the configured device path to a block device. */
	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		return;
	}
	/* NOTE(review): this relies on the old blkdev_get() semantics of
	 * consuming the lookup reference (even on failure) -- confirm
	 * against the kernel version this builds for. */
	err = blkdev_get(bdev, FMODE_WRITE);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		return;
	}
	/* One zeroed bounce page is written repeatedly over the device. */
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);
	/* i counts 512-byte sectors; a full page covers 8 of them. */
	while (i < bdev->bd_part->nr_sects) {
		/* Synchronous write of one bio, re-initialised per pass. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (bdev->bd_part->nr_sects - i >= 8) {
			/* Whole-page write. */
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			/* Final partial write: remaining sectors only. */
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
	return;
}
/*
 * Remove the ssd registered for the block device at @path, if any.
 * Silently does nothing when the path does not resolve to a device.
 */
void ssd_unregister(char *path)
{
	struct block_device *const bdev = lookup_bdev(path);
	dev_t dev_t = 0;

	/* Check for an error pointer *before* logging: the original
	 * printed "bdev ... found" even when the lookup had failed. */
	if (IS_ERR(bdev))
		return;
	DBG("bdev %p found for path %s.\n", bdev, path);

	dev_t = bdev->bd_dev;
	bdput(bdev);

	/* Serialise against concurrent register/unregister. */
	mutex_lock(&gctx.ctl_mtx);
	_ssd_remove(_search_ssd(dev_t));
	mutex_unlock(&gctx.ctl_mtx);
}
/*
 * sysfs store handler: clear the MMC write-protect bits covering the
 * partition named in @buf (looked up under /dev/block/).  Most failures
 * are swallowed and @count returned so the write "succeeds" silently,
 * matching the original behaviour.
 */
static ssize_t mmc_protect_clear(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	char *device_path;
	struct block_device *target = NULL;
	u32 start;
	u32 size;
	bool device_holding = false;
	struct mmc_card *card;

	card = get_mmc_card();
	if (!card)
		return count;

	device_path = kmalloc(PATH_MAX + count, GFP_KERNEL);
	if (!device_path)
		return -ENOMEM;

	/* NOTE(review): sysfs input usually carries a trailing newline;
	 * confirm callers strip it or the lookup below will fail. */
	snprintf(device_path, PATH_MAX, "/dev/block/%s", buf);

	/*
	 * lookup_bdev() signals failure with an ERR_PTR(), never NULL,
	 * so the original "!target" test could never catch an error and
	 * the error pointer would have been dereferenced below.
	 */
	target = lookup_bdev(device_path);
	if (IS_ERR(target)) {
		kfree(device_path);
		return count;
	}

	if (!target->bd_part) {
		if (blkdev_get(target, FMODE_READ | FMODE_NDELAY, 0)) {
			kfree(device_path);
			return count;
		}
		device_holding = true;
		/* Opening can still leave no partition info behind; the
		 * original dereferenced bd_part without rechecking. */
		if (!target->bd_part) {
			blkdev_put(target, FMODE_READ | FMODE_NDELAY);
			kfree(device_path);
			return count;
		}
	}

	start = (u32)target->bd_part->start_sect;
	size = (u32)target->bd_part->nr_sects;
	clear_write_protect(card, start, size);

	if (device_holding) {
		blkdev_put(target, FMODE_READ | FMODE_NDELAY);
	}
	kfree(device_path);
	return count;
}
/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
	struct block_device *bdev = lookup_bdev(device);
	unsigned int dev_major;

	if (IS_ERR(bdev))
		return (B_FALSE);

	/* Only the major number is needed; release the reference. */
	dev_major = MAJOR(bdev->bd_dev);
	bdput(bdev);

	/* A zvol is identified purely by its registered major number. */
	return (dev_major == zvol_major ? B_TRUE : B_FALSE);
}
void register_block_device(char *path) { struct request_queue *blkdev_queue = NULL; if (path == NULL) { printk ("Block device empty.\n"); return; } printk ("Will open %s.\n", path); blkdev = lookup_bdev(path); if (IS_ERR(blkdev)) { printk ("No such block device.\n"); return; } printk ("Found block device %p with bs %d.\n", blkdev, blkdev->bd_block_size); blkdev_queue = bdev_get_queue(blkdev); original_request_fn = blkdev_queue->request_fn; blkdev_queue->request_fn = misc_request_fn; }
/*
 * Module init: resolve the "dev" parameter to a block device, fetch its
 * active superblock and print information about it.  Always returns 0 so
 * the module loads even when the lookup fails.
 */
static int __init init_fssuper(void)
{
	struct super_block *sb;
	struct block_device *bdev = NULL;

	printk(KERN_INFO "Start module from here\t");
	if (dev == NULL) {
		printk(KERN_ERR "Device not specified");
		return 0;
	}

	bdev = lookup_bdev(strim(dev));
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY))
			printk(KERN_ERR "Device busy\t");
		printk(KERN_ERR "Couldn't lock device <%ld>", PTR_ERR(bdev));
		return 0;
	}

	/*
	 * get_super() returns NULL when no superblock is mounted on the
	 * device -- it never returns an ERR_PTR.  The original IS_ERR()
	 * test could not catch the failure, PTR_ERR(NULL) was printed,
	 * and the NULL pointer was dereferenced below.
	 */
	sb = get_super(bdev);
	bdput(bdev);
	if (!sb) {
		printk(KERN_ERR "Can't load sb");
		return 0;
	}

	printk(KERN_INFO "Sucessfuly loaded sb, uuid %x \t", *(sb->s_uuid));
	printk(KERN_INFO "File system : %s \t", sb->s_type->name);
	/* %p is the correct conversion for pointers; %x was a format
	 * mismatch (undefined behaviour). */
	printk(KERN_INFO "Device: %p, super %p", bdev, sb);

	/* get_super() took an active reference; the original leaked it. */
	drop_super(sb);
	return 0;
}
/*
 * Open and claim the underlying device at @dev_path for stackbd.
 * Returns the block_device on success, NULL on any failure.
 *
 * NOTE(review): lookup_bdev() already returns a referenced bdev, and
 * bdget() here takes a second reference that no error path releases --
 * looks like a reference leak; confirm against the target kernel's
 * refcounting rules.  The FIXME:VER below marks the blkdev_get()
 * signature change between kernel versions.
 */
static struct block_device *stackbd_bdev_open(char dev_path[])
{
	/* Open underlying device */
	struct block_device *bdev_raw = lookup_bdev(dev_path);
	/* Logged unconditionally -- printed even when the lookup failed. */
	printk("Opened %s\n", dev_path);

	if (IS_ERR(bdev_raw)) {
		printk("stackbd: error opening raw device <%lu>\n",
		       PTR_ERR(bdev_raw));
		return NULL;
	}

	/* Take an extra reference by device number. */
	if (!bdget(bdev_raw->bd_dev)) {
		printk("stackbd: error bdget()\n");
		return NULL;
	}

	/* FIXME:VER */
	/* if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE, &stackbd))*/
	if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE)) {
		printk("stackbd: error blkdev_get()\n");
		bdput(bdev_raw);
		return NULL;
	}

	/* Claim exclusive ownership (pre-2.6.38 API). */
	if (bd_claim(bdev_raw, &stackbd)) {
		printk("stackbd: error bd_claim()\n");
		bdput(bdev_raw);
		return NULL;
	}

	return bdev_raw;
}
/*
 * procfs read handler for the apanic console/threads entries.  Reads
 * @count bytes at @offset out of the panic partition region selected by
 * @dat, one 512-byte sector at a time through the bounce page.
 *
 * Returns the number of bytes copied, 0 at/after EOF, or a negative
 * errno.  Serialised by drv_mutex.
 */
static int apanic_proc_read(char *buffer, char **start, off_t offset,
			    int count, int *peof, void *dat)
{
	int i, index = 0;
	int err;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	/* Select which saved region (console vs threads) to read. */
	switch ((int) dat) {
	case PROC_APANIC_CONSOLE:
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
	case PROC_APANIC_THREADS:
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
	default:
		pr_err("bad apanic source (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	if ((offset + count) > file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		/* BUGFIX: the original returned with drv_mutex held. */
		mutex_unlock(&drv_mutex);
		return -1;
	}
	err = blkdev_get(bdev, FMODE_READ);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		/* BUGFIX: likewise, drop the mutex on this error path. */
		mutex_unlock(&drv_mutex);
		return err;
	}
	page = virt_to_page(ctx->bounce);

	/* Translate the byte range into an inclusive sector range. */
	start_sect = (file_offset + offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

	for (i = start_sect; i <= end_sect; i++) {
		/* Synchronous single-sector read into the bounce page. */
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		if ((i == start_sect) &&
		    ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer,
			       ctx->bounce + (file_offset + offset) % 512,
			       min((unsigned long)count, (unsigned long)
				   (512 - (file_offset + offset) % 512)));
			index += min((unsigned long)count, (unsigned long)
				     (512 - (file_offset + offset) % 512));
		} else if ((i == end_sect) &&
			   ((file_offset + offset + count) % 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce,
			       (file_offset + offset + count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}

	*start = (char *)count;
	if ((offset + count) == file_length)
		*peof = 1;
	/*
	 * BUGFIX: the original unlocked drv_mutex here and then fell
	 * through to out_blkdev, unlocking it a second time.  Set the
	 * return value and let the common exit path unlock exactly once.
	 */
	err = count;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);
	mutex_unlock(&drv_mutex);
	return err;
}
static int ltpdev_ioctl ( struct inode *pinode, struct file *pfile, unsigned int cmd, unsigned long arg ) { struct bio *my_bio = NULL; struct bio *my_bio_copy = NULL; request_queue_t *q = NULL; struct block_device *bdev = NULL; unsigned long uaddr; unsigned int bytes_done = 100; int error = 0; int rc = 0; /*****************************************************************************/ printk(KERN_ALERT "ltpdev_ioctl fs tests\n"); switch (cmd) { case LTPAIODEV_CMD: printk(KERN_ALERT "Running AIO FS tests \n"); printk(KERN_ALERT "AIO FS tests complete\n"); break; case LTPBIODEV_CMD: printk(KERN_ALERT "Running BIO FS tests \n"); my_bio = bio_alloc(GFP_KERNEL, 0); if (!my_bio) { printk(KERN_ALERT "Error getting kernel slab memory !!\n"); } else { printk(KERN_ALERT "kernel slab memory alloc OK\n"); } bio_endio(my_bio, bytes_done, error); printk(KERN_ALERT "Return from bio_endio = %d \n", error); my_bio_copy = bio_clone(my_bio,GFP_ATOMIC); if (!my_bio_copy) { printk(KERN_ALERT "Error getting kernel bio clone !!\n"); } else { printk(KERN_ALERT "kernel bio clone OK\n"); } my_bio_copy = bio_clone(my_bio,GFP_NOIO); if (!my_bio_copy) { printk(KERN_ALERT "Error getting kernel bio clone !!\n"); } else { printk(KERN_ALERT "kernel bio clone OK\n"); } // q = bdev_get_queue(my_bio->bi_bdev); // rc = bio_phys_segments(q, my_bio); // rc = bio_hw_segments(q, my_bio); bdev = lookup_bdev(LTP_FS_DEVICE_NAME); printk(KERN_ALERT "return from bdev size %d\n", bdev->bd_block_size); printk(KERN_ALERT "Return from phys_segments = %d \n", rc); // Don't use this API, causes system to hang and corrupts FS // bio_put(my_bio); (char *)uaddr = kmalloc(TEST_MEM_SIZE, GFP_KERNEL); my_bio_copy = bio_map_user(bdev, uaddr, TEST_MEM_SIZE, FALSE); printk(KERN_ALERT "Return from bio_map_user %p\n", my_bio_copy); do_buffer_c_tests(); printk(KERN_ALERT "BIO FS tests complete\n"); break; } return 0; }
/* * set up an MTD-based superblock */ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)) { #ifdef CONFIG_BLOCK struct block_device *bdev; int ret, major; #endif int mtdnr; if (!dev_name) return ERR_PTR(-EINVAL); DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name); /* the preferred way of mounting in future; especially when * CONFIG_BLOCK=n - we specify the underlying MTD device by number or * by name, so that we don't require block device support to be present * in the kernel. */ if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') { if (dev_name[3] == ':') { struct mtd_info *mtd; /* mount by MTD device name */ DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", dev_name + 4); mtd = get_mtd_device_nm(dev_name + 4); if (!IS_ERR(mtd)) return mount_mtd_aux( fs_type, flags, dev_name, data, mtd, fill_super); // printk(KERN_NOTICE "MTD:" // " MTD device with name \"%s\" not found.\n", ; } else if (isdigit(dev_name[3])) { /* mount by MTD device number name */ char *endptr; mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); if (!*endptr) { /* It was a valid number */ DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", mtdnr); return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super); } } } #ifdef CONFIG_BLOCK /* try the old way - the hack where we allowed users to mount * /dev/mtdblock$(n) but didn't actually _use_ the blockdev */ bdev = lookup_bdev(dev_name); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret); return ERR_PTR(ret); } DEBUG(1, "MTDSB: lookup_bdev() returned 0\n"); ret = -EINVAL; major = MAJOR(bdev->bd_dev); mtdnr = MINOR(bdev->bd_dev); bdput(bdev); if (major != MTD_BLOCK_MAJOR) goto not_an_MTD_device; return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super); not_an_MTD_device: #endif /* CONFIG_BLOCK */ if (!(flags & MS_SILENT)) // printk(KERN_NOTICE // "MTD: Attempt to mount non-MTD device 
\"%s\"\n", ; return ERR_PTR(-EINVAL); }
/*
 * Probe @devpath for a saved panic image: read the first page, validate
 * the panic header, cache it in drv_ctx and create the apanic_console /
 * apanic_threads proc entries for whatever regions are present.
 *
 * Returns 0 on success, -1 when no valid panic data is found, or a
 * negative errno from opening the device.
 *
 * NOTE(review): @devpath is declared __user but is passed directly to
 * lookup_bdev()/strncpy(); this relies on the caller providing a
 * kernel-accessible buffer -- confirm against the proc write path.
 */
static int apanic_trigger_check(struct file *file, const char __user *devpath,
				unsigned long count, void *data)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int err = 0;

	bdev = lookup_bdev(devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       devpath, PTR_ERR(bdev));
		return -1;
	}
	err = blkdev_get(bdev, FMODE_READ);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       devpath, err);
		return err;
	}

	/* BUGFIX: strncpy() does not NUL-terminate when the source fills
	 * the buffer; force termination so later users of ctx->devpath
	 * cannot run off the end. */
	strncpy(ctx->devpath, devpath, sizeof(ctx->devpath));
	ctx->devpath[sizeof(ctx->devpath) - 1] = '\0';

	/* Synchronously read the first page (the panic header). */
	page = virt_to_page(ctx->bounce);
	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);
	blkdev_put(bdev, FMODE_READ);
	printk(KERN_ERR DRVNAME "using block device '%s'\n", devpath);

	/* Validate the header before trusting any of its offsets. */
	if (hdr->magic != PANIC_MAGIC) {
		printk(KERN_INFO DRVNAME "no panic data available\n");
		return -1;
	}

	if (hdr->version != PHDR_VERSION) {
		printk(KERN_INFO DRVNAME "version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		return -1;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	printk(KERN_INFO DRVNAME "c(%u, %u) t(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_console)
			printk(KERN_ERR DRVNAME "failed creating procfile\n");
		else {
			ctx->apanic_console->read_proc = apanic_proc_read;
			ctx->apanic_console->write_proc = apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data =
				(void *)PROC_APANIC_CONSOLE;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
							S_IFREG | S_IRUGO,
							NULL);
		if (!ctx->apanic_threads)
			printk(KERN_ERR DRVNAME "failed creating procfile\n");
		else {
			ctx->apanic_threads->read_proc = apanic_proc_read;
			ctx->apanic_threads->write_proc = apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data =
				(void *)PROC_APANIC_THREADS;
		}
	}

	return err;
}
static int miyabi_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data) { static char realpath[PATH_MAX]; int r; struct block_device* bdev; unsigned char major, minor; r = _xx_realpath_from_path(path, realpath, PATH_MAX-1); if (r != 0) return r; if (strncmp(realpath, CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH, strlen(CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH)) == 0) { if (strcmp(realpath, CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH) == 0) { if (strcmp(dev_name, CONFIG_SECURITY_MIYABI_SYSTEM_DEV_PATH) != 0) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s\n", __FUNCTION__, dev_name, realpath); return -EPERM; } else { if ((flags & MS_REMOUNT) && (!(flags & MS_RDONLY))) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s ro remount\n", __FUNCTION__, dev_name, realpath); return -EPERM; } if (flags & MS_BIND) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s loopback mount\n", __FUNCTION__, dev_name, realpath); return -EPERM; } bdev = lookup_bdev((const char*)dev_name); if( bdev == NULL ) { printk("cannot lookup\n"); return -EPERM; } major = MAJOR(bdev->bd_dev); minor = MINOR(bdev->bd_dev); bdput(bdev); if((major != CONFIG_SECURITY_MIYABI_SYSTEM_DEV_MAJOR) || (minor != CONFIG_SECURITY_MIYABI_SYSTEM_DEV_MINOR)) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s mismatch major or minor\n", __FUNCTION__, dev_name, realpath); return -EPERM; } } } else { printk(KERN_ERR "%s: REJECT realpath=%s\n", __FUNCTION__, realpath); return -EPERM; } } return 0; }
/*
 * Mount an MTD-backed filesystem.  The device may be named "mtd:<name>"
 * or "mtd<N>" (preferred, works without CONFIG_BLOCK), or be a
 * /dev/mtdblockN block-device path which is translated back to an MTD
 * number.  Returns 0 on success or a negative errno.
 */
int get_sb_mtd(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data,
	       int (*fill_super)(struct super_block *, void *, int),
	       struct vfsmount *mnt)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	int ret, major;
#endif
	int mtdnr;

	if (!dev_name)
		return -EINVAL;

	DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);

	/* Preferred forms: mtd:<name> or mtd<number>. */
	if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
		if (dev_name[3] == ':') {
			struct mtd_info *mtd;

			/* Mount by MTD device name: scan every slot. */
			DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
			      dev_name + 4);

			for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
				mtd = get_mtd_device(NULL, mtdnr);
				if (!IS_ERR(mtd)) {
					/* On a name match, ownership of the
					 * mtd reference passes to the aux
					 * helper. */
					if (!strcmp(mtd->name, dev_name + 4))
						return get_sb_mtd_aux(
							fs_type, flags,
							dev_name, data, mtd,
							fill_super, mnt);

					put_mtd_device(mtd);
				}
			}

			printk(KERN_NOTICE "MTD:"
			       " MTD device with name \"%s\" not found.\n",
			       dev_name + 4);
		} else if (isdigit(dev_name[3])) {
			/* Mount by MTD device number. */
			char *endptr;

			mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
			if (!*endptr) {
				/* Whole suffix was a valid number. */
				DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", mtdnr);
				return get_sb_mtd_nr(fs_type, flags, dev_name,
						     data, mtdnr, fill_super,
						     mnt);
			}
		}
	}

#ifdef CONFIG_BLOCK
	/* Legacy form: a /dev/mtdblockN path -- resolve it to a device
	 * number but never actually use the block device. */
	bdev = lookup_bdev(dev_name);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
		return ret;
	}
	DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");

	ret = -EINVAL;

	/* Only the major/minor are needed; drop the lookup reference. */
	major = MAJOR(bdev->bd_dev);
	mtdnr = MINOR(bdev->bd_dev);
	bdput(bdev);

	if (major != MTD_BLOCK_MAJOR)
		goto not_an_MTD_device;

	/* The minor of an mtdblock device is its MTD number. */
	return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr,
			     fill_super, mnt);

not_an_MTD_device:
#endif

	if (!(flags & MS_SILENT))
		printk(KERN_NOTICE
		       "MTD: Attempt to mount non-MTD device \"%s\"\n",
		       dev_name);
	return -EINVAL;
}