int test3(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
	struct block_device *bdev;
	int ret = 0;
	void *bytes;

	bdev = blkdev_get_by_path(BDEV_NAME, BDEV_MODE, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "Cannot open block device for CF\n");
		return -EFAULT;
	}

	bytes = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bytes) {
		blkdev_put(bdev, BDEV_MODE);	/* don't leak the bdev reference */
		return -ENOMEM;
	}

	ret = bd_read_block_from_disk(bdev, 0, bytes);
	if (ret)
		goto out;

	printk(KERN_INFO "First byte is %c\n", ((char *)bytes)[0]);
	printk(KERN_INFO "BDEV test 3 succeeded\n");
out:
	blkdev_put(bdev, BDEV_MODE);	/* put on both success and error paths */
	kfree(bytes);
	return ret;
}
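test3() relies on a bd_read_block_from_disk() helper that is not part of this example. A minimal sketch of such a helper, assuming it reads a single PAGE_SIZE-sized block through the buffer cache (the name and signature are taken from the call site above):

/* Hypothetical helper assumed by test3(): read one PAGE_SIZE block
 * at the given block number into dst via the buffer cache. */
static int bd_read_block_from_disk(struct block_device *bdev,
				   sector_t block, void *dst)
{
	struct buffer_head *bh;

	bh = __bread(bdev, block, PAGE_SIZE);
	if (!bh)
		return -EIO;
	memcpy(dst, bh->b_data, PAGE_SIZE);
	brelse(bh);
	return 0;
}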
static int __init imgrement_init(void)
{
	char *err;
	struct imgrement_device *dev;

	imgrement_device = kzalloc(sizeof(struct imgrement_device), GFP_KERNEL);
	_astgo(imgrement_device != NULL, "Error allocating", err, init_error);
	dev = imgrement_device;

	dev->major = register_blkdev(0, DRIVER_NAME);
	_astgo(dev->major > 0, "Error registering block device", err, init_error);

	/* blkdev_get_by_path() returns ERR_PTR() on failure, never NULL */
	dev->base_dev = blkdev_get_by_path("/dev/sdb", FMODE_READ, NULL);
	_astgo(!IS_ERR(dev->base_dev), "Error getting base block device", err, init_error);

	dev->base_queue = bdev_get_queue(dev->base_dev);
	_astgo(dev->base_queue != NULL, "Error getting queue", err, init_error);

	dev->orig_req_fn = dev->base_queue->make_request_fn;
	dev->base_queue->make_request_fn = trace_request_fn;

	LOG("%s trace initialization succeeded", dev->base_dev->bd_disk->disk_name);
	return 0;

init_error:
	LOG_VAR(err);
	imgrement_exit();
	return -1;
}
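The _astgo() macro used above is project-local and not shown. A plausible definition, offered only as an assumption consistent with the call sites (record the message in err and jump to the label when the condition fails):

/* Hypothetical definition of the project-local _astgo() macro:
 * assert cond, else store msg in err and goto label. */
#define _astgo(cond, msg, err, label)	\
	do {				\
		if (!(cond)) {		\
			(err) = (msg);	\
			goto label;	\
		}			\
	} while (0)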
static int __init test_init(void)
{
	dev_t dev;
	int err = 0;
	struct block_device *bdev;

	LOGe("BUILD_DATE %s\n", BUILD_DATE);

	if (path_) {
		bdev = blkdev_get_by_path(
			path_, FMODE_READ | FMODE_WRITE | FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}
	if (major_ != UINT_MAX && minor_ != UINT_MAX) {
		dev = MKDEV(major_, minor_);
		bdev = blkdev_get_by_dev(
			dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}

	LOGn("succeeded.\n");
	return 0;	/* the original returned -1 here, which would unload the module */

error0:
	LOGn("failed %d.\n", err);
	return -1;
}
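path_, major_, minor_, and lock_ are module-scope values not shown in this example. A hypothetical set of declarations consistent with how test_init() uses them (note that FMODE_EXCL requires a non-NULL holder):

/* Hypothetical module-scope declarations assumed by test_init(). */
static char *path_;			/* device path, e.g. "/dev/sdb" */
static unsigned int major_ = UINT_MAX;	/* device major number */
static unsigned int minor_ = UINT_MAX;	/* device minor number */
static int lock_token_;
static void *lock_ = &lock_token_;	/* non-NULL holder for FMODE_EXCL */

module_param_named(path, path_, charp, 0444);
module_param_named(major, major_, uint, 0444);
module_param_named(minor, minor_, uint, 0444);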
static int block_read(const char *user_dev_path,	/* Path to rpmb device */
		      char *read_buff,			/* User buffer */
		      size_t size)			/* Size of data to read (in bytes) */
{
	int i = 0, index = 0;
	int err;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path, FMODE_READ, block_read);
	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		       user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);
	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}
		memcpy(read_buff + index, bio_buff, 512);
		index += 512;
	}
	err = size;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);
	return err;
}
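block_read() above and block_write() below both park on a completion that is signalled from the bio's end_io callback. The callback itself is not shown in these examples; a minimal sketch, assuming the pre-3.14 two-argument bi_end_io signature that matches the bi_sector/bi_size fields used here:

/* Hypothetical sketch of the end_io callback referenced above; assumes
 * the old void (*bi_end_io)(struct bio *, int error) signature. */
static void emmc_rpmb_bio_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}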
static int block_write(const char *user_dev_path,	/* Path to rpmb device node */
		       const char *write_buff,		/* buffer to write to rpmb */
		       size_t size,			/* size of data to write (in bytes) */
		       int flags)			/* REQ_META flags for Reliable writes */
{
	int i = 0, index = 0;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int end_sect;

	bdev = blkdev_get_by_path(user_dev_path, FMODE_WRITE, block_write);
	if (IS_ERR(bdev)) {
		pr_err("failed to get block device %s (%ld)\n",
		       user_dev_path, PTR_ERR(bdev));
		return -ENODEV;
	}

	page = virt_to_page(bio_buff);
	end_sect = (size - 1) / 512;

	for (i = 0; i <= end_sect; i++) {
		/* Copy data from user buffer to bio buffer */
		memcpy(bio_buff, write_buff + index, 512);
		index += 512;

		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		/* Set to 0 because the addr is part of RPMB data frame */
		bio.bi_sector = 0;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = emmc_rpmb_bio_complete;
		submit_bio(WRITE | flags, &bio);
		wait_for_completion(&complete);
	}

	blkdev_put(bdev, FMODE_WRITE);
	return 0;
}
static void mmc_panic_erase(void)
{
	int i = 0;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	bdev = blkdev_get_by_path(ctx->dev_path, FMODE_WRITE, mmc_panic_erase);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to get block device %s (%ld)\n",
		       ctx->dev_path, PTR_ERR(bdev));
		return;
	}

	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);

	while (i < bdev->bd_part->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		if (bdev->bd_part->nr_sects - i >= 8) {
			/* a full PAGE_SIZE (eight 512-byte sectors) remains */
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
}
int test1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(BDEV_NAME, BDEV_MODE, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "Cannot open block device for CF\n");
		return -EFAULT;
	}
	blkdev_put(bdev, BDEV_MODE);
	printk(KERN_INFO "BDEV test 1 succeeded\n");
	return 0;
}
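BDEV_NAME and BDEV_MODE are defined elsewhere in the test module. Hypothetical definitions consistent with how test1() and test3() use them:

/* Hypothetical definitions assumed by test1()/test3(). */
#define BDEV_NAME "/dev/sdb"			/* any block device path */
#define BDEV_MODE (FMODE_READ | FMODE_WRITE)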
/*
 * Try to open the RH/Fedora specific dm-mpath udev path for this WWN, as the
 * wwn- links will only point to the first discovered SCSI device there.
 */
static struct block_device *
bl_open_dm_mpath_udev_path(struct pnfs_block_volume *v)
{
	struct block_device *bdev;
	const char *devname;

	devname = kasprintf(GFP_KERNEL,
			"/dev/disk/by-id/dm-uuid-mpath-%d%*phN",
			v->scsi.designator_type,
			v->scsi.designator_len, v->scsi.designator);
	if (!devname)
		return ERR_PTR(-ENOMEM);

	bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
	kfree(devname);
	return bdev;
}
static int tbio_ioctl(struct block_device *blk, fmode_t mode,
		      unsigned cmd, unsigned long arg)
{
	int err = 0;

	tbio_dev.bdev = blkdev_get_by_path(DEVICE_NAME,
					   FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(tbio_dev.bdev)) {	/* was unchecked in the original */
		prk_info("failed to get block device " DEVICE_NAME);
		return PTR_ERR(tbio_dev.bdev);
	}

	switch (cmd) {
	case LTP_TBIO_DO_IO:
		prk_info("TEST-CASE: LTP_TBIO_DO_IO:");
		err = tbio_io(tbio_dev.bdev, (struct tbio_interface *)arg);
		break;
	case LTP_TBIO_CLONE:
		prk_info("TEST-CASE: LTP_TBIO_CLONE:");
		err = test_bio_clone();
		break;
	case LTP_TBIO_ADD_PAGE:
		prk_info("TEST-CASE: LTP_TBIO_ADD_PAGE:");
		err = test_bio_add_page();
		break;
	case LTP_TBIO_ALLOC:
		prk_info("TEST-CASE: LTP_TBIO_ALLOC:");
		err = test_bio_alloc();
		break;
	case LTP_TBIO_GET_NR_VECS:
		prk_info("TEST-CASE: LTP_TBIO_GET_NR_VECS:");
		err = test_bio_get_nr_vecs();
		break;
	case LTP_TBIO_PUT:
		prk_info("TEST-CASE: LTP_TBIO_PUT:");
		err = test_bio_put(tbiop);
		break;
	case LTP_TBIO_SPLIT:
		prk_info("TEST-CASE: LTP_TBIO_SPLIT:");
		err = test_bio_split(tbio_dev.bdev,
				     (struct tbio_interface *)arg);
		break;
	}
	prk_info("TEST-CASE DONE");
	blkdev_put(tbio_dev.bdev, FMODE_READ | FMODE_WRITE);
	return err;
}
/*
 * Try to open the udev path for the WWN. At least on Debian the udev
 * by-id path will always point to the dm-multipath device if one exists.
 */
static struct block_device *
bl_open_udev_path(struct pnfs_block_volume *v)
{
	struct block_device *bdev;
	const char *devname;

	devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%*phN",
			v->scsi.designator_len, v->scsi.designator);
	if (!devname)
		return ERR_PTR(-ENOMEM);

	bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_warn("pNFS: failed to open device %s (%ld)\n",
			devname, PTR_ERR(bdev));
	}

	kfree(devname);
	return bdev;
}
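The two helpers above are naturally combined by preferring the dm-mpath link and falling back to the plain wwn- link. A sketch of such a caller (the function name is illustrative):

/* Illustrative caller: prefer the dm-mpath udev link, fall back to wwn-. */
static struct block_device *
bl_open_scsi_volume(struct pnfs_block_volume *v)
{
	struct block_device *bdev;

	bdev = bl_open_dm_mpath_udev_path(v);
	if (IS_ERR(bdev))
		bdev = bl_open_udev_path(v);
	return bdev;
}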
KrDevice *kr_device_create(const char *path, size_t cachesz)
{
	struct block_device *block_dev;
	KrDevice *dev;

	/* get a struct block_device pointer from a string path;
	 * blkdev_get_by_path() returns ERR_PTR() on failure, never NULL */
	block_dev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(block_dev))
		return NULL;

	/* allocate the KrDevice */
	dev = kmalloc(sizeof(KrDevice), GFP_KERNEL);
	if (!dev) {
		blkdev_put(block_dev, FMODE_READ | FMODE_WRITE);
		return NULL;
	}
	dev->bufcnt = 0;
	dev->bdev = block_dev;
	dev->maxbufs = cachesz;
	dev->bufhash = kcalloc(dev->maxbufs, sizeof(KrBuf *), GFP_KERNEL);
	if (!dev->bufhash) {
		blkdev_put(block_dev, FMODE_READ | FMODE_WRITE);
		kfree(dev);
		return NULL;
	}
	dev->n_evict = dev->n_read = dev->n_hit = dev->n_dbl = 0;
	return dev;
}
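A device created this way must eventually release its block_device reference with blkdev_put() using the same mode flags. A hypothetical counterpart, assuming no other cleanup is needed:

/* Hypothetical counterpart to kr_device_create(): release what it acquired. */
void kr_device_destroy(KrDevice *dev)
{
	if (!dev)
		return;
	blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);	/* same mode as the get */
	kfree(dev->bufhash);
	kfree(dev);
}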
static void stop_queue(int index, char *path)
{
	unsigned long flags;
	struct request_queue *q;

	bdev[index] = blkdev_get_by_path(path, FMODE_READ, stop_queue);
	if (IS_ERR(bdev[index])) {
		pr_err("failed to get block device %s (%ld)\n",
		       path, PTR_ERR(bdev[index]));
		bdev[index] = NULL;
		return;
	}

	q = bdev_get_queue(bdev[index]);
	if (!q) {
		pr_err("queue not found bdev[index]=%d\n", index);
		return;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
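stop_queue() leaves both the queue stopped and the block_device reference stashed in the global bdev[] array, so a matching restart path has to undo both. A hypothetical counterpart, assuming the same legacy request-queue API:

/* Hypothetical counterpart to stop_queue(): restart the queue and drop
 * the reference stashed in bdev[index]. */
static void start_queue(int index)
{
	unsigned long flags;
	struct request_queue *q;

	if (!bdev[index])
		return;

	q = bdev_get_queue(bdev[index]);
	if (q) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);	/* must be called with queue_lock held */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
	blkdev_put(bdev[index], FMODE_READ);
	bdev[index] = NULL;
}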
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
	    const char *dev_name, void *data)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	struct dentry *root_dentry;
	int err, s_new = false;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return ERR_CAST(sd.bdev);

	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
	if (sd.bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
		err = -EBUSY;
		goto failed;
	}
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		s_new = true;	/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (err)
			goto failed_super;

		s->s_flags |= MS_ACTIVE;
	} else if (!sd.cno) {
		int busy = false;

		if (nilfs_tree_was_touched(s->s_root)) {
			busy = nilfs_try_to_shrink_tree(s->s_root);
			if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
				printk(KERN_ERR "NILFS: the device already "
				       "has a %s mount.\n",
				       (s->s_flags & MS_RDONLY) ?
				       "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		}
		if (!busy) {
			/*
			 * Try remount to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 */
			err = nilfs_remount(s, &flags, data);
			if (err)
				goto failed_super;
		}
	}

	if (sd.cno) {
		err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
		if (err)
			goto failed_super;
	} else {
		root_dentry = dget(s->s_root);
	}

	if (!s_new)
		blkdev_put(sd.bdev, mode);

	return root_dentry;

failed_super:
	deactivate_locked_super(s);

failed:
	if (!s_new)
		blkdev_put(sd.bdev, mode);
	return ERR_PTR(err);
}
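For context, the sget() callbacks referenced above are small; in fs/nilfs2/super.c of the same era they compare and bind the superblock's block device roughly as follows:

/* sget() callbacks as they appear in fs/nilfs2/super.c of this era:
 * match an existing super_block to a bdev, or bind a new one to it. */
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}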
/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and new device in @device_out, otherwise return < 0
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device *srcdev,
					 struct btrfs_device **device_out)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info,
				  "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (i_size_read(bdev->bd_inode) <
	    btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info,
			  "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		btrfs_free_device(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = 0;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return 0;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}
static int apanic_proc_read(char *buffer, char **start, off_t offset,
			    int count, int *peof, void *dat)
{
	int i, index = 0;
	int err;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	switch ((int) dat) {
	case 1:
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
#ifndef CONFIG_CDEBUGGER
	case 2:
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
#endif
	default:
		pr_err("bad apanic source (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	/*
	 * If the requested offset is greater than or is equal to the file
	 * size, we have already reached the end of file.
	 */
	if (offset >= file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	/*
	 * The bytes to read request is greater than the actual file size,
	 * so trim the request.
	 */
	if ((offset + count) > file_length)
		count = file_length - offset;

	bdev = blkdev_get_by_path(ctx->dev_path, FMODE_READ, apanic_proc_read);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to get block device %s (%ld)\n",
		       ctx->dev_path, PTR_ERR(bdev));
		mutex_unlock(&drv_mutex);
		return -1;
	}

	page = virt_to_page(ctx->bounce);

	start_sect = (file_offset + offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

	for (i = start_sect; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		if ((i == start_sect) && ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer, ctx->bounce + (file_offset + offset) % 512,
			       min((unsigned long)count,
				   (unsigned long)(512 - (file_offset + offset) % 512)));
			index += min((unsigned long)count,
				     (unsigned long)(512 - (file_offset + offset) % 512));
		} else if ((i == end_sect) &&
			   ((file_offset + offset + count) % 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce,
			       (file_offset + offset + count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}

	*start = (char *)count;
	if ((offset + count) == file_length)
		*peof = 1;
	err = count;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);
	mutex_unlock(&drv_mutex);
	return err;
}
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	fmode_t mode;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");

	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
		 ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}

	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = UINT_MAX;
	limits->max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
	      struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	const struct pr_ops *ops;
	const char *devname;
	int error;

	if (!bl_validate_designator(v))
		return -EINVAL;

	switch (v->scsi.designator_len) {
	case 8:
		devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%8phN",
				    v->scsi.designator);
		break;
	case 12:
		devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%12phN",
				    v->scsi.designator);
		break;
	case 16:
		devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%16phN",
				    v->scsi.designator);
		break;
	default:
		return -EINVAL;
	}

	d->bdev = blkdev_get_by_path(devname, FMODE_READ, NULL);
	if (IS_ERR(d->bdev)) {
		pr_warn("pNFS: failed to open device %s (%ld)\n",
			devname, PTR_ERR(d->bdev));
		kfree(devname);
		return PTR_ERR(d->bdev);
	}

	kfree(devname);

	d->len = i_size_read(d->bdev->bd_inode);
	d->map = bl_map_simple;
	d->pr_key = v->scsi.pr_key;

	pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
		d->bdev->bd_disk->disk_name, d->pr_key);

	ops = d->bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: block device %s does not support reservations.",
		       d->bdev->bd_disk->disk_name);
		error = -EINVAL;
		goto out_blkdev_put;
	}

	error = ops->pr_register(d->bdev, 0, d->pr_key, true);
	if (error) {
		pr_err("pNFS: failed to register key for block device %s.",
		       d->bdev->bd_disk->disk_name);
		goto out_blkdev_put;
	}

	d->pr_registered = true;
	return 0;

out_blkdev_put:
	blkdev_put(d->bdev, FMODE_READ);
	return error;
}
int ssd_register(char *path)
{
	struct ssd_info *ssd;
	int ret = -1;

	mutex_lock(&gctx.ctl_mtx);
	do {
		ssd = _alloc_ssd(path);
		if (!ssd) {
			ERR("iostash: Could not allocate ssd_info struct.\n");
			break;
		}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
		ssd->bdev = blkdev_get_by_path(path,
				FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				&gctx.ssdtbl);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
		ssd->bdev = open_bdev_exclusive(path,
				FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				&gctx.ssdtbl);
#else
		ERR("Kernel version < 2.6.28 currently not supported.\n");
		ssd->bdev = ERR_PTR(-ENOENT);
#endif
		if (IS_ERR(ssd->bdev)) {
			ERR("iostash: SSD device lookup failed.\n");
			ssd->bdev = NULL;
			break;
		}
		rmb();
		if (1 < ssd->bdev->bd_openers) {
			ERR("iostash: the SSD device is in use, cannot open it exclusively.\n");
			break;
		}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
		ssd->nr_sctr = get_capacity(ssd->bdev->bd_disk);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
		ssd->nr_sctr = ssd->bdev->bd_part->nr_sects;
#else
		ssd->nr_sctr = part_nr_sects_read(ssd->bdev->bd_part);
#endif
		if (ssd->nr_sctr < IOSTASH_HEADERSCT) {
			ERR("SSD capacity less than minimum size of %uB",
			    IOSTASH_HEADERSIZE);
			break;
		}
		ssd->nr_sctr -= IOSTASH_HEADERSCT;
		DBG("iostash: ssd->nr_sctr = %ld\n", (long)ssd->nr_sctr);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
		ssd->queue_max_hw_sectors =
			queue_max_hw_sectors(bdev_get_queue(ssd->bdev));
#else	/* 2.6.29 and 2.6.30 */
		ssd->queue_max_hw_sectors =
			(bdev_get_queue(ssd->bdev))->max_hw_sectors;
#endif

		ssd->cdev = sce_addcdev(gctx.sce, ssd->nr_sctr, ssd);
		if (ssd->cdev == NULL) {
			ERR("iostash: sce_add_device() failed.\n");
			break;
		}

		ret = _ssd_create_kobj(ssd);
		if (ret) {
			ERR("ssd_create_kobj failed with %d.\n", ret);
			break;
		}

		/* insert it to our ssd hash table, it is ready to service requests */
		_insert_ssd(ssd);

		ssd->online = 1;
		gctx.nr_ssd++;
		DBG("iostash: SSD %s has been added successfully.\n", path);

		ret = 0;
	} while (0);

	if (ret)
		_destroy_ssd(ssd);

	mutex_unlock(&gctx.ctl_mtx);
	return ret;
}
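The version ladder above is a common pattern; it can be factored into a small compatibility wrapper so call sites stay clean. A sketch (the wrapper name is illustrative):

/* Illustrative compatibility wrapper condensing the #if ladder above. */
static inline struct block_device *
compat_blkdev_get_excl(const char *path, fmode_t mode, void *holder)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
	return blkdev_get_by_path(path, mode, holder);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	return open_bdev_exclusive(path, mode, holder);
#else
	return ERR_PTR(-ENOSYS);	/* older kernels not supported */
#endif
}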
static ssize_t kvblade_add(u32 major, u32 minor, char *ifname, char *path)
{
	struct net_device *nd;
	struct block_device *bd;
	struct aoedev *d, *td;
	int ret = 0;

	printk("kvblade_add\n");

	nd = dev_get_by_name(&init_net, ifname);
	if (nd == NULL) {
		eprintk("add failed: interface %s not found.\n", ifname);
		return -ENOENT;
	}
	dev_put(nd);

	bd = blkdev_get_by_path(path, FMODE_READ|FMODE_WRITE, NULL);
	if (IS_ERR(bd)) {	/* blkdev_get_by_path() never returns NULL */
		printk(KERN_ERR "add failed: can't open block device %s: %ld\n",
		       path, PTR_ERR(bd));
		return -ENOENT;
	}

	if (get_capacity(bd->bd_disk) == 0) {
		printk(KERN_ERR "add failed: zero sized block device.\n");
		ret = -ENOENT;
		goto err;
	}

	/* allocate before taking the spinlock: GFP_KERNEL may sleep */
	d = kmalloc(sizeof(struct aoedev), GFP_KERNEL);
	if (!d) {
		printk(KERN_ERR "add failed: kmalloc error for %d.%d\n",
		       major, minor);
		ret = -ENOMEM;
		goto err;
	}
	memset(d, 0, sizeof(struct aoedev));

	spin_lock(&lock);
	for (td = devlist; td; td = td->next)
		if (td->major == major && td->minor == minor && td->netdev == nd) {
			spin_unlock(&lock);
			printk(KERN_ERR "add failed: device %d.%d already exists on %s.\n",
			       major, minor, ifname);
			kfree(d);
			ret = -EEXIST;
			goto err;
		}

	atomic_set(&d->busy, 0);
	d->blkdev = bd;
	d->netdev = nd;
	d->major = major;
	d->minor = minor;
	d->scnt = get_capacity(bd->bd_disk);
	strncpy(d->path, path, nelem(d->path)-1);
	spncpy(d->model, "EtherDrive(R) kvblade", nelem(d->model));
	spncpy(d->sn, "SN HERE", nelem(d->sn));

	kobject_init_and_add(&d->kobj, &kvblade_ktype, &kvblade_kobj,
			     "%d.%d@%s", major, minor, ifname);

	d->next = devlist;
	devlist = d;
	spin_unlock(&lock);

	dprintk("added %s as %d.%d@%s: %Lu sectors.\n",
		path, major, minor, ifname, d->scnt);
	kvblade_announce(d);
	return 0;

err:
	blkdev_put(bd, FMODE_READ|FMODE_WRITE);
	return ret;
}
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
		 ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");
	}

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}
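Distilled from all of the examples above, the core contract of blkdev_get_by_path() is: it returns an ERR_PTR() on failure (never NULL), FMODE_EXCL requires a non-NULL holder, and every successful get must be balanced by a blkdev_put() with the same mode. A minimal sketch of the canonical pattern:

/* Minimal sketch of the canonical open/use/release pattern. */
static int open_use_release(const char *path)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, FMODE_READ, NULL);
	if (IS_ERR(bdev))		/* never NULL on failure */
		return PTR_ERR(bdev);

	/* ... use bdev ... */

	blkdev_put(bdev, FMODE_READ);	/* same mode as the get */
	return 0;
}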