static int nilfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s, *s2;
	struct the_nilfs *nilfs = NULL;
	int err, need_to_close = 1;

	sd.bdev = open_bdev_exclusive(dev_name, flags, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	/*
	 * To get mount instance using sget() vfs-routine, NILFS needs
	 * much more information than normal filesystems to identify mount
	 * instance. For snapshot mounts, not only a mount type (ro-mount
	 * or rw-mount) but also a checkpoint number is required.
	 * The results are passed in sget() using nilfs_super_data.
	 */
	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	down(&sd.bdev->bd_mount_sem);
	if (!sd.cno &&
	    (err = test_exclusive_mount(fs_type, sd.bdev, flags ^ MS_RDONLY))) {
		/* keep a negative error; map "busy" (positive) to -EBUSY */
		err = (err < 0) ? err : -EBUSY;
		goto failed_unlock;
	}
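/*
 * For reference, a sketch of the nilfs_super_data carrier struct that both
 * versions of nilfs_get_sb() fill in for sget(). The exact field set is an
 * assumption reconstructed from the members referenced above and below
 * (bdev, sbi, cno, flags); the in-tree definition may differ.
 */
struct nilfs_super_data {
	struct block_device *bdev;	/* device being mounted */
	struct nilfs_sb_info *sbi;	/* existing sb_info, if any */
	__u64 cno;			/* checkpoint number for snapshot mounts */
	int flags;			/* mount flags */
};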
static int nilfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	struct the_nilfs *nilfs;
	int err, need_to_close = 1;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	/*
	 * To get mount instance using sget() vfs-routine, NILFS needs
	 * much more information than normal filesystems to identify mount
	 * instance. For snapshot mounts, not only a mount type (ro-mount
	 * or rw-mount) but also a checkpoint number is required.
	 */
	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	nilfs = find_or_create_nilfs(sd.bdev);
	if (!nilfs) {
		err = -ENOMEM;
		goto failed;
	}

	mutex_lock(&nilfs->ns_mount_mutex);

	if (!sd.cno) {
		/*
		 * Check if an exclusive mount exists or not.
		 * Snapshot mounts coexist with a current mount
		 * (i.e. rw-mount or ro-mount), whereas rw-mount and
		 * ro-mount are mutually exclusive.
		 */
		down_read(&nilfs->ns_super_sem);
		if (nilfs->ns_current &&
		    ((nilfs->ns_current->s_super->s_flags ^ flags)
		     & MS_RDONLY)) {
			up_read(&nilfs->ns_super_sem);
			err = -EBUSY;
			goto failed_unlock;
		}
		up_read(&nilfs->ns_super_sem);
	}

	/*
	 * Find existing nilfs_sb_info struct
	 */
	sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);

	/*
	 * Get super block instance holding the nilfs_sb_info struct.
	 * A new instance is allocated if no existing mount is present or
	 * existing instance has been unmounted.
	 */
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, &sd);
	if (sd.sbi)
		nilfs_put_sbinfo(sd.sbi);

	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed_unlock;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
				       nilfs);
		if (err)
			goto cancel_new;

		s->s_flags |= MS_ACTIVE;
		need_to_close = 0;
	}

	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	if (need_to_close)
		close_bdev_exclusive(sd.bdev, mode);
	simple_set_mnt(mnt, s);
	return 0;

 failed_unlock:
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
 failed:
	close_bdev_exclusive(sd.bdev, mode);
	return err;

 cancel_new:
	/* Abandoning the newly allocated superblock */
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	deactivate_locked_super(s);
	/*
	 * deactivate_locked_super() invokes close_bdev_exclusive().
	 * We must finish all post-cleaning before this call;
	 * put_nilfs() needs the block device.
	 */
	return err;
}
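/*
 * A minimal sketch of the sget() callback pair the variant above relies on,
 * keyed on the nilfs_super_data passed as &sd. The bodies are an assumption
 * reconstructed from how sd.sbi and sd.bdev are used; the in-tree
 * implementations may differ in detail.
 */
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
	struct nilfs_super_data *sd = data;

	/* match only a super block that holds the sb_info we looked up */
	return sd->sbi && s->s_fs_info == (void *)sd->sbi;
}

static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
	struct nilfs_super_data *sd = data;

	s->s_bdev = sd->bdev;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}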
/******************************************************************************
 *
 * axfs_get_sb_block
 *
 * Description:
 *   Populates an axfs_fill_super_info struct after sanity checking the
 *   block device
 *
 * Parameters:
 *    (IN) fs_type - pointer to file_system_type
 *
 *    (IN) flags - mount flags
 *
 *    (IN) dev_name - block device name passed in from mount
 *
 *    (IN) secondary_blk_dev - secondary block device name entered from
 *         mount -o
 *
 * Returns:
 *    pointer to an axfs_fill_super_info or an error pointer
 *
 *****************************************************************************/
static struct axfs_fill_super_info *axfs_get_sb_block(struct file_system_type
						      *fs_type, int flags,
						      const char *dev_name,
						      char *secondary_blk_dev)
{
	struct nameidata nd;
	struct buffer_head *bh = NULL;
	struct axfs_fill_super_info *output;
	struct block_device *bdev = NULL;
	int err;

	err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
	if (err)
		return ERR_PTR(-EINVAL);

	if (secondary_blk_dev) {
		printk(KERN_ERR
		       "axfs: can't mount two block devices \"%s\" and \"%s\"\n",
		       dev_name, secondary_blk_dev);
		err = -EINVAL;
		goto path_out;
	}

	if (!S_ISBLK(nd.path.dentry->d_inode->i_mode)) {
		err = -EINVAL;
		goto path_out;
	}

	if (nd.path.mnt->mnt_flags & MNT_NODEV) {
		err = -EACCES;
		goto path_out;
	}

	bdev = open_bdev_exclusive(dev_name, flags, fs_type);
	if (IS_ERR(bdev)) {
		err = PTR_ERR(bdev);
		goto path_out;
	}

	bh = __bread(bdev, 0, bdev->bd_block_size);
	if (!bh) {
		err = -EIO;
		goto out;
	}

	output = vmalloc(sizeof(struct axfs_fill_super_info));
	if (!output) {
		err = -ENOMEM;
		goto out;
	}

	output->onmedia_super_block =
	    vmalloc(sizeof(struct axfs_super_onmedia));
	if (!output->onmedia_super_block) {
		vfree(output);	/* don't leak the outer struct */
		err = -ENOMEM;
		goto out;
	}

	memcpy((char *)output->onmedia_super_block, bh->b_data,
	       sizeof(struct axfs_super_onmedia));

	output->physical_start_address = 0;
	output->virtual_start_address = 0;

	path_put(&nd.path);
	close_bdev_exclusive(bdev, flags);
	brelse(bh);	/* release the bread buffer; it is not ours to free */
	return output;

out:
	close_bdev_exclusive(bdev, flags);
	if (bh)
		brelse(bh);
path_out:
	path_put(&nd.path);
	printk(KERN_NOTICE "axfs_get_sb_block(): Invalid device \"%s\"\n",
	       dev_name);
	return ERR_PTR(err);
}
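/*
 * Plausible shapes for the two axfs structs the helper above populates.
 * Only members actually touched by axfs_get_sb_block() are shown; the
 * magic field and any other on-media members are hypothetical.
 */
struct axfs_super_onmedia {
	__be32 magic;		/* hypothetical identification word */
	/* remaining on-media fields elided */
};

struct axfs_fill_super_info {
	struct axfs_super_onmedia *onmedia_super_block;	/* copy of block 0 */
	u64 physical_start_address;
	u64 virtual_start_address;
};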
static int nilfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	struct dentry *root_dentry;
	int err, s_new = false;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
	if (sd.bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
		err = -EBUSY;
		goto failed;
	}
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		s_new = true;

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (err)
			goto failed_super;

		s->s_flags |= MS_ACTIVE;
	} else if (!sd.cno) {
		int busy = false;

		if (nilfs_tree_was_touched(s->s_root)) {
			busy = nilfs_try_to_shrink_tree(s->s_root);
			if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
				printk(KERN_ERR "NILFS: the device already "
				       "has a %s mount.\n",
				       (s->s_flags & MS_RDONLY) ?
				       "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		}
		if (!busy) {
			/*
			 * Try remount to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 */
			err = nilfs_remount(s, &flags, data);
			if (err)
				goto failed_super;
		}
	}

	if (sd.cno) {
		err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
		if (err)
			goto failed_super;
	} else {
		root_dentry = dget(s->s_root);
	}

	if (!s_new)
		close_bdev_exclusive(sd.bdev, mode);

	mnt->mnt_sb = s;
	mnt->mnt_root = root_dentry;
	return 0;

 failed_super:
	deactivate_locked_super(s);

 failed:
	if (!s_new)
		close_bdev_exclusive(sd.bdev, mode);
	return err;
}
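/*
 * In this later variant sget() is keyed on the raw block device rather than
 * on nilfs_super_data, so the callbacks reduce to a bdev comparison. A
 * minimal sketch, assuming the match is exactly by s_bdev:
 */
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}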
int ssd_register(char *path)
{
	struct ssd_info *ssd;
	int ret = -1;

	mutex_lock(&gctx.ctl_mtx);
	do {
		ssd = _alloc_ssd(path);
		if (!ssd) {
			ERR("iostash: Could not allocate ssd_info struct.\n");
			break;
		}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
		ssd->bdev = blkdev_get_by_path(path,
					       FMODE_READ | FMODE_WRITE |
					       FMODE_EXCL, &gctx.ssdtbl);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
		/*
		 * FMODE_EXCL does not exist before 2.6.38; exclusivity is
		 * already implied by open_bdev_exclusive() itself.
		 */
		ssd->bdev = open_bdev_exclusive(path,
						FMODE_READ | FMODE_WRITE,
						&gctx.ssdtbl);
#else
		ERR("Kernel version < 2.6.28 currently not supported.\n");
		ssd->bdev = ERR_PTR(-ENOENT);
#endif
		if (IS_ERR(ssd->bdev)) {
			ERR("iostash: SSD device lookup failed.\n");
			ssd->bdev = NULL;
			break;
		}

		rmb();
		if (1 < ssd->bdev->bd_openers) {
			ERR("iostash: the SSD device is in use, cannot open it exclusively.\n");
			break;
		}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
		ssd->nr_sctr = get_capacity(ssd->bdev->bd_disk);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
		ssd->nr_sctr = ssd->bdev->bd_part->nr_sects;
#else
		ssd->nr_sctr = part_nr_sects_read(ssd->bdev->bd_part);
#endif
		if (ssd->nr_sctr < IOSTASH_HEADERSCT) {
			ERR("SSD capacity less than minimum size of %uB\n",
			    IOSTASH_HEADERSIZE);
			break;
		}
		ssd->nr_sctr -= IOSTASH_HEADERSCT;
		DBG("iostash: ssd->nr_sctr = %ld\n", (long)ssd->nr_sctr);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
		ssd->queue_max_hw_sectors =
		    queue_max_hw_sectors(bdev_get_queue(ssd->bdev));
#else				/* 2.6.28 - 2.6.30 */
		ssd->queue_max_hw_sectors =
		    (bdev_get_queue(ssd->bdev))->max_hw_sectors;
#endif

		ssd->cdev = sce_addcdev(gctx.sce, ssd->nr_sctr, ssd);
		if (ssd->cdev == NULL) {
			ERR("iostash: sce_add_device() failed.\n");
			break;
		}

		ret = _ssd_create_kobj(ssd);
		if (ret) {
			ERR("ssd_create_kobj failed with %d.\n", ret);
			break;
		}

		/* insert it to our ssd hash table, it is ready to service requests */
		_insert_ssd(ssd);

		ssd->online = 1;
		gctx.nr_ssd++;
		DBG("iostash: SSD %s has been added successfully.\n", path);

		ret = 0;
	} while (0);

	if (ret && ssd)		/* guard the early exit where allocation failed */
		_destroy_ssd(ssd);

	mutex_unlock(&gctx.ctl_mtx);

	return ret;
}
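/*
 * The version-dependent open in ssd_register() could be factored into a
 * single helper. A sketch, assuming the same FMODE_* policy as above;
 * iostash_open_bdev() is a hypothetical name, not part of the driver.
 */
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *iostash_open_bdev(const char *path, void *holder)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
	return blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	/* exclusivity is implicit in open_bdev_exclusive() */
	return open_bdev_exclusive(path, FMODE_READ | FMODE_WRITE, holder);
#else
	return ERR_PTR(-ENOSYS);	/* kernels < 2.6.28 unsupported */
#endif
}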