/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 */
static int nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct the_nilfs *nilfs;
	struct nilfs_sb_info *sbi;
	struct nilfs_root *fsroot;
	struct backing_dev_info *bdi;
	__u64 cno;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->s_super = sb;

	nilfs = alloc_nilfs(sb->s_bdev);
	if (!nilfs) {
		err = -ENOMEM;
		goto failed_sbi;
	}
	sbi->s_nilfs = nilfs;

	err = init_nilfs(nilfs, sbi, (char *)data);
	if (err)
		goto failed_nilfs;

	spin_lock_init(&sbi->s_inode_lock);
	INIT_LIST_HEAD(&sbi->s_dirty_files);

	/*
	 * Following initialization is overlapped because
	 * nilfs_sb_info structure has been cleared at the beginning.
	 * But we reserve them to keep our interest and make ready
	 * for the future change.
	 */
	get_random_bytes(&sbi->s_next_generation,
			 sizeof(sbi->s_next_generation));
	spin_lock_init(&sbi->s_next_gen_lock);

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;

	bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
	sb->s_bdi = bdi ? : &default_backing_dev_info;

	err = load_nilfs(nilfs, sbi);
	if (err)
		goto failed_nilfs;

	cno = nilfs_last_cno(nilfs);
	err = nilfs_attach_checkpoint(sbi, cno, true, &fsroot);
	if (err) {
		printk(KERN_ERR "NILFS: error loading last checkpoint "
		       "(checkpoint number=%llu).\n", (unsigned long long)cno);
		goto failed_unload;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_segment_constructor(sbi, fsroot);
		if (err)
			goto failed_checkpoint;
	}

	err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
	if (err)
		goto failed_segctor;

	nilfs_put_root(fsroot);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sbi, true);
		up_write(&nilfs->ns_sem);
	}

	return 0;

failed_segctor:
	nilfs_detach_segment_constructor(sbi);

failed_checkpoint:
	nilfs_put_root(fsroot);

failed_unload:
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

failed_nilfs:
	destroy_nilfs(nilfs);

failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi);
	return err;
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 * @nilfs: the_nilfs struct
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 */
static int nilfs_fill_super(struct super_block *sb, void *data, int silent,
			    struct the_nilfs *nilfs)
{
	struct nilfs_sb_info *sbi;
	struct inode *root;
	__u64 cno;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	get_nilfs(nilfs);
	sbi->s_nilfs = nilfs;
	sbi->s_super = sb;
	atomic_set(&sbi->s_count, 1);

	err = init_nilfs(nilfs, sbi, (char *)data);
	if (err)
		goto failed_sbi;

	spin_lock_init(&sbi->s_inode_lock);
	INIT_LIST_HEAD(&sbi->s_dirty_files);
	INIT_LIST_HEAD(&sbi->s_list);

	/*
	 * Following initialization is overlapped because
	 * nilfs_sb_info structure has been cleared at the beginning.
	 * But we reserve them to keep our interest and make ready
	 * for the future change.
	 */
	get_random_bytes(&sbi->s_next_generation,
			 sizeof(sbi->s_next_generation));
	spin_lock_init(&sbi->s_next_gen_lock);

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;
	sb->s_bdi = nilfs->ns_bdi;

	err = load_nilfs(nilfs, sbi);
	if (err)
		goto failed_sbi;

	cno = nilfs_last_cno(nilfs);

	if (sb->s_flags & MS_RDONLY) {
		if (nilfs_test_opt(sbi, SNAPSHOT)) {
			down_read(&nilfs->ns_segctor_sem);
			err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
						       sbi->s_snapshot_cno);
			up_read(&nilfs->ns_segctor_sem);
			if (err < 0) {
				if (err == -ENOENT)
					err = -EINVAL;
				goto failed_sbi;
			}
			if (!err) {
				printk(KERN_ERR
				       "NILFS: The specified checkpoint is "
				       "not a snapshot "
				       "(checkpoint number=%llu).\n",
				       (unsigned long long)sbi->s_snapshot_cno);
				err = -EINVAL;
				goto failed_sbi;
			}
			cno = sbi->s_snapshot_cno;
		}
	}

	err = nilfs_attach_checkpoint(sbi, cno);
	if (err) {
		printk(KERN_ERR "NILFS: error loading a checkpoint"
		       " (checkpoint number=%llu).\n", (unsigned long long)cno);
		goto failed_sbi;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_segment_constructor(sbi);
		if (err)
			goto failed_checkpoint;
	}

	root = nilfs_iget(sb, NILFS_ROOT_INO);
	if (IS_ERR(root)) {
		printk(KERN_ERR "NILFS: get root inode failed\n");
		err = PTR_ERR(root);
		goto failed_segctor;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "NILFS: corrupt root inode.\n");
		err = -EINVAL;
		goto failed_segctor;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "NILFS: get root dentry failed\n");
		err = -ENOMEM;
		goto failed_segctor;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sbi);
		up_write(&nilfs->ns_sem);
	}

	down_write(&nilfs->ns_super_sem);
	if (!nilfs_test_opt(sbi, SNAPSHOT))
		nilfs->ns_current = sbi;
	up_write(&nilfs->ns_super_sem);

	return 0;

failed_segctor:
	nilfs_detach_segment_constructor(sbi);

failed_checkpoint:
	nilfs_detach_checkpoint(sbi);

failed_sbi:
	put_nilfs(nilfs);
	sb->s_fs_info = NULL;
	nilfs_put_sbinfo(sbi);
	return err;
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 */
static int nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct the_nilfs *nilfs;
	struct nilfs_root *fsroot;
	struct backing_dev_info *bdi;
	__u64 cno;
	int err;

	nilfs = alloc_nilfs(sb->s_bdev);
	if (!nilfs)
		return -ENOMEM;

	sb->s_fs_info = nilfs;

	err = init_nilfs(nilfs, sb, (char *)data);
	if (err)
		goto failed_nilfs;

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;

	bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
	sb->s_bdi = bdi ? : &default_backing_dev_info;

	err = load_nilfs(nilfs, sb);
	if (err)
		goto failed_nilfs;

	cno = nilfs_last_cno(nilfs);
	err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
	if (err) {
		printk(KERN_ERR "NILFS: error loading last checkpoint "
		       "(checkpoint number=%llu).\n", (unsigned long long)cno);
		goto failed_unload;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_log_writer(sb, fsroot);
		if (err)
			goto failed_checkpoint;
	}

	err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
	if (err)
		goto failed_segctor;

	nilfs_put_root(fsroot);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}

	return 0;

failed_segctor:
	nilfs_detach_log_writer(sb);

failed_checkpoint:
	nilfs_put_root(fsroot);

failed_unload:
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

failed_nilfs:
	destroy_nilfs(nilfs);
	return err;
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 */
static int nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct the_nilfs *nilfs;
	struct nilfs_root *fsroot;
	__u64 cno;
	int err;

	nilfs = alloc_nilfs(sb);
	if (!nilfs)
		return -ENOMEM;

	sb->s_fs_info = nilfs;

	err = init_nilfs(nilfs, sb, (char *)data);
	if (err)
		goto failed_nilfs;

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;
	sb->s_max_links = NILFS_LINK_MAX;

	sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;

	err = load_nilfs(nilfs, sb);
	if (err)
		goto failed_nilfs;

	cno = nilfs_last_cno(nilfs);
	err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
	if (err) {
		nilfs_msg(sb, KERN_ERR,
			  "error %d while loading last checkpoint (checkpoint number=%llu)",
			  err, (unsigned long long)cno);
		goto failed_unload;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_log_writer(sb, fsroot);
		if (err)
			goto failed_checkpoint;
	}

	err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
	if (err)
		goto failed_segctor;

	nilfs_put_root(fsroot);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}

	return 0;

failed_segctor:
	nilfs_detach_log_writer(sb);

failed_checkpoint:
	nilfs_put_root(fsroot);

failed_unload:
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

failed_nilfs:
	destroy_nilfs(nilfs);
	return err;
}
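/*
 * For context, a fill_super callback with the signature used above is
 * normally reached through the VFS mount path. The sketch below is a
 * minimal, hypothetical illustration of how such a callback can be wired
 * up with mount_bdev(); it is NOT the actual nilfs_mount() implementation,
 * which drives sget() itself so that the_nilfs state can be shared between
 * mounts of the same device. The file_system_type name and the
 * example_mount() helper are placeholders for illustration only.
 */
#include <linux/fs.h>
#include <linux/module.h>

static struct dentry *example_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name,
				    void *data)
{
	/*
	 * mount_bdev() opens the backing block device, finds or allocates
	 * the super_block, and then invokes the fill_super callback to
	 * populate it (nilfs_fill_super in this sketch).
	 */
	return mount_bdev(fs_type, flags, dev_name, data, nilfs_fill_super);
}

static struct file_system_type example_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "nilfs2",
	.mount		= example_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};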