/*
 * squashfs_fill_super - read and validate the squashfs superblock and
 * build the in-core state needed to mount: device block size, metadata
 * caches, decompressor, xattr/id/inode-lookup/fragment index tables and
 * finally the root inode/dentry.
 *
 * Returns 0 on success or a negative errno.  Every failure funnels
 * through the failed_mount label, which frees everything allocated so
 * far (all teardown helpers tolerate NULL pointers, and each table
 * pointer is NULLed after an IS_ERR result so the teardown never
 * kfrees an ERR_PTR).
 */
static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct squashfs_sb_info *msblk;
	struct squashfs_super_block *sblk = NULL;
	char b[BDEVNAME_SIZE];
	struct inode *root;
	long long root_inode;
	unsigned short flags;
	unsigned int fragments;
	u64 lookup_table_start, xattr_id_table_start, next_table;
	int err;

	TRACE("Entered squashfs_fill_superblock\n");

	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
	if (sb->s_fs_info == NULL) {
		ERROR("Failed to allocate squashfs_sb_info\n");
		return -ENOMEM;
	}
	msblk = sb->s_fs_info;

	/*
	 * devblksize is the block size used for device reads; it is a
	 * power of two, so ffz(~x) yields log2(x).
	 */
	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
	msblk->devblksize_log2 = ffz(~msblk->devblksize);

	mutex_init(&msblk->read_data_mutex);
	mutex_init(&msblk->meta_index_mutex);

	/*
	 * msblk->bytes_used is checked in squashfs_read_table to ensure reads
	 * are not beyond filesystem end. But as we're using
	 * squashfs_read_table here to read the superblock (including the value
	 * of bytes_used) we need to set it to an initial sensible dummy value
	 */
	msblk->bytes_used = sizeof(*sblk);
	sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));

	if (IS_ERR(sblk)) {
		ERROR("unable to read squashfs_super_block\n");
		err = PTR_ERR(sblk);
		/* NULL so the failed_mount kfree(sblk) is a no-op */
		sblk = NULL;
		goto failed_mount;
	}

	err = -EINVAL;

	/* Check it is a SQUASHFS superblock */
	sb->s_magic = le32_to_cpu(sblk->s_magic);
	if (sb->s_magic != SQUASHFS_MAGIC) {
		if (!silent)
			ERROR("Can't find a SQUASHFS superblock on %s\n",
						bdevname(sb->s_bdev, b));
		goto failed_mount;
	}

	/* Check the MAJOR & MINOR versions and lookup compression type */
	msblk->decompressor = supported_squashfs_filesystem(
			le16_to_cpu(sblk->s_major),
			le16_to_cpu(sblk->s_minor),
			le16_to_cpu(sblk->compression));
	if (msblk->decompressor == NULL)
		goto failed_mount;

	/*
	 * Check the filesystem does not extend beyond the end of the
	 * block device
	 */
	msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
	if (msblk->bytes_used < 0 ||
	    msblk->bytes_used > i_size_read(sb->s_bdev->bd_inode))
		goto failed_mount;

	/* Check block size for sanity */
	msblk->block_size = le32_to_cpu(sblk->block_size);
	if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
		goto failed_mount;

	/*
	 * Check the system page size is not larger than the filesystem
	 * block size (by default 128K). This is currently not supported.
	 */
	if (PAGE_CACHE_SIZE > msblk->block_size) {
		ERROR("Page size > filesystem block size (%d). This is "
			"currently not supported!\n", msblk->block_size);
		goto failed_mount;
	}

	msblk->block_log = le16_to_cpu(sblk->block_log);
	if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
		goto failed_mount;

	/*
	 * NOTE(review): block_size and block_log are validated
	 * independently but never cross-checked against each other
	 * (block_size == 1 << block_log); later upstream squashfs adds
	 * that check to reject crafted images — worth confirming here.
	 */

	/* Check the root inode for sanity */
	root_inode = le64_to_cpu(sblk->root_inode);
	if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
		goto failed_mount;

	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
	msblk->inodes = le32_to_cpu(sblk->inodes);
	flags = le16_to_cpu(sblk->flags);

	TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
	TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
				? "un" : "");
	TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
				? "un" : "");
	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
	TRACE("Block size %d\n", msblk->block_size);
	TRACE("Number of inodes %d\n", msblk->inodes);
	TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
	TRACE("sblk->fragment_table_start %llx\n",
		(u64) le64_to_cpu(sblk->fragment_table_start));
	TRACE("sblk->id_table_start %llx\n",
		(u64) le64_to_cpu(sblk->id_table_start));

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_flags |= MS_RDONLY;	/* squashfs is read-only by design */
	sb->s_op = &squashfs_super_ops;

	err = -ENOMEM;

	msblk->block_cache = squashfs_cache_init("metadata",
			SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
	if (msblk->block_cache == NULL)
		goto failed_mount;

	/* Allocate read_page block */
	msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
	if (msblk->read_page == NULL) {
		ERROR("Failed to allocate read_page block\n");
		goto failed_mount;
	}

	msblk->stream = squashfs_decompressor_init(sb, flags);
	if (IS_ERR(msblk->stream)) {
		err = PTR_ERR(msblk->stream);
		msblk->stream = NULL;
		goto failed_mount;
	}

	/* Handle xattrs */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	/* pre-2.6.37 kernels declare sb->s_xattr without const */
	sb->s_xattr = (struct xattr_handler **) squashfs_xattr_handlers;
#else
	sb->s_xattr = squashfs_xattr_handlers;
#endif
	xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
	if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
		/* no xattr table: the next table's upper bound is fs end */
		next_table = msblk->bytes_used;
		goto allocate_id_index_table;
	}

	/* Allocate and read xattr id lookup table */
	msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
		xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
	if (IS_ERR(msblk->xattr_id_table)) {
		ERROR("unable to read xattr id index table\n");
		err = PTR_ERR(msblk->xattr_id_table);
		msblk->xattr_id_table = NULL;
		/* -ENOTSUPP (xattrs compiled out) is non-fatal */
		if (err != -ENOTSUPP)
			goto failed_mount;
	}
	next_table = msblk->xattr_table;

allocate_id_index_table:
	/* Allocate and read id index table */
	msblk->id_table = squashfs_read_id_index_table(sb,
		le64_to_cpu(sblk->id_table_start), next_table,
		le16_to_cpu(sblk->no_ids));
	if (IS_ERR(msblk->id_table)) {
		ERROR("unable to read id index table\n");
		err = PTR_ERR(msblk->id_table);
		msblk->id_table = NULL;
		goto failed_mount;
	}
	/*
	 * next_table tracks the start of the previously-read table so
	 * each read can be bounds-checked against the table above it.
	 */
	next_table = le64_to_cpu(msblk->id_table[0]);

	/* Handle inode lookup table */
	lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
	if (lookup_table_start == SQUASHFS_INVALID_BLK)
		goto handle_fragments;

	/* Allocate and read inode lookup table */
	msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
		lookup_table_start, next_table, msblk->inodes);
	if (IS_ERR(msblk->inode_lookup_table)) {
		ERROR("unable to read inode lookup table\n");
		err = PTR_ERR(msblk->inode_lookup_table);
		msblk->inode_lookup_table = NULL;
		goto failed_mount;
	}
	next_table = le64_to_cpu(msblk->inode_lookup_table[0]);

	/* NFS export only works when the lookup table is present */
	sb->s_export_op = &squashfs_export_ops;

handle_fragments:
	fragments = le32_to_cpu(sblk->fragments);
	if (fragments == 0)
		goto check_directory_table;

	msblk->fragment_cache = squashfs_cache_init("fragment",
		SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
	if (msblk->fragment_cache == NULL) {
		err = -ENOMEM;
		goto failed_mount;
	}

	/* Allocate and read fragment index table */
	msblk->fragment_index = squashfs_read_fragment_index_table(sb,
		le64_to_cpu(sblk->fragment_table_start), next_table,
		fragments);
	if (IS_ERR(msblk->fragment_index)) {
		ERROR("unable to read fragment index table\n");
		err = PTR_ERR(msblk->fragment_index);
		msblk->fragment_index = NULL;
		goto failed_mount;
	}
	next_table = le64_to_cpu(msblk->fragment_index[0]);

check_directory_table:
	/* Sanity check directory_table */
	if (msblk->directory_table > next_table) {
		err = -EINVAL;
		goto failed_mount;
	}

	/* Sanity check inode_table */
	if (msblk->inode_table >= msblk->directory_table) {
		err = -EINVAL;
		goto failed_mount;
	}

	/* allocate root */
	root = new_inode(sb);
	if (!root) {
		err = -ENOMEM;
		goto failed_mount;
	}

	err = squashfs_read_inode(root, root_inode);
	if (err) {
		make_bad_inode(root);
		iput(root);
		goto failed_mount;
	}
	insert_inode_hash(root);

	sb->s_root = d_alloc_root(root);
	if (sb->s_root == NULL) {
		ERROR("Root inode create failed\n");
		err = -ENOMEM;
		iput(root);
		goto failed_mount;
	}

	TRACE("Leaving squashfs_fill_super\n");
	kfree(sblk);
	return 0;

failed_mount:
	/* all of these tolerate NULL / partially-initialised state */
	squashfs_cache_delete(msblk->block_cache);
	squashfs_cache_delete(msblk->fragment_cache);
	squashfs_cache_delete(msblk->read_page);
	squashfs_decompressor_free(msblk, msblk->stream);
	kfree(msblk->inode_lookup_table);
	kfree(msblk->fragment_index);
	kfree(msblk->id_table);
	kfree(msblk->xattr_id_table);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
	kfree(sblk);
	return err;
}
/*
 * ext2_fill_super - read the ext2 superblock, validate its geometry and
 * feature flags, load all group descriptors and counters, and hook up
 * the root inode.
 *
 * The superblock may be read twice: first with the device's minimum
 * block size, then re-read after switching to the on-disk block size if
 * they differ.  Cleanup is tiered through the goto labels at the
 * bottom, each label undoing one further stage of setup.
 *
 * Returns 0 on success or a negative errno (carried in 'ret').
 */
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk ("EXT2-fs: unable to set blocksize\n");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		printk ("EXT2-fs: unable to read superblock\n");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif

	/* on-disk default error behaviour: panic / continue / remount-ro */
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options ((char *) data, sbi))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				   EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		printk("EXT2-fs warning: feature flags set on rev 0 fs, "
		       "running e2fsck is recommended\n");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		printk("EXT2-fs: %s: couldn't mount because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		printk("EXT2-fs: %s: couldn't mount RDWR because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			printk("XIP: Unsupported blocksize\n");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			printk("EXT2-fs: Couldn't read superblock on "
			       "2nd try.\n");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			printk ("EXT2-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

	/* rev 0 filesystems use fixed inode size / first inode number */
	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			printk ("EXT2-fs: unsupported inode size: %d\n",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
			   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
			       sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
				sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			printk ("VFS: Unsupported blocksize on dev "
				"%s.\n", sb->s_id);
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	/* each per-group bitmap must fit in one filesystem block */
	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #blocks per group too big: %lu\n",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #fragments per group too big: %lu\n",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #inodes per group too big: %lu\n",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
				le32_to_cpu(es->s_first_data_block) - 1)
					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			/* release the descriptors read so far */
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			printk ("EXT2-fs: unable to read group descriptors\n");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		printk ("EXT2-fs: group descriptors corrupted!\n");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		printk(KERN_ERR "EXT2-fs: insufficient memory\n");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;
	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
		goto failed_mount3;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: get root inode failed\n");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_warning(sb, __func__,
			"mounting ext3 filesystem as ext2");
	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
	return 0;

cantfind_ext2:
	if (!silent)
		printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
		       sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}
/** * vxfs_read_super - read superblock into memory and initialize filesystem * @sbp: VFS superblock (to fill) * @dp: fs private mount data * @silent: do not complain loudly when sth is wrong * * Description: * We are called on the first mount of a filesystem to read the * superblock into memory and do some basic setup. * * Returns: * The superblock on success, else %NULL. * * Locking: * We are under the bkl and @sbp->s_lock. */ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) { struct vxfs_sb_info *infp; struct vxfs_sb *rsbp; struct buffer_head *bp = NULL; u_long bsize; struct inode *root; int ret = -EINVAL; sbp->s_flags |= MS_RDONLY; infp = kzalloc(sizeof(*infp), GFP_KERNEL); if (!infp) { printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n"); return -ENOMEM; } bsize = sb_min_blocksize(sbp, BLOCK_SIZE); if (!bsize) { printk(KERN_WARNING "vxfs: unable to set blocksize\n"); goto out; } bp = sb_bread(sbp, 1); if (!bp || !buffer_mapped(bp)) { if (!silent) { printk(KERN_WARNING "vxfs: unable to read disk superblock\n"); } goto out; } rsbp = (struct vxfs_sb *)bp->b_data; if (rsbp->vs_magic != VXFS_SUPER_MAGIC) { if (!silent) printk(KERN_NOTICE "vxfs: WRONG superblock magic\n"); goto out; } if ((rsbp->vs_version < 2 || rsbp->vs_version > 4) && !silent) { printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n", rsbp->vs_version); goto out; } #ifdef DIAGNOSTIC printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", rsbp->vs_version); printk(KERN_DEBUG "vxfs: blocksize: %d\n", rsbp->vs_bsize); #endif sbp->s_magic = rsbp->vs_magic; sbp->s_fs_info = infp; infp->vsi_raw = rsbp; infp->vsi_bp = bp; infp->vsi_oltext = rsbp->vs_oltext[0]; infp->vsi_oltsize = rsbp->vs_oltsize; if (!sb_set_blocksize(sbp, rsbp->vs_bsize)) { printk(KERN_WARNING "vxfs: unable to set final block size\n"); goto out; } if (vxfs_read_olt(sbp, bsize)) { printk(KERN_WARNING "vxfs: unable to read olt\n"); goto out; } if (vxfs_read_fshead(sbp)) { printk(KERN_WARNING 
"vxfs: unable to read fshead\n"); goto out; } sbp->s_op = &vxfs_super_ops; root = vxfs_iget(sbp, VXFS_ROOT_INO); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out; } sbp->s_root = d_alloc_root(root); if (!sbp->s_root) { iput(root); printk(KERN_WARNING "vxfs: unable to get root dentry.\n"); goto out_free_ilist; } return 0; out_free_ilist: vxfs_put_fake_inode(infp->vsi_fship); vxfs_put_fake_inode(infp->vsi_ilist); vxfs_put_fake_inode(infp->vsi_stilist); out: brelse(bp); kfree(infp); return ret; }
/* Allocate private field of the superblock, fill it. * * Finish filling the public superblock fields * Make the root directory * Load a set of NLS translations if needed. */ static int befs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh; befs_sb_info *befs_sb; befs_super_block *disk_sb; struct inode *root; long ret = -EINVAL; const unsigned long sb_block = 0; const off_t x86_sb_off = 512; save_mount_options(sb, data); sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); if (sb->s_fs_info == NULL) { printk(KERN_ERR "BeFS(%s): Unable to allocate memory for private " "portion of superblock. Bailing.\n", sb->s_id); goto unacquire_none; } befs_sb = BEFS_SB(sb); memset(befs_sb, 0, sizeof(befs_sb_info)); if (!parse_options((char *) data, &befs_sb->mount_opts)) { befs_error(sb, "cannot parse mount options"); goto unacquire_priv_sbp; } befs_debug(sb, "---> befs_fill_super()"); #ifndef CONFIG_BEFS_RW if (!(sb->s_flags & MS_RDONLY)) { befs_warning(sb, "No write support. Marking filesystem read-only"); sb->s_flags |= MS_RDONLY; } #endif /* CONFIG_BEFS_RW */ /* * Set dummy blocksize to read super block. * Will be set to real fs blocksize later. * * Linux 2.4.10 and later refuse to read blocks smaller than * the hardsect size for the device. But we also need to read at * least 1k to get the second 512 bytes of the volume. 
* -WD 10-26-01 */ sb_min_blocksize(sb, 1024); if (!(bh = sb_bread(sb, sb_block))) { befs_error(sb, "unable to read superblock"); goto unacquire_priv_sbp; } /* account for offset of super block on x86 */ disk_sb = (befs_super_block *) bh->b_data; if ((disk_sb->magic1 == BEFS_SUPER_MAGIC1_LE) || (disk_sb->magic1 == BEFS_SUPER_MAGIC1_BE)) { befs_debug(sb, "Using PPC superblock location"); } else { befs_debug(sb, "Using x86 superblock location"); disk_sb = (befs_super_block *) ((void *) bh->b_data + x86_sb_off); } if (befs_load_sb(sb, disk_sb) != BEFS_OK) goto unacquire_bh; befs_dump_super_block(sb, disk_sb); brelse(bh); if (befs_check_sb(sb) != BEFS_OK) goto unacquire_priv_sbp; if( befs_sb->num_blocks > ~((sector_t)0) ) { befs_error(sb, "blocks count: %Lu " "is larger than the host can use", befs_sb->num_blocks); goto unacquire_priv_sbp; } /* * set up enough so that it can read an inode * Fill in kernel superblock fields from private sb */ sb->s_magic = BEFS_SUPER_MAGIC; /* Set real blocksize of fs */ sb_set_blocksize(sb, (ulong) befs_sb->block_size); sb->s_op = &befs_sops; root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); if (IS_ERR(root)) { ret = PTR_ERR(root); goto unacquire_priv_sbp; } sb->s_root = d_make_root(root); if (!sb->s_root) { befs_error(sb, "get root inode failed"); goto unacquire_priv_sbp; } /* load nls library */ if (befs_sb->mount_opts.iocharset) { befs_debug(sb, "Loading nls: %s", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset); if (!befs_sb->nls) { befs_warning(sb, "Cannot load nls %s" " loading default nls", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls_default(); } /* load default nls if none is specified in mount options */ } else { befs_debug(sb, "Loading default nls"); befs_sb->nls = load_nls_default(); } return 0; /*****************/ unacquire_bh: brelse(bh); unacquire_priv_sbp: kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: sb->s_fs_info = NULL; return 
ret; }
/*
 * hfsplus_read_wrapper - locate and validate the HFS+ volume header.
 *
 * Reads the volume header, transparently unwrapping an HFS wrapper
 * (embedded HFS+ volume) or probing the partition map when the header
 * is not found at the expected location — both cases loop back via the
 * 'reread' label with an adjusted partition start/size.  On success the
 * allocation block size, device block size and partition geometry are
 * recorded in the hfsplus_sb_info.
 *
 * Returns 0 if good data was read, else a negative errno.
 */
int hfsplus_read_wrapper(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_wd wd;
	sector_t part_start, part_size;
	u32 blocksize;
	int error = 0;

	error = -EINVAL;
	blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
	if (!blocksize)
		goto out;

	if (hfsplus_get_last_session(sb, &part_start, &part_size))
		goto out;

	error = -ENOMEM;
	sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
	if (!sbi->s_vhdr_buf)
		goto out;
	sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
	if (!sbi->s_backup_vhdr_buf)
		goto out_free_vhdr;

reread:
	error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
				   sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
				   READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	switch (sbi->s_vhdr->signature) {
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
		/* HFSX volume: remember it, then treat like HFS+ */
		set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
		/*FALLTHRU*/
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
		break;
	case cpu_to_be16(HFSP_WRAP_MAGIC):
		/*
		 * HFS wrapper: compute the embedded HFS+ volume's
		 * location from the wrapper MDB and re-read there.
		 */
		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
			goto out_free_backup_vhdr;
		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
		part_start += (sector_t)wd.ablk_start +
			      (sector_t)wd.embed_start * wd.ablk_size;
		part_size = (sector_t)wd.embed_count * wd.ablk_size;
		goto reread;
	default:
		/*
		 * Check for a partition block.
		 *
		 * (should do this only for cdrom/loop though)
		 */
		if (hfs_part_find(sb, &part_start, &part_size))
			goto out_free_backup_vhdr;
		goto reread;
	}

	/* backup volume header lives in the second-to-last sector */
	error = hfsplus_submit_bio(sb, part_start + part_size - 2,
				   sbi->s_backup_vhdr_buf,
				   (void **)&sbi->s_backup_vhdr, READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
		pr_warn("invalid secondary volume header\n");
		goto out_free_backup_vhdr;
	}

	blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);

	/*
	 * Block size must be at least as large as a sector and a power
	 * of two (the ((x - 1) & x) test below is the power-of-two check).
	 */
	if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
		goto out_free_backup_vhdr;
	sbi->alloc_blksz = blocksize;
	sbi->alloc_blksz_shift = 0;
	while ((blocksize >>= 1) != 0)
		sbi->alloc_blksz_shift++;
	blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);

	/*
	 * Align block size to block offset.
	 */
	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
		blocksize >>= 1;

	if (sb_set_blocksize(sb, blocksize) != blocksize) {
		pr_err("unable to set blocksize to %u!\n", blocksize);
		goto out_free_backup_vhdr;
	}

	sbi->blockoffset =
		part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
	sbi->part_start = part_start;
	sbi->sect_count = part_size;
	sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	return 0;

out_free_backup_vhdr:
	kfree(sbi->s_backup_vhdr_buf);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
out:
	return error;
}
unsigned short flags; unsigned int fragments; u64 lookup_table_start, xattr_id_table_start, next_table; int err; TRACE("Entered squashfs_fill_superblock\n"); sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL); if (sb->s_fs_info == NULL) { ERROR("Failed to allocate squashfs_sb_info\n"); return -ENOMEM; } msblk = sb->s_fs_info; <<<<<<< HEAD msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); ======= <<<<<<< HEAD msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); ======= msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); >>>>>>> 58a75b6a81be54a8b491263ca1af243e9d8617b9 >>>>>>> ae1773bb70f3d7cf73324ce8fba787e01d8fa9f2 msblk->devblksize_log2 = ffz(~msblk->devblksize); mutex_init(&msblk->read_data_mutex); mutex_init(&msblk->meta_index_mutex); /* * msblk->bytes_used is checked in squashfs_read_table to ensure reads * are not beyond filesystem end. But as we're using
/* * hfs_mdb_get() * * Build the in-core MDB for a filesystem, including * the B-trees and the volume bitmap. */ int hfs_mdb_get(struct super_block *sb) { struct buffer_head *bh; struct hfs_mdb *mdb, *mdb2; unsigned int block; char *ptr; int off2, len, size, sect; sector_t part_start, part_size; loff_t off; __be16 attrib; /* set the device driver to 512-byte blocks */ size = sb_min_blocksize(sb, HFS_SECTOR_SIZE); if (!size) return -EINVAL; if (hfs_get_last_session(sb, &part_start, &part_size)) return -EINVAL; while (1) { /* See if this is an HFS filesystem */ bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) break; brelse(bh); /* check for a partition block * (should do this only for cdrom/loop though) */ if (hfs_part_find(sb, &part_start, &part_size)) goto out; } HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz); if (!size || (size & (HFS_SECTOR_SIZE - 1))) { hfs_warn("hfs_fs: bad allocation block size %d\n", size); goto out_bh; } size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE); /* size must be a multiple of 512 */ while (size & (size - 1)) size -= HFS_SECTOR_SIZE; sect = be16_to_cpu(mdb->drAlBlSt) + part_start; /* align block size to first sector */ while (sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS)) size >>= 1; /* align block size to weird alloc size */ while (HFS_SB(sb)->alloc_blksz & (size - 1)) size >>= 1; brelse(bh); if (!sb_set_blocksize(sb, size)) { printk("hfs_fs: unable to set blocksize to %u\n", size); goto out; } bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord != cpu_to_be16(HFS_SUPER_MAGIC)) goto out_bh; HFS_SB(sb)->mdb_bh = bh; HFS_SB(sb)->mdb = mdb; /* These parameters are read from the MDB, and never written */ HFS_SB(sb)->part_start = part_start; HFS_SB(sb)->fs_ablocks = be16_to_cpu(mdb->drNmAlBlks); HFS_SB(sb)->fs_div = HFS_SB(sb)->alloc_blksz >> sb->s_blocksize_bits; HFS_SB(sb)->clumpablks = 
be32_to_cpu(mdb->drClpSiz) / HFS_SB(sb)->alloc_blksz; if (!HFS_SB(sb)->clumpablks) HFS_SB(sb)->clumpablks = 1; HFS_SB(sb)->fs_start = (be16_to_cpu(mdb->drAlBlSt) + part_start) >> (sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS); /* These parameters are read from and written to the MDB */ HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks); HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID); HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls); HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs); HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt); HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt); /* TRY to get the alternate (backup) MDB. */ sect = part_start + part_size - 2; bh = sb_bread512(sb, sect, mdb2); if (bh) { if (mdb2->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) { HFS_SB(sb)->alt_mdb_bh = bh; HFS_SB(sb)->alt_mdb = mdb2; } else brelse(bh); } if (!HFS_SB(sb)->alt_mdb) { hfs_warn("hfs_fs: unable to locate alternate MDB\n"); hfs_warn("hfs_fs: continuing without an alternate MDB\n"); } HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 
1 : 0); if (!HFS_SB(sb)->bitmap) goto out; /* read in the bitmap */ block = be16_to_cpu(mdb->drVBMSt) + part_start; off = (loff_t)block << HFS_SECTOR_SIZE_BITS; size = (HFS_SB(sb)->fs_ablocks + 8) / 8; ptr = (u8 *)HFS_SB(sb)->bitmap; while (size) { bh = sb_bread(sb, off >> sb->s_blocksize_bits); if (!bh) { hfs_warn("hfs_fs: unable to read volume bitmap\n"); goto out; } off2 = off & (sb->s_blocksize - 1); len = min((int)sb->s_blocksize - off2, size); memcpy(ptr, bh->b_data + off2, len); brelse(bh); ptr += len; off += len; size -= len; } HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp); if (!HFS_SB(sb)->ext_tree) { hfs_warn("hfs_fs: unable to open extent tree\n"); goto out; } HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp); if (!HFS_SB(sb)->cat_tree) { hfs_warn("hfs_fs: unable to open catalog tree\n"); goto out; } attrib = mdb->drAtrb; if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT)) || (attrib & cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT))) { hfs_warn("HFS-fs warning: Filesystem was not cleanly unmounted, " "running fsck.hfs is recommended. mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) { hfs_warn("HFS-fs: Filesystem is marked locked, mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } if (!(sb->s_flags & MS_RDONLY)) { /* Mark the volume uncleanly unmounted in case we crash */ mdb->drAtrb = attrib & cpu_to_be16(~HFS_SB_ATTRIB_UNMNT); mdb->drAtrb = attrib | cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT); mdb->drWrCnt = cpu_to_be32(be32_to_cpu(mdb->drWrCnt) + 1); mdb->drLsMod = hfs_mtime(); mark_buffer_dirty(HFS_SB(sb)->mdb_bh); hfs_buffer_sync(HFS_SB(sb)->mdb_bh); } return 0; out_bh: brelse(bh); out: hfs_mdb_put(sb); return -EIO; }
/*
 * ext2_read_super - read the ext2 superblock and initialize the in-core sb
 * (Linux 2.4-era read_super entry point).
 *
 * @sb:     VFS super block; on-disk state is cached in sb->u.ext2_sb.
 * @data:   mount option string from userspace.
 * @silent: when non-zero, suppress "not an ext2 fs" complaints (probing).
 *
 * Returns @sb on success, NULL on any failure.  On failure all buffer
 * heads and the group-descriptor array acquired here are released.
 */
struct super_block * ext2_read_super (struct super_block * sb, void * data,
				      int silent)
{
	struct buffer_head * bh;
	struct ext2_super_block * es;
	unsigned long sb_block = 1;	/* superblock lives in 1K block 1 by default */
	unsigned short resuid = EXT2_DEF_RESUID;
	unsigned short resgid = EXT2_DEF_RESGID;
	unsigned long logic_sb_block = 1;
	unsigned long offset = 0;	/* byte offset of the sb inside its device block */
	int blocksize = BLOCK_SIZE;
	int db_count;			/* number of group-descriptor blocks */
	int i, j;

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	sb->u.ext2_sb.s_mount_opt = 0;
	/* Mount options may override sb_block / reserved uid+gid. */
	if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
	    &sb->u.ext2_sb.s_mount_opt)) {
		return NULL;
	}

	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk ("EXT2-fs: unable to set blocksize\n");
		return NULL;
	}

	/*
	 * If the superblock doesn't start on a sector boundary,
	 * calculate the offset.  FIXME(eric) this doesn't make sense
	 * that we would have to do this.
	 */
	/* Translate the 1K-based sb_block into device-blocksize units. */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		printk ("EXT2-fs: unable to read superblock\n");
		return NULL;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sb->u.ext2_sb.s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT2_SUPER_MAGIC) {
		if (!silent)
			printk ("VFS: Can't find ext2 filesystem on dev %s.\n",
				sb->s_id);
		goto failed_mount;
	}
	/* Rev 0 filesystems should have no feature flags at all. */
	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		printk("EXT2-fs warning: feature flags set on rev 0 fs, "
		       "running e2fsck is recommended\n");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if ((i = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP))) {
		printk("EXT2-fs: %s: couldn't mount because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, i);
		goto failed_mount;
	}
	/* Unsupported RO_COMPAT features only block a read-write mount. */
	if (!(sb->s_flags & MS_RDONLY) &&
	    (i = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		printk("EXT2-fs: %s: couldn't mount RDWR because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, i);
		goto failed_mount;
	}
	/* On-disk blocksize = 1K << s_log_block_size. */
	blocksize = BLOCK_SIZE << le32_to_cpu(EXT2_SB(sb)->s_es->s_log_block_size);
	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);
		if (!sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
			return NULL;
		}
		/* Recompute the sb location in the new blocksize units. */
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			/* NOTE: bh is NULL here; failed_mount's brelse(bh)
			 * relies on brelse(NULL) being a no-op. */
			printk("EXT2-fs: Couldn't read superblock on "
			       "2nd try.\n");
			goto failed_mount;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sb->u.ext2_sb.s_es = es;
		if (es->s_magic != le16_to_cpu(EXT2_SUPER_MAGIC)) {
			printk ("EXT2-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}
	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	/* Rev 0 uses fixed inode size / first inode; dynamic rev reads them. */
	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sb->u.ext2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sb->u.ext2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sb->u.ext2_sb.s_inode_size = le16_to_cpu(es->s_inode_size);
		sb->u.ext2_sb.s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sb->u.ext2_sb.s_inode_size != EXT2_GOOD_OLD_INODE_SIZE) {
			printk ("EXT2-fs: unsupported inode size: %d\n",
				sb->u.ext2_sb.s_inode_size);
			goto failed_mount;
		}
	}
	sb->u.ext2_sb.s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	/* A zero frag size is invalid; clearing s_magic makes the later
	 * s_magic != EXT2_SUPER_MAGIC check reject the mount. */
	if (sb->u.ext2_sb.s_frag_size)
		sb->u.ext2_sb.s_frags_per_block = sb->s_blocksize /
						  sb->u.ext2_sb.s_frag_size;
	else
		sb->s_magic = 0;
	sb->u.ext2_sb.s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sb->u.ext2_sb.s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sb->u.ext2_sb.s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
	sb->u.ext2_sb.s_inodes_per_block = sb->s_blocksize /
					   EXT2_INODE_SIZE(sb);
	sb->u.ext2_sb.s_itb_per_group = sb->u.ext2_sb.s_inodes_per_group /
					sb->u.ext2_sb.s_inodes_per_block;
	sb->u.ext2_sb.s_desc_per_block = sb->s_blocksize /
					 sizeof (struct ext2_group_desc);
	sb->u.ext2_sb.s_sbh = bh;	/* keep the sb buffer pinned for the mount */
	/* Mount options override the on-disk reserved-block owner. */
	if (resuid != EXT2_DEF_RESUID)
		sb->u.ext2_sb.s_resuid = resuid;
	else
		sb->u.ext2_sb.s_resuid = le16_to_cpu(es->s_def_resuid);
	if (resgid != EXT2_DEF_RESGID)
		sb->u.ext2_sb.s_resgid = resgid;
	else
		sb->u.ext2_sb.s_resgid = le16_to_cpu(es->s_def_resgid);
	sb->u.ext2_sb.s_mount_state = le16_to_cpu(es->s_state);
	/* log2 here is presumably a local integer-log helper — confirm. */
	sb->u.ext2_sb.s_addr_per_block_bits =
		log2 (EXT2_ADDR_PER_BLOCK(sb));
	sb->u.ext2_sb.s_desc_per_block_bits =
		log2 (EXT2_DESC_PER_BLOCK(sb));
	/* Re-check the magic: the frag-size path above may have zeroed it. */
	if (sb->s_magic != EXT2_SUPER_MAGIC) {
		if (!silent)
			printk ("VFS: Can't find an ext2 filesystem on dev "
				"%s.\n",
				sb->s_id);
		goto failed_mount;
	}
	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			printk ("VFS: Unsupported blocksize on dev "
				"%s.\n", sb->s_id);
		goto failed_mount;
	}

	if (sb->s_blocksize != sb->u.ext2_sb.s_frag_size) {
		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
			sb->u.ext2_sb.s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	/* Per-group counts cannot exceed the bits in one bitmap block. */
	if (sb->u.ext2_sb.s_blocks_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #blocks per group too big: %lu\n",
			sb->u.ext2_sb.s_blocks_per_group);
		goto failed_mount;
	}
	if (sb->u.ext2_sb.s_frags_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #fragments per group too big: %lu\n",
			sb->u.ext2_sb.s_frags_per_group);
		goto failed_mount;
	}
	if (sb->u.ext2_sb.s_inodes_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #inodes per group too big: %lu\n",
			sb->u.ext2_sb.s_inodes_per_group);
		goto failed_mount;
	}
	/* Number of block groups, rounding the last partial group up. */
	sb->u.ext2_sb.s_groups_count = (le32_to_cpu(es->s_blocks_count) -
				        le32_to_cpu(es->s_first_data_block) +
				       EXT2_BLOCKS_PER_GROUP(sb) - 1) /
				       EXT2_BLOCKS_PER_GROUP(sb);
	db_count = (sb->u.ext2_sb.s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sb->u.ext2_sb.s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sb->u.ext2_sb.s_group_desc == NULL) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount;
	}
	/* Group descriptors immediately follow the superblock. */
	for (i = 0; i < db_count; i++) {
		sb->u.ext2_sb.s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
		if (!sb->u.ext2_sb.s_group_desc[i]) {
			/* Release only the buffers read so far, then the array. */
			for (j = 0; j < i; j++)
				brelse (sb->u.ext2_sb.s_group_desc[j]);
			kfree(sb->u.ext2_sb.s_group_desc);
			printk ("EXT2-fs: unable to read group descriptors\n");
			goto failed_mount;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		printk ("EXT2-fs: group descriptors corrupted!\n");
		db_count = i;	/* i == db_count here after a full loop */
		goto failed_mount2;
	}
	/* Reset the small per-sb bitmap cache. */
	for (i = 0; i < EXT2_MAX_GROUP_LOADED; i++) {
		sb->u.ext2_sb.s_inode_bitmap_number[i] = 0;
		sb->u.ext2_sb.s_inode_bitmap[i] = NULL;
		sb->u.ext2_sb.s_block_bitmap_number[i] = 0;
		sb->u.ext2_sb.s_block_bitmap[i] = NULL;
	}
	sb->u.ext2_sb.s_loaded_inode_bitmaps = 0;
	sb->u.ext2_sb.s_loaded_block_bitmaps = 0;
	sb->u.ext2_sb.s_gdb_count = db_count;
	get_random_bytes(&sb->u.ext2_sb.s_next_generation, sizeof(u32));
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_root = d_alloc_root(iget(sb, EXT2_ROOT_INO));
	/* Sanity-check the root inode: must be a non-empty directory. */
	if (!sb->s_root || !S_ISDIR(sb->s_root->d_inode->i_mode) ||
	    !sb->s_root->d_inode->i_blocks || !sb->s_root->d_inode->i_size) {
		if (sb->s_root) {
			dput(sb->s_root);
			sb->s_root = NULL;
			printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
		} else
			printk(KERN_ERR "EXT2-fs: get root inode failed\n");
		goto failed_mount2;
	}
	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
	return sb;
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sb->u.ext2_sb.s_group_desc[i]);
	kfree(sb->u.ext2_sb.s_group_desc);
failed_mount:
	brelse(bh);
	return NULL;
}
/* ================================================================================== Function :me2fsFillSuperBlock Input :struct super_block *sb < vfs super block object > void *data < user data > int silent < silent flag > Output :void Return :void Description :fill my ext2 super block object ================================================================================== */ static int me2fsFillSuperBlock( struct super_block *sb, void *data, int silent ) { struct buffer_head *bh; struct ext2_super_block *esb; struct me2fs_sb_info *msi; struct inode *root; int block_size; int ret = -EINVAL; int i; int err; unsigned long sb_block = 1; /* ------------------------------------------------------------------------ */ /* allocate memory to me2fs_sb_info */ /* ------------------------------------------------------------------------ */ msi = kzalloc( sizeof( struct me2fs_sb_info ), GFP_KERNEL ); if( !msi ) { ME2FS_ERROR( "<ME2FS>error: unable to alloc me2fs_sb_info\n" ); ret = -ENOMEM; return( ret ); } /* set me2fs information to vfs super block */ sb->s_fs_info = ( void* )msi; /* ------------------------------------------------------------------------ */ /* allocate memory to spin locks for block group */ /* ------------------------------------------------------------------------ */ msi->s_blockgroup_lock = kzalloc( sizeof( struct blockgroup_lock ), GFP_KERNEL ); if( !msi->s_blockgroup_lock ) { ME2FS_ERROR( "<ME2FS>error: unabel to alloc s_blockgroup_lock\n" ); kfree( msi ); return( -ENOMEM ); } /* ------------------------------------------------------------------------ */ /* set device's block size and size bits to super block */ /* ------------------------------------------------------------------------ */ block_size = sb_min_blocksize( sb, BLOCK_SIZE ); DBGPRINT( "<ME2FS>Fill Super! 
block_size = %d\n", block_size ); DBGPRINT( "<ME2FS>s_blocksize_bits = %d\n", sb->s_blocksize_bits ); DBGPRINT( "<ME2FS>default block size is : %d\n", BLOCK_SIZE ); if( !block_size ) { ME2FS_ERROR( "<ME2FS>error: unable to set blocksize\n" ); goto error_read_sb; } /* ------------------------------------------------------------------------ */ /* read super block */ /* ------------------------------------------------------------------------ */ if( !( bh = sb_bread( sb, sb_block ) ) ) { ME2FS_ERROR( "<ME2FS>failed to bread super block\n" ); goto error_read_sb; } esb = ( struct ext2_super_block* )( bh->b_data ); /* ------------------------------------------------------------------------ */ /* check magic number */ /* ------------------------------------------------------------------------ */ sb->s_magic = le16_to_cpu( esb->s_magic ); if( sb->s_magic != ME2FS_SUPER_MAGIC ) { ME2FS_ERROR( "<ME2FS>error : magic of super block is %lu\n", sb->s_magic ); goto error_mount; } /* ------------------------------------------------------------------------ */ /* check revison */ /* ------------------------------------------------------------------------ */ if( ME2FS_OLD_REV == le32_to_cpu( esb->s_rev_level ) ) { ME2FS_ERROR( "<ME2FS>error : cannot mount old revision\n" ); goto error_mount; } dbgPrintExt2SB( esb ); /* ------------------------------------------------------------------------ */ /* set up me2fs super block information */ /* ------------------------------------------------------------------------ */ /* buffer cache information */ msi->s_esb = esb; msi->s_sbh = bh; /* super block(disk information cache) */ msi->s_sb_block = sb_block; msi->s_mount_opt = le32_to_cpu( esb->s_default_mount_opts ); msi->s_mount_state = le16_to_cpu( esb->s_state ); if( msi->s_mount_state != EXT2_VALID_FS ) { ME2FS_ERROR( "<ME2FS>error : cannot mount invalid fs\n" ); goto error_mount; } if( le16_to_cpu( esb->s_errors ) == EXT2_ERRORS_CONTINUE ) { DBGPRINT( "<ME2FS>s_errors : CONTINUE\n" ); } 
else if( le16_to_cpu( esb->s_errors ) == EXT2_ERRORS_PANIC ) { DBGPRINT( "<ME2FS>s_errors : PANIC\n" ); } else { DBGPRINT( "<ME2FS>s_errors : READ ONLY\n" ); } if( le32_to_cpu( esb->s_rev_level ) != EXT2_DYNAMIC_REV ) { ME2FS_ERROR( "<ME2FS>error : cannot mount unsupported revision\n" ); goto error_mount; } /* inode(disk information cache) */ msi->s_inode_size = le16_to_cpu( esb->s_inode_size ); if( ( msi->s_inode_size < 128 ) || !is_power_of_2( msi->s_inode_size ) || ( block_size < msi->s_inode_size ) ) { ME2FS_ERROR( "<ME2FS>error : cannot mount unsupported inode size %u\n", msi->s_inode_size ); goto error_mount; } msi->s_first_ino = le32_to_cpu( esb->s_first_ino ); msi->s_inodes_per_group = le32_to_cpu( esb->s_inodes_per_group ); msi->s_inodes_per_block = sb->s_blocksize / msi->s_inode_size; if( msi->s_inodes_per_block == 0 ) { ME2FS_ERROR( "<ME2FS>error : bad inodes per block\n" ); goto error_mount; } msi->s_itb_per_group = msi->s_inodes_per_group / msi->s_inodes_per_block; /* group(disk information cache) */ msi->s_blocks_per_group = le32_to_cpu( esb->s_blocks_per_group ); if( msi->s_blocks_per_group == 0 ) { ME2FS_ERROR( "<ME2FS>eroor : bad blocks per group\n" ); goto error_mount; } msi->s_groups_count = ( ( le32_to_cpu( esb->s_blocks_count ) - le32_to_cpu( esb->s_first_data_block ) - 1 ) / msi->s_blocks_per_group ) + 1; msi->s_desc_per_block = sb->s_blocksize / sizeof( struct ext2_group_desc ); msi->s_gdb_count = ( msi->s_groups_count + msi->s_desc_per_block - 1 ) / msi->s_desc_per_block; /* fragment(disk information cache) */ msi->s_frag_size = 1024 << le32_to_cpu( esb->s_log_frag_size ); if( msi->s_frag_size == 0 ) { ME2FS_ERROR( "<ME2FS>eroor : bad fragment size\n" ); goto error_mount; } msi->s_frags_per_block = sb->s_blocksize / msi->s_frag_size; msi->s_frags_per_group = le32_to_cpu( esb->s_frags_per_group ); /* deaults(disk information cache) */ msi->s_resuid = make_kuid( &init_user_ns, le16_to_cpu( esb->s_def_resuid ) ); msi->s_resgid = make_kgid( 
&init_user_ns, le16_to_cpu( esb->s_def_resgid ) ); dbgPrintMe2fsInfo( msi ); /* ------------------------------------------------------------------------ */ /* read block group descriptor table */ /* ------------------------------------------------------------------------ */ msi->s_group_desc = kmalloc( msi->s_gdb_count * sizeof( struct buffer_head* ), GFP_KERNEL ); if( !msi->s_group_desc ) { ME2FS_ERROR( "<ME2FS>error : alloc memory for group desc is failed\n" ); goto error_mount; } for( i = 0 ; i < msi->s_gdb_count ; i++ ) { unsigned long block; block = getDescriptorLocation( sb, sb_block, i ); if( !( msi->s_group_desc[ i ] = sb_bread( sb, block ) ) ) //if( !( msi->s_group_desc[ i ] = sb_bread( sb, sb_block + i + 1 ) ) ) { ME2FS_ERROR( "<ME2FS>error : cannot read " ); ME2FS_ERROR( "block group descriptor[ group=%d ]\n", i ); goto error_mount_phase2; } } /* ------------------------------------------------------------------------ */ /* sanity check for group descriptors */ /* ------------------------------------------------------------------------ */ for( i = 0 ; i < msi->s_groups_count ; i++ ) { struct ext2_group_desc *gdesc; unsigned long first_block; unsigned long last_block; unsigned long ar_block; DBGPRINT( "<ME2FS>Block Count %d\n", i ); if( !( gdesc = me2fsGetGroupDescriptor( sb, i ) ) ) { /* corresponding group descriptor does not exist */ goto error_mount_phase2; } first_block = ext2GetFirstBlockNum( sb, i ); if( i == ( msi->s_groups_count - 1 ) ) { last_block = le32_to_cpu( esb->s_blocks_count ) - 1; } else { last_block = first_block + ( esb->s_blocks_per_group - 1 ); } DBGPRINT( "<ME2FS>first = %lu, last = %lu\n", first_block, last_block ); ar_block = le32_to_cpu( gdesc->bg_block_bitmap ); if( ( ar_block < first_block ) || ( last_block < ar_block ) ) { ME2FS_ERROR( "<ME2FS>error : block num of block bitmap is " ); ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n", i, first_block, ar_block, last_block ); goto error_mount_phase2; } 
ar_block = le32_to_cpu( gdesc->bg_inode_bitmap ); if( ( ar_block < first_block ) || ( last_block < ar_block ) ) { ME2FS_ERROR( "<ME2FS>error : block num of inode bitmap is " ); ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n", i, first_block, ar_block, last_block ); goto error_mount_phase2; } ar_block = le32_to_cpu( gdesc->bg_inode_table ); if( ( ar_block < first_block ) || ( last_block < ar_block ) ) { ME2FS_ERROR( "<ME2FS>error : block num of inode table is " ); ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n", i, first_block, ar_block, last_block ); goto error_mount_phase2; } dbgPrintExt2Bgd( gdesc, i ); } /* ------------------------------------------------------------------------ */ /* initialize exclusive locks */ /* ------------------------------------------------------------------------ */ spin_lock_init( &msi->s_lock ); bgl_lock_init( msi->s_blockgroup_lock ); err = percpu_counter_init( &msi->s_freeblocks_counter, me2fsCountFreeBlocks( sb ) ); if( err ) { ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" ); ME2FS_ERROR( "[s_freeblocks_counter]\n" ); goto error_mount_phase3; } err = percpu_counter_init( &msi->s_freeinodes_counter, me2fsCountFreeInodes( sb ) ); if( err ) { ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" ); ME2FS_ERROR( "[s_freeinodes_counter]\n" ); goto error_mount_phase3; } err = percpu_counter_init( &msi->s_dirs_counter, me2fsCountDirectories( sb ) ); if( err ) { ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" ); ME2FS_ERROR( "[s_dirs_counter]\n" ); goto error_mount_phase3; } /* ------------------------------------------------------------------------ */ /* set up vfs super block */ /* ------------------------------------------------------------------------ */ sb->s_op = &me2fs_super_ops; //sb->s_export_op = &me2fs_export_ops; //sb->s_xattr = me2fs_xattr_handler; sb->s_maxbytes = me2fsMaxFileSize( sb ); sb->s_max_links = ME2FS_LINK_MAX; DBGPRINT( 
"<ME2FS>max file size = %lld\n", sb->s_maxbytes ); root = me2fsGetVfsInode( sb, ME2FS_EXT2_ROOT_INO ); //root = iget_locked( sb, ME2FS_EXT2_ROOT_INO ); if( IS_ERR( root ) ) { ME2FS_ERROR( "<ME2FS>error : failed to get root inode\n" ); ret = PTR_ERR( root ); goto error_mount_phase3; } if( !S_ISDIR( root->i_mode ) ) { ME2FS_ERROR( "<ME2FS>root is not directory!!!!\n" ); } sb->s_root = d_make_root( root ); //sb->s_root = d_alloc_root( root_ino ); if( !sb->s_root ) { ME2FS_ERROR( "<ME2FS>error : failed to make root\n" ); ret = -ENOMEM; goto error_mount_phase3; } le16_add_cpu( &esb->s_mnt_count, 1 ); me2fsWriteSuper( sb ); DBGPRINT( "<ME2FS> me2fs is mounted !\n" ); return( 0 ); /* ------------------------------------------------------------------------ */ /* destroy percpu counter */ /* ------------------------------------------------------------------------ */ error_mount_phase3: percpu_counter_destroy( &msi->s_freeblocks_counter ); percpu_counter_destroy( &msi->s_freeinodes_counter ); percpu_counter_destroy( &msi->s_dirs_counter ); /* ------------------------------------------------------------------------ */ /* release buffer for group descriptors */ /* ------------------------------------------------------------------------ */ error_mount_phase2: for( i = 0 ; i < msi->s_gdb_count ; i++ ) { brelse( msi->s_group_desc[ i ] ); } kfree( msi->s_group_desc ); /* ------------------------------------------------------------------------ */ /* release buffer cache for read super block */ /* ------------------------------------------------------------------------ */ error_mount: brelse( bh ); /* ------------------------------------------------------------------------ */ /* release me2fs super block information */ /* ------------------------------------------------------------------------ */ error_read_sb: sb->s_fs_info = NULL; kfree( msi->s_blockgroup_lock ); kfree( msi ); return( ret ); }
/* Called to mount a filesystem by read_super() in fs/super.c.
 * Return a super block, the main structure of a filesystem.
 *
 * NOTE : Don't store a pointer to an option, as the page containing the
 * options is freed after ntfs_read_super() returns.
 *
 * NOTE : A context switch can happen in kernel code only if the code blocks
 * (= calls schedule() in kernel/sched.c).
 *
 * On any failure the function funnels through the label chain at the
 * bottom and returns NULL (via "sb = NULL; goto ntfs_read_super_ret").
 */
struct super_block *ntfs_read_super(struct super_block *sb, void *options,
		int silent)
{
	ntfs_volume *vol;
	struct buffer_head *bh;
	int i, to_read, blocksize;

	ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n");
	/* The ntfs_volume lives inside the super_block's fs-private area. */
	vol = NTFS_SB2VOL(sb);
	init_ntfs_super_block(vol);
	if (!parse_options(vol, (char*)options))
		goto ntfs_read_super_vol;
	blocksize = sb_min_blocksize(sb, 512);
	if (!blocksize) {
		ntfs_error("Unable to set blocksize.\n");
		goto ntfs_read_super_vol;
	}
	/* Read the super block (boot block). */
	if (!(bh = sb_bread(sb, 0))) {
		ntfs_error("Reading super block failed\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Done reading boot block\n");
	/* Check for valid 'NTFS' boot sector. */
	if (!is_boot_sector_ntfs(bh->b_data)) {
		ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n");
		/* bforget: discard the buffer without writing it back. */
		bforget(bh);
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Going to init volume\n");
	if (ntfs_init_volume(vol, bh->b_data) < 0) {
		ntfs_debug(DEBUG_OTHER, "Init volume failed.\n");
		bforget(bh);
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn);
	brelse(bh);
	NTFS_SB(vol) = sb;
	if (vol->cluster_size > PAGE_SIZE) {
		ntfs_error("Partition cluster size is not supported yet (it "
			   "is > max kernel blocksize).\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
	/* Inform the kernel that a device block is a NTFS cluster. */
	if (!sb_set_blocksize(sb, vol->cluster_size)) {
		ntfs_error("Cluster size too small for device.\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
	/* Allocate an MFT record (MFT record can be smaller than a cluster). */
	i = vol->cluster_size;
	if (i < vol->mft_record_size)
		i = vol->mft_record_size;
	if (!(vol->mft = ntfs_malloc(i)))
		goto ntfs_read_super_unl;
	/* Read at least the MFT record for $Mft. */
	to_read = vol->mft_clusters_per_record;
	if (to_read < 1)
		to_read = 1;
	/* Copy the $Mft record cluster by cluster into the vol->mft buffer. */
	for (i = 0; i < to_read; i++) {
		if (!(bh = sb_bread(sb, vol->mft_lcn + i))) {
			ntfs_error("Could not read $Mft record 0\n");
			goto ntfs_read_super_mft;
		}
		ntfs_memcpy(vol->mft + ((__s64)i << vol->cluster_size_bits),
			    bh->b_data, vol->cluster_size);
		brelse(bh);
		ntfs_debug(DEBUG_OTHER, "Read cluster 0x%x\n",
			   vol->mft_lcn + i);
	}
	/* Check and fixup this MFT record */
	if (!ntfs_check_mft_record(vol, vol->mft)){
		ntfs_error("Invalid $Mft record 0\n");
		goto ntfs_read_super_mft;
	}
	/* Inform the kernel about which super operations are available. */
	sb->s_op = &ntfs_super_operations;
	sb->s_magic = NTFS_SUPER_MAGIC;
	sb->s_maxbytes = ~0ULL >> 1;
	ntfs_debug(DEBUG_OTHER, "Reading special files\n");
	if (ntfs_load_special_files(vol)) {
		ntfs_error("Error loading special files\n");
		goto ntfs_read_super_mft;
	}
	ntfs_debug(DEBUG_OTHER, "Getting RootDir\n");
	/* Get the root directory. */
	if (!(sb->s_root = d_alloc_root(iget(sb, FILE_root)))) {
		ntfs_error("Could not get root dir inode\n");
		goto ntfs_read_super_mft;
	}
ntfs_read_super_ret:
	/* Shared exit: sb is the live superblock on success, NULL on error. */
	ntfs_debug(DEBUG_OTHER, "read_super: done\n");
	return sb;
ntfs_read_super_mft:
	ntfs_free(vol->mft);
ntfs_read_super_unl:
	/* NOTE(review): label name suggests an unlock once lived here;
	 * today it just falls through to the common error return. */
ntfs_read_super_vol:
	sb = NULL;
	goto ntfs_read_super_ret;
}
/* ================================================================================== Function :me2fsFillSuperBlock Input :struct super_block *sb < vfs super block object > void *data < user data > int silent < silent flag > Output :void Return :void Description :fill my ext2 super block object ================================================================================== */ static int me2fsFillSuperBlock( struct super_block *sb, void *data, int silent ) { struct buffer_head *bh; struct ext2_super_block *esb; struct me2fs_sb_info *msi; int block_size; int ret = -EINVAL; //unsigned long sb_block = 1; /* ------------------------------------------------------------------------ */ /* allocate memory to me2fs_sb_info */ /* ------------------------------------------------------------------------ */ msi = kzalloc( sizeof( struct me2fs_sb_info ), GFP_KERNEL ); if( !msi ) { DBGPRINT( "<ME2FS>error: unable to alloc me2fs_sb_info\n" ); ret = -ENOMEM; return( ret ); } /* ------------------------------------------------------------------------ */ /* set device's block size to super block */ /* ------------------------------------------------------------------------ */ block_size = sb_min_blocksize( sb, BLOCK_SIZE ); DBGPRINT( "<ME2FS>Fill Super! 
block_size = %d\n", block_size ); DBGPRINT( "<ME2FS>default block size is : %d\n", BLOCK_SIZE ); if( !block_size ) { DBGPRINT( "<ME2FS>error: unable to set blocksize\n" ); goto error_read_sb; } if( !( bh = sb_bread( sb, 1 ) ) ) { DBGPRINT( "<ME2FS>failed to bread super block\n" ); goto error_read_sb; } esb = ( struct ext2_super_block* )( bh->b_data ); sb->s_magic = le16_to_cpu( esb->s_magic ); if( sb->s_magic != ME2FS_SUPER_MAGIC ) { DBGPRINT( "<ME2FS>error : magic of super block is %lu\n", sb->s_magic ); goto error_read_sb; } msi->s_esb = esb; msi->s_sbh = bh; //dbgPrintExt2SB( esb ); /* ------------------------------------------------------------------------ */ /* set up vfs super block */ /* ------------------------------------------------------------------------ */ sb->s_fs_info = ( void* )msi; return( 0 ); error_read_sb: kfree( msi ); return( ret ); }
static int tux3_fill_super(struct super_block *sb, void *data, int silent) { static struct tux_iattr iattr; struct sb *sbi; struct root iroot; int err, blocksize; sbi = kzalloc(sizeof(struct sb), GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->vfs_sb = sb; sb->s_fs_info = sbi; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = 0x54555833; sb->s_op = &tux3_super_ops; sb->s_time_gran = 1; mutex_init(&sbi->loglock); err = -EIO; blocksize = sb_min_blocksize(sb, BLOCK_SIZE); if (!blocksize) { if (!silent) printk(KERN_ERR "TUX3: unable to set blocksize\n"); goto error; } err = tux_load_sb(sb, &iroot, silent); if (err) goto error; printk("%s: depth %Lu, block %Lu\n", __func__, (L)iroot.depth, (L)iroot.block); printk("%s: blocksize %u, blockbits %u, blockmask %08x\n", __func__, sbi->blocksize, sbi->blockbits, sbi->blockmask); printk("%s: volblocks %Lu, freeblocks %Lu, nextalloc %Lu\n", __func__, sbi->volblocks, sbi->freeblocks, sbi->nextalloc); printk("%s: freeatom %u, atomgen %u\n", __func__, sbi->freeatom, sbi->atomgen); if (sbi->blocksize != blocksize) { if (!sb_set_blocksize(sb, sbi->blocksize)) { printk(KERN_ERR "TUX3: blocksize too small for device.\n"); goto error; } } printk("%s: s_blocksize %lu\n", __func__, sb->s_blocksize); err = -ENOMEM; sbi->volmap = tux_new_volmap(tux_sb(sb)); if (!sbi->volmap) goto error; /* Initialize itable btree */ init_btree(itable_btree(sbi), sbi, iroot, &itable_ops); // struct inode *vtable; sbi->bitmap = tux3_iget(sb, TUX_BITMAP_INO); err = PTR_ERR(sbi->bitmap); if (IS_ERR(sbi->bitmap)) goto error_bitmap; sbi->rootdir = tux3_iget(sb, TUX_ROOTDIR_INO); err = PTR_ERR(sbi->rootdir); if (IS_ERR(sbi->rootdir)) goto error_rootdir; sbi->atable = tux3_iget(sb, TUX_ATABLE_INO); err = PTR_ERR(sbi->atable); if (IS_ERR(sbi->atable)) goto error_atable; err = -ENOMEM; sbi->logmap = tux_new_inode(sbi->rootdir, &iattr, 0); if (!sbi->logmap) goto error_logmap; sb->s_root = d_alloc_root(sbi->rootdir); if (!sb->s_root) goto error_alloc_root; return 0; 
error_alloc_root: iput(sbi->logmap); error_logmap: iput(sbi->atable); error_atable: iput(sbi->rootdir); error_rootdir: iput(sbi->bitmap); error_bitmap: iput(sbi->volmap); error: kfree(sbi); return err; }
/*
 * ext2_fill_super - read the on-disk ext2 superblock and fully initialise
 * the VFS super_block: mount options, block/fragment geometry, group
 * descriptors, the block-reservation window, per-cpu free counters and
 * finally the root inode/dentry.
 *
 * Returns 0 on success or a negative errno; on failure the goto chain
 * releases everything acquired so far (labels unwind in reverse order).
 */
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto failed;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		goto failed;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;
	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif

	/* On-disk error policy; RO is the conservative default. */
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));

	set_opt(sbi->s_mount_opt, RESERVATION);

	/* User-supplied options may override the on-disk defaults above. */
	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);
	sb->s_iflags |= SB_I_CGROUPWB;

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
		       "unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
		       "unsupported optional features (%x)",
		       le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
		struct blk_dax_ctl dax = {
			.sector = 0,
			.size = PAGE_SIZE,
		};

		/* DAX requires fs blocksize == page size. */
		if (blocksize != PAGE_SIZE) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for dax");
			goto failed_mount;
		}
		err = bdev_direct_access(sb->s_bdev, &dax);
		if (err < 0) {
			switch (err) {
			case -EOPNOTSUPP:
				ext2_msg(sb, KERN_ERR,
					"error: device does not support dax");
				break;
			case -EINVAL:
				ext2_msg(sb, KERN_ERR,
					"error: unaligned partition for dax");
				break;
			default:
				ext2_msg(sb, KERN_ERR,
					"error: dax access failed (%d)", err);
				break;
			}
			goto failed_mount;
		}
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read"
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	sb->s_max_links = EXT2_LINK_MAX;

	/* Rev 0 filesystems use fixed inode size and first inode number. */
	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu"
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	/* Each per-group bitmap is one block, so at most blocksize*8 bits. */
	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
				le32_to_cpu(es->s_first_data_block) - 1)
					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
	/* Read every group-descriptor block; unwind on partial failure. */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per fileystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb), GFP_KERNEL);
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb), GFP_KERNEL);
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb), GFP_KERNEL);
	}
	if (err) {
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}

#ifdef CONFIG_EXT2_FS_XATTR
	sbi->s_mb_cache = ext2_xattr_create_cache();
	if (!sbi->s_mb_cache) {
		ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
		goto failed_mount3;
	}
#endif
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	/* d_make_root() consumes the inode even on failure — no iput here. */
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
failed_mount3:
	if (sbi->s_mb_cache)
		ext2_xattr_destroy_cache(sbi->s_mb_cache);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
failed:
	return ret;
}

/*
 * ext2_clear_super_error - clear a stale write-IO error on the cached
 * superblock buffer so that a subsequent write can be retried.
 */
static void ext2_clear_super_error(struct super_block *sb)
{
	struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;

	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear. A previous attempt to write the
		 * superblock failed. This could happen because the
		 * USB device was yanked out. Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped. Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext2_msg(sb, KERN_ERR,
		       "previous I/O error to superblock detected\n");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
}

/*
 * ext2_sync_super - refresh the free-block/free-inode counts and write
 * time in the in-core superblock, then write it out.  If @wait is set
 * the write is synchronous.
 */
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
			    int wait)
{
	ext2_clear_super_error(sb);
	spin_lock(&EXT2_SB(sb)->s_lock);
	es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
	es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
	es->s_wtime = cpu_to_le32(get_seconds());
	/* unlock before we do IO */
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
	if (wait)
		sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
}
/*
 * linvfs_fill_super - XFS mount entry point for the Linux VFS glue.
 *
 * Builds the behavior-chain vfs object, parses the mount arguments,
 * performs the XFS mount proper, publishes geometry from statvfs into
 * the super_block and instantiates the root dentry.
 *
 * NOTE(review): the VFS_* macros are XFS behavior-chain dispatchers that
 * return their status through the trailing "error" argument (positive
 * error codes, hence "return -error" at the end) — confirm against the
 * vfsops definitions.
 */
STATIC int
linvfs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	vnode_t			*rootvp;
	struct vfs		*vfsp = vfs_allocate();
	struct xfs_mount_args	*args = xfs_args_allocate(sb);
	struct kstatfs		statvfs;
	int			error;

	vfsp->vfs_super = sb;
	LINVFS_SET_VFS(sb, vfsp);
	if (sb->s_flags & MS_RDONLY)
		vfsp->vfs_flag |= VFS_RDONLY;
	bhv_insert_all_vfsops(vfsp);

	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &linvfs_export_ops;
	sb->s_qcop = &linvfs_qops;
	sb->s_op = &linvfs_sops;

	VFS_MOUNT(vfsp, args, NULL, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	/* Geometry (magic, blocksize, max offset) comes from statvfs. */
	VFS_STATVFS(vfsp, &statvfs, NULL, error);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	set_posix_acl_flag(sb);

	VFS_ROOT(vfsp, &rootvp, error);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
	if (!sb->s_root)
		goto fail_vnrele;
	if (is_bad_inode(sb->s_root->d_inode))
		goto fail_vnrele;
	if (linvfs_start_syncd(vfsp))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	/*
	 * If the root dentry exists, dput() drops the vnode reference
	 * through the dentry; otherwise release the vnode directly.
	 */
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	VFS_UNMOUNT(vfsp, 0, NULL, error);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}
static int tux3_fill_super(struct super_block *sb, void *data, int silent) { struct sb *sbi; int err, blocksize; sbi = kzalloc(sizeof(struct sb), GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->vfs_sb = sb; sb->s_fs_info = sbi; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = TUX3_SUPER_MAGIC; sb->s_op = &tux3_super_ops; sb->s_time_gran = 1; mutex_init(&sbi->loglock); INIT_LIST_HEAD(&sbi->alloc_inodes); err = -EIO; blocksize = sb_min_blocksize(sb, BLOCK_SIZE); if (!blocksize) { if (!silent) printk(KERN_ERR "TUX3: unable to set blocksize\n"); goto error; } if ((err = load_sb(tux_sb(sb)))) { if (!silent) { if (err == -EINVAL) warn("invalid superblock [%Lx]", (L)from_be_u64(*(be_u64 *)sbi->super.magic)); else warn("Unable to read superblock"); } goto error; } if (sbi->blocksize != blocksize) { if (!sb_set_blocksize(sb, sbi->blocksize)) { printk(KERN_ERR "TUX3: blocksize too small for device.\n"); goto error; } } warn("s_blocksize %lu", sb->s_blocksize); err = -ENOMEM; sbi->volmap = tux_new_volmap(tux_sb(sb)); if (!sbi->volmap) goto error; insert_inode_hash(sbi->volmap); sbi->logmap = tux_new_logmap(tux_sb(sb)); if (!sbi->logmap) goto error_logmap; err = load_itable(sbi); if (err) goto error_bitmap; // struct inode *vtable; sbi->bitmap = tux3_iget(sb, TUX_BITMAP_INO); err = PTR_ERR(sbi->bitmap); if (IS_ERR(sbi->bitmap)) goto error_bitmap; sbi->rootdir = tux3_iget(sb, TUX_ROOTDIR_INO); err = PTR_ERR(sbi->rootdir); if (IS_ERR(sbi->rootdir)) goto error_rootdir; sbi->atable = tux3_iget(sb, TUX_ATABLE_INO); err = PTR_ERR(sbi->atable); if (IS_ERR(sbi->atable)) goto error_atable; sb->s_root = d_alloc_root(sbi->rootdir); if (!sb->s_root) goto error_alloc_root; return 0; error_alloc_root: iput(sbi->atable); error_atable: iput(sbi->rootdir); error_rootdir: iput(sbi->bitmap); error_bitmap: iput(sbi->logmap); error_logmap: iput(sbi->volmap); error: kfree(sbi); return err; }
static int lab4fs_fill_super(struct super_block * sb, void * data, int silent) { struct buffer_head * bh; int blocksize = BLOCK_SIZE; unsigned long logic_sb_block; unsigned offset = 0; unsigned long sb_block = 1; struct lab4fs_super_block *es; struct lab4fs_sb_info *sbi; struct inode *root; int hblock; int err = 0; sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; memset(sbi, 0, sizeof(*sbi)); blocksize = sb_min_blocksize(sb, BLOCK_SIZE); if (!blocksize) { LAB4ERROR("unable to set blocksize\n"); err = -EIO; goto out_fail; } /* * If the superblock doesn't start on a hardware sector boundary, * calculate the offset. */ if (blocksize != BLOCK_SIZE) { logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize; offset = (sb_block * BLOCK_SIZE) % blocksize; } else { logic_sb_block = sb_block; } if (!(bh = sb_bread(sb, logic_sb_block))) { LAB4ERROR("unable to read super block\n"); goto out_fail; } es = (struct lab4fs_super_block *) (((char *)bh->b_data) + offset); sb->s_magic = le32_to_cpu(es->s_magic); if (sb->s_magic != LAB4FS_SUPER_MAGIC) { if (!silent) LAB4ERROR("VFS: Can't find lab4fs filesystem on dev %s.\n", sb->s_id); goto failed_mount; } sbi->s_sb = es; blocksize = le32_to_cpu(es->s_block_size); hblock = bdev_hardsect_size(sb->s_bdev); if (sb->s_blocksize != blocksize) { /* * Make sure the blocksize for the filesystem is larger * than the hardware sectorsize for the machine. 
*/ if (blocksize < hblock) { LAB4ERROR("blocksize %d too small for " "device blocksize %d.\n", blocksize, hblock); goto failed_mount; } brelse (bh); sb_set_blocksize(sb, blocksize); logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize; offset = (sb_block * BLOCK_SIZE) % blocksize; bh = sb_bread(sb, logic_sb_block); if (!bh) { LAB4ERROR("Can't read superblock on 2nd try.\n"); goto failed_mount; } es = (struct lab4fs_super_block *)(((char *)bh->b_data) + offset); sbi->s_sb = es; if (es->s_magic != cpu_to_le32(LAB4FS_SUPER_MAGIC)) { LAB4ERROR("Magic mismatch, very weird !\n"); goto failed_mount; } } sb->s_maxbytes = lab4fs_max_size(es); sbi->s_sbh = bh; sbi->s_log_block_size = log2(sb->s_blocksize); sbi->s_first_ino = le32_to_cpu(es->s_first_inode); sbi->s_inode_size = le32_to_cpu(es->s_inode_size); sbi->s_log_inode_size = log2(sbi->s_inode_size); sbi->s_inode_table = le32_to_cpu(es->s_inode_table); sbi->s_data_blocks = le32_to_cpu(es->s_data_blocks); sbi->s_next_generation = 0; sbi->s_free_inodes_count = le32_to_cpu(es->s_free_inodes_count); sbi->s_free_data_blocks_count = le32_to_cpu(es->s_free_data_blocks_count); sbi->s_inodes_count = le32_to_cpu(es->s_inodes_count); sbi->s_blocks_count = le32_to_cpu(es->s_blocks_count); sbi->s_inode_bitmap.nr_valid_bits = le32_to_cpu(es->s_inodes_count); sbi->s_data_bitmap.nr_valid_bits = le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_data_blocks); rwlock_init(&sbi->rwlock); sb->s_op = &lab4fs_super_ops; err = bitmap_setup(&sbi->s_inode_bitmap, sb, le32_to_cpu(es->s_inode_bitmap)); if (err) goto out_fail; err = bitmap_setup(&sbi->s_data_bitmap, sb, le32_to_cpu(es->s_data_bitmap)); if (err) goto out_fail; sbi->s_root_inode = le32_to_cpu(es->s_root_inode); root = iget(sb, sbi->s_root_inode); LAB4DEBUG("I can get the root inode\n"); print_inode(root); LAB4DEBUG("END\n"); sb->s_root = d_alloc_root(root); if (!sb->s_root) { iput(root); kfree(sbi); return -ENOMEM; } return 0; failed_mount: out_fail: kfree(sbi); return err; }