static int efs_fill_super(struct super_block *s, void *d, int silent)
{
	struct efs_sb_info *sb;
	struct buffer_head *bh;
	struct inode *root;
	int ret = -EINVAL;

	sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
	if (!sb)
		return -ENOMEM;
	s->s_fs_info = sb;

	s->s_magic		= EFS_SUPER_MAGIC;
	if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
		printk(KERN_ERR "EFS: device does not support %d byte blocks\n",
			EFS_BLOCKSIZE);
		goto out_no_fs_ul;
	}

	/* read the vh (volume header) block */
	bh = sb_bread(s, 0);

	if (!bh) {
		printk(KERN_ERR "EFS: cannot read volume header\n");
		goto out_no_fs_ul;
	}

	/*
	 * if this returns zero then we didn't find any partition table.
	 * this isn't (yet) an error - just assume for the moment that
	 * the device is valid and go on to search for a superblock.
	 */
	sb->fs_start = efs_validate_vh((struct volume_header *) bh->b_data);
	brelse(bh);

	if (sb->fs_start == -1) {
		goto out_no_fs_ul;
	}

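	/* the superblock sits a fixed EFS_SUPER blocks past the start of the
	 * filesystem area found above */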
	bh = sb_bread(s, sb->fs_start + EFS_SUPER);
	if (!bh) {
		printk(KERN_ERR "EFS: cannot read superblock\n");
		goto out_no_fs_ul;
	}

	if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) {
#ifdef DEBUG
		printk(KERN_WARNING "EFS: invalid superblock at block %u\n", sb->fs_start + EFS_SUPER);
#endif
		brelse(bh);
		goto out_no_fs_ul;
	}
	brelse(bh);

	if (!(s->s_flags & MS_RDONLY)) {
#ifdef DEBUG
		printk(KERN_INFO "EFS: forcing read-only mode\n");
#endif
		s->s_flags |= MS_RDONLY;
	}
	s->s_op   = &efs_superblock_operations;
	s->s_export_op = &efs_export_ops;
	root = efs_iget(s, EFS_ROOTINODE);
	if (IS_ERR(root)) {
		printk(KERN_ERR "EFS: get root inode failed\n");
		ret = PTR_ERR(root);
		goto out_no_fs;
	}

	s->s_root = d_make_root(root);
	if (!(s->s_root)) {
		printk(KERN_ERR "EFS: get root dentry failed\n");
		ret = -ENOMEM;
		goto out_no_fs;
	}

	return 0;

out_no_fs_ul:
out_no_fs:
	s->s_fs_info = NULL;
	kfree(sb);
	return ret;
}
Example #2
0
static int
nilfs_get_sb(struct file_system_type *fs_type, int flags,
	     const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	struct the_nilfs *nilfs;
	int err, need_to_close = 1;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	/*
	 * To get mount instance using sget() vfs-routine, NILFS needs
	 * much more information than normal filesystems to identify mount
	 * instance.  For snapshot mounts, not only a mount type (ro-mount
	 * or rw-mount) but also a checkpoint number is required.
	 */
	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	nilfs = find_or_create_nilfs(sd.bdev);
	if (!nilfs) {
		err = -ENOMEM;
		goto failed;
	}

	mutex_lock(&nilfs->ns_mount_mutex);

	if (!sd.cno) {
		/*
		 * Check if an exclusive mount exists or not.
		 * Snapshot mounts coexist with a current mount
		 * (i.e. rw-mount or ro-mount), whereas rw-mount and
		 * ro-mount are mutually exclusive.
		 */
		down_read(&nilfs->ns_super_sem);
		if (nilfs->ns_current &&
		    ((nilfs->ns_current->s_super->s_flags ^ flags)
		     & MS_RDONLY)) {
			up_read(&nilfs->ns_super_sem);
			err = -EBUSY;
			goto failed_unlock;
		}
		up_read(&nilfs->ns_super_sem);
	}

	/*
	 * Find existing nilfs_sb_info struct
	 */
	sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);

	/*
	 * Get super block instance holding the nilfs_sb_info struct.
	 * A new instance is allocated if no existing mount is present or
	 * existing instance has been unmounted.
	 */
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, &sd);
	if (sd.sbi)
		nilfs_put_sbinfo(sd.sbi);

	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed_unlock;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
				       nilfs);
		if (err)
			goto cancel_new;

		s->s_flags |= MS_ACTIVE;
		need_to_close = 0;
	}

	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
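	/*
	 * A reused superblock already owns the block device, so drop the
	 * extra reference taken by open_bdev_exclusive(); a newly filled
	 * superblock keeps it.
	 */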
	if (need_to_close)
		close_bdev_exclusive(sd.bdev, mode);
	simple_set_mnt(mnt, s);
	return 0;

 failed_unlock:
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
 failed:
	close_bdev_exclusive(sd.bdev, mode);

	return err;

 cancel_new:
	/* Abandoning the newly allocated superblock */
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	deactivate_locked_super(s);
	/*
	 * deactivate_locked_super() invokes close_bdev_exclusive().
	 * We must finish all post-cleaning before this call;
	 * put_nilfs() needs the block device.
	 */
	return err;
}
static int bfs_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh, *sbh;
	struct bfs_super_block *bfs_sb;
	struct inode *inode;
	unsigned i, imap_len;
	struct bfs_sb_info *info;
	int ret = -EINVAL;
	unsigned long i_sblock, i_eblock, i_eoff, s_size;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	mutex_init(&info->bfs_lock);
	s->s_fs_info = info;

	sb_set_blocksize(s, BFS_BSIZE);

	sbh = sb_bread(s, 0);
	if (!sbh)
		goto out;
	bfs_sb = (struct bfs_super_block *)sbh->b_data;
	if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
		if (!silent)
			printf("No BFS filesystem on %s (magic=%08x)\n", 
				s->s_id,  le32_to_cpu(bfs_sb->s_magic));
		goto out1;
	}
	if (BFS_UNCLEAN(bfs_sb, s) && !silent)
		printf("%s is unclean, continuing\n", s->s_id);

	s->s_magic = BFS_MAGIC;

	if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
		printf("Superblock is corrupted\n");
		goto out1;
	}

	info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
					sizeof(struct bfs_inode)
					+ BFS_ROOT_INO - 1;
	imap_len = (info->si_lasti / 8) + 1;
	info->si_imap = kzalloc(imap_len, GFP_KERNEL);
	if (!info->si_imap)
		goto out1;
	for (i = 0; i < BFS_ROOT_INO; i++)
		set_bit(i, info->si_imap);

	s->s_op = &bfs_sops;
	inode = bfs_iget(s, BFS_ROOT_INO);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out2;
	}
	s->s_root = d_make_root(inode);
	if (!s->s_root) {
		ret = -ENOMEM;
		goto out2;
	}

	info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
	info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1
			- le32_to_cpu(bfs_sb->s_start)) >> BFS_BSIZE_BITS;
	info->si_freei = 0;
	info->si_lf_eblk = 0;

	/* can we read the last block? */
	bh = sb_bread(s, info->si_blocks - 1);
	if (!bh) {
		printf("Last block not available: %lu\n", info->si_blocks - 1);
		ret = -EIO;
		goto out3;
	}
	brelse(bh);

	bh = NULL;
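	/* scan the whole inode table: sanity check each on-disk inode, mark
	 * used inodes in the bitmap and account free inodes and blocks */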
	for (i = BFS_ROOT_INO; i <= info->si_lasti; i++) {
		struct bfs_inode *di;
		int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
		int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
		unsigned long eblock;

		if (!off) {
			brelse(bh);
			bh = sb_bread(s, block);
		}

		if (!bh)
			continue;

		di = (struct bfs_inode *)bh->b_data + off;

		/* test if filesystem is not corrupted */

		i_eoff = le32_to_cpu(di->i_eoffset);
		i_sblock = le32_to_cpu(di->i_sblock);
		i_eblock = le32_to_cpu(di->i_eblock);
		s_size = le32_to_cpu(bfs_sb->s_end);

		if (i_sblock > info->si_blocks ||
			i_eblock > info->si_blocks ||
			i_sblock > i_eblock ||
			i_eoff > s_size ||
			i_sblock * BFS_BSIZE > i_eoff) {

			printf("Inode 0x%08x corrupted\n", i);

			brelse(bh);
			ret = -EIO;
			goto out3;
		}

		if (!di->i_ino) {
			info->si_freei++;
			continue;
		}
		set_bit(i, info->si_imap);
		info->si_freeb -= BFS_FILEBLOCKS(di);

		eblock =  le32_to_cpu(di->i_eblock);
		if (eblock > info->si_lf_eblk)
			info->si_lf_eblk = eblock;
	}
	brelse(bh);
	brelse(sbh);
	dump_imap("read_super", s);
	return 0;

out3:
	dput(s->s_root);
	s->s_root = NULL;
out2:
	kfree(info->si_imap);
out1:
	brelse(sbh);
out:
	mutex_destroy(&info->bfs_lock);
	kfree(info);
	s->s_fs_info = NULL;
	return ret;
}
Example #4
0
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_super_block_second * usb2;
	struct ufs_super_block_third * usb3;
	struct ufs_super_block *usb;
	struct ufs_buffer_head * ubh;	
	struct inode *inode;
	unsigned block_size, super_block_size;
	unsigned flags;

	uspi = NULL;
	ubh = NULL;
	flags = 0;
	
	UFSD(("ENTER\n"))
		
	sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
	if (!sbi)
		goto failed_nomem;
	sb->s_fs_info = sbi;
	memset(sbi, 0, sizeof(struct ufs_sb_info));

	UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY)))
	
#ifndef CONFIG_UFS_FS_WRITE
	if (!(sb->s_flags & MS_RDONLY)) {
		printk("ufs was compiled with read-only support, "
		"can't be mounted as read-write\n");
		goto failed;
	}
#endif
	/*
	 * Set default mount options
	 * Parse mount options
	 */
	sbi->s_mount_opt = 0;
	ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK);
	if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) {
		printk("wrong mount options\n");
		goto failed;
	}
	if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) {
		if (!silent)
			printk("You didn't specify the type of your ufs filesystem\n\n"
			"mount -t ufs -o ufstype="
			"sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|netxstep-cd|openstep ...\n\n"
			">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
			"default is ufstype=old\n");
		ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD);
	}

	sbi->s_uspi = uspi =
		kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL);
	if (!uspi)
		goto failed;

	/* Keep 2Gig file limit. Some UFS variants need to override 
	   this but as I don't know which I'll let those in the know loosen
	   the rules */
	   
	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
	case UFS_MOUNT_UFSTYPE_44BSD:
		UFSD(("ufstype=44bsd\n"))
		uspi->s_fsize = block_size = 512;
		uspi->s_fmask = ~(512 - 1);
		uspi->s_fshift = 9;
		uspi->s_sbsize = super_block_size = 1536;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
		break;
	case UFS_MOUNT_UFSTYPE_UFS2:
		UFSD(("ufstype=ufs2\n"))
		uspi->s_fsize = block_size = 512;
		uspi->s_fmask = ~(512 - 1);
		uspi->s_fshift = 9;
		uspi->s_sbsize = super_block_size = 1536;
		uspi->s_sbbase =  0;
		flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
		if (!(sb->s_flags & MS_RDONLY)) {
			printk(KERN_INFO "ufstype=ufs2 is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
 		}
		break;
		
	case UFS_MOUNT_UFSTYPE_SUN:
		UFSD(("ufstype=sun\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		uspi->s_maxsymlinklen = 56;
		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN;
		break;

	case UFS_MOUNT_UFSTYPE_SUNx86:
		UFSD(("ufstype=sunx86\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		uspi->s_maxsymlinklen = 56;
		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN;
		break;

	case UFS_MOUNT_UFSTYPE_OLD:
		UFSD(("ufstype=old\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
		if (!(sb->s_flags & MS_RDONLY)) {
			if (!silent)
				printk(KERN_INFO "ufstype=old is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
		break;
	
	case UFS_MOUNT_UFSTYPE_NEXTSTEP:
		UFSD(("ufstype=nextstep\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
		if (!(sb->s_flags & MS_RDONLY)) {
			if (!silent)
				printk(KERN_INFO "ufstype=nextstep is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
		break;
	
	case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
		UFSD(("ufstype=nextstep-cd\n"))
		uspi->s_fsize = block_size = 2048;
		uspi->s_fmask = ~(2048 - 1);
		uspi->s_fshift = 11;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
		if (!(sb->s_flags & MS_RDONLY)) {
			if (!silent)
				printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
		break;
	
	case UFS_MOUNT_UFSTYPE_OPENSTEP:
		UFSD(("ufstype=openstep\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
		if (!(sb->s_flags & MS_RDONLY)) {
			if (!silent)
				printk(KERN_INFO "ufstype=openstep is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
		break;
	
	case UFS_MOUNT_UFSTYPE_HP:
		UFSD(("ufstype=hp\n"))
		uspi->s_fsize = block_size = 1024;
		uspi->s_fmask = ~(1024 - 1);
		uspi->s_fshift = 10;
		uspi->s_sbsize = super_block_size = 2048;
		uspi->s_sbbase = 0;
		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
		if (!(sb->s_flags & MS_RDONLY)) {
			if (!silent)
				printk(KERN_INFO "ufstype=hp is supported read-only\n");
			sb->s_flags |= MS_RDONLY;
 		}
 		break;
	default:
		if (!silent)
			printk("unknown ufstype\n");
		goto failed;
	}
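	/* re-entered from below when the on-disk sizes differ from the
	 * ufstype defaults, or with the superblock base shifted for
	 * NeXTstep-style layouts */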
	
again:	
	if (!sb_set_blocksize(sb, block_size)) {
		printk(KERN_ERR "UFS: failed to set blocksize\n");
		goto failed;
	}

	/*
	 * read ufs super block from device
	 */
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + SBLOCK_UFS2/block_size, super_block_size);
	}
	else {
		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
	}
	if (!ubh)
		goto failed;

	
	usb1 = ubh_get_usb_first(USPI_UBH);
	usb2 = ubh_get_usb_second(USPI_UBH);
	usb3 = ubh_get_usb_third(USPI_UBH);
	usb  = (struct ufs_super_block *)
		((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;

	/*
	 * Check ufs magic number
	 */
	sbi->s_bytesex = BYTESEX_LE;
	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
		case UFS_MAGIC:
		case UFS2_MAGIC:
		case UFS_MAGIC_LFN:
	        case UFS_MAGIC_FEA:
	        case UFS_MAGIC_4GB:
			goto magic_found;
	}
	sbi->s_bytesex = BYTESEX_BE;
	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
		case UFS_MAGIC:
		case UFS2_MAGIC:
		case UFS_MAGIC_LFN:
	        case UFS_MAGIC_FEA:
	        case UFS_MAGIC_4GB:
			goto magic_found;
	}

	if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) 
	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) 
	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) 
	  && uspi->s_sbbase < 256) {
		ubh_brelse_uspi(uspi);
		ubh = NULL;
		uspi->s_sbbase += 8;
		goto again;
	}
	if (!silent)
		printk("ufs_read_super: bad magic number\n");
	goto failed;

magic_found:
	/*
	 * Check block and fragment sizes
	 */
	uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize);
	uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize);
	uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize);
	uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
	uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);

	if (uspi->s_fsize & (uspi->s_fsize - 1)) {
		printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n",
			uspi->s_fsize);
		goto failed;
	}
	if (uspi->s_fsize < 512) {
		printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n",
			uspi->s_fsize);
		goto failed;
	}
	if (uspi->s_fsize > 4096) {
		printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n",
			uspi->s_fsize);
		goto failed;
	}
	if (uspi->s_bsize & (uspi->s_bsize - 1)) {
		printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n",
			uspi->s_bsize);
		goto failed;
	}
	if (uspi->s_bsize < 4096) {
		printk(KERN_ERR "ufs_read_super: block size %u is too small\n",
			uspi->s_bsize);
		goto failed;
	}
	if (uspi->s_bsize / uspi->s_fsize > 8) {
		printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n",
			uspi->s_bsize / uspi->s_fsize);
		goto failed;
	}
	if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
		ubh_brelse_uspi(uspi);
		ubh = NULL;
		block_size = uspi->s_fsize;
		super_block_size = uspi->s_sbsize;
		UFSD(("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size))
		goto again;
	}
Example #5
0
static int lab4fs_fill_super(struct super_block * sb, void * data, int silent)
{
    struct buffer_head * bh;
    int blocksize = BLOCK_SIZE;
    unsigned long logic_sb_block;
    unsigned offset = 0;
    unsigned long sb_block = 1;
    struct lab4fs_super_block *es;
    struct lab4fs_sb_info *sbi;
    struct inode *root;
    int hblock;
    int err = -EINVAL;

    sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;

    sb->s_fs_info = sbi;
    memset(sbi, 0, sizeof(*sbi));
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        LAB4ERROR("unable to set blocksize\n");
        err = -EIO;
        goto out_fail;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.  
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize;
        offset = (sb_block * BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        LAB4ERROR("unable to read super block\n");
        err = -EIO;
        goto out_fail;
    }

    es = (struct lab4fs_super_block *) (((char *)bh->b_data) + offset);
    sb->s_magic = le32_to_cpu(es->s_magic);
    if (sb->s_magic != LAB4FS_SUPER_MAGIC) {
        if (!silent)
            LAB4ERROR("VFS: Can't find lab4fs filesystem on dev %s.\n",
                    sb->s_id);
        goto failed_mount;
    }

    sbi->s_sb = es;
    blocksize = le32_to_cpu(es->s_block_size);
    hblock = bdev_hardsect_size(sb->s_bdev);
    if (sb->s_blocksize != blocksize) {
        /*
         * Make sure the blocksize for the filesystem is larger
         * than the hardware sectorsize for the machine.
         */
        if (blocksize < hblock) {
            LAB4ERROR("blocksize %d too small for "
                    "device blocksize %d.\n", blocksize, hblock);
            goto failed_mount;
        }

        brelse (bh);
        sb_set_blocksize(sb, blocksize);
        logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize;
        offset = (sb_block * BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if (!bh) {
            LAB4ERROR("Can't read superblock on 2nd try.\n");
            goto failed_mount;
        }
        es = (struct lab4fs_super_block *)(((char *)bh->b_data) + offset);
        sbi->s_sb = es;
        if (es->s_magic != cpu_to_le32(LAB4FS_SUPER_MAGIC)) {
            LAB4ERROR("Magic mismatch, very weird !\n");
            goto failed_mount;
        }
    }
    sb->s_maxbytes = lab4fs_max_size(es);
    sbi->s_sbh = bh;
    sbi->s_log_block_size = log2(sb->s_blocksize);
    sbi->s_first_ino = le32_to_cpu(es->s_first_inode);
    sbi->s_inode_size = le32_to_cpu(es->s_inode_size);
    sbi->s_log_inode_size = log2(sbi->s_inode_size);
    sbi->s_inode_table = le32_to_cpu(es->s_inode_table);
    sbi->s_data_blocks = le32_to_cpu(es->s_data_blocks);
    sbi->s_next_generation = 0;
    sbi->s_free_inodes_count = le32_to_cpu(es->s_free_inodes_count);
    sbi->s_free_data_blocks_count = le32_to_cpu(es->s_free_data_blocks_count);
    sbi->s_inodes_count = le32_to_cpu(es->s_inodes_count);
    sbi->s_blocks_count = le32_to_cpu(es->s_blocks_count);

    sbi->s_inode_bitmap.nr_valid_bits = le32_to_cpu(es->s_inodes_count);
    sbi->s_data_bitmap.nr_valid_bits = le32_to_cpu(es->s_blocks_count) 
        - le32_to_cpu(es->s_data_blocks);

    rwlock_init(&sbi->rwlock);
    sb->s_op = &lab4fs_super_ops;

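    /* load the inode and data-block bitmaps described by the superblock */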
    err = bitmap_setup(&sbi->s_inode_bitmap, sb, le32_to_cpu(es->s_inode_bitmap));
    if (err)
        goto out_fail;
    err = bitmap_setup(&sbi->s_data_bitmap, sb, le32_to_cpu(es->s_data_bitmap));
    if (err)
        goto out_fail;

    sbi->s_root_inode = le32_to_cpu(es->s_root_inode);
    root = iget(sb, sbi->s_root_inode);
    LAB4DEBUG("I can get the root inode\n");
    print_inode(root);
    LAB4DEBUG("END\n");
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        err = -ENOMEM;
        goto out_fail;
    }
    return 0;

failed_mount:
    brelse(bh);
out_fail:
    sb->s_fs_info = NULL;
    kfree(sbi);
    return err;
}
Example #6
0
static int hpfs_fill_super(struct super_block *s, void *options, int silent)
{
	struct buffer_head *bh0, *bh1, *bh2;
	struct hpfs_boot_block *bootblock;
	struct hpfs_super_block *superblock;
	struct hpfs_spare_block *spareblock;
	struct hpfs_sb_info *sbi;
	struct inode *root;

	kuid_t uid;
	kgid_t gid;
	umode_t umask;
	int lowercase, eas, chk, errs, chkdsk, timeshift;

	dnode_secno root_dno;
	struct hpfs_dirent *de = NULL;
	struct quad_buffer_head qbh;

	int o;

	save_mount_options(s, options);

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi) {
		return -ENOMEM;
	}
	s->s_fs_info = sbi;

	sbi->sb_bmp_dir = NULL;
	sbi->sb_cp_table = NULL;

	mutex_init(&sbi->hpfs_mutex);
	hpfs_lock(s);

	uid = current_uid();
	gid = current_gid();
	umask = current_umask();
	lowercase = 0;
	eas = 2;
	chk = 1;
	errs = 1;
	chkdsk = 1;
	timeshift = 0;

	if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase,
	    &eas, &chk, &errs, &chkdsk, &timeshift))) {
		printk("HPFS: bad mount options.\n");
		goto bail0;
	}
	if (o==2) {
		hpfs_help();
		goto bail0;
	}

	/*sbi->sb_mounting = 1;*/
	sb_set_blocksize(s, 512);
	sbi->sb_fs_size = -1;
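	/* HPFS keeps its key structures at fixed sectors: boot block at 0,
	   superblock at 16, spare block at 17 */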
	if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1;
	if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2;
	if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3;

	/* Check magics */
	if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC
	    ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC
	    || le32_to_cpu(spareblock->magic) != SP_MAGIC) {
		if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n");
		goto bail4;
	}

	/* Check version */
	if (!(s->s_flags & MS_RDONLY) &&
	      superblock->funcversion != 2 && superblock->funcversion != 3) {
		printk("HPFS: Bad version %d,%d. Mount readonly to go around\n",
			(int)superblock->version, (int)superblock->funcversion);
		printk("HPFS: please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - [email protected]\n");
		goto bail4;
	}

	s->s_flags |= MS_NOATIME;

	/* Fill superblock stuff */
	s->s_magic = HPFS_SUPER_MAGIC;
	s->s_op = &hpfs_sops;
	s->s_d_op = &hpfs_dentry_operations;

	sbi->sb_root = le32_to_cpu(superblock->root);
	sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors);
	sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps);
	sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start);
	sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band);
	sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap);
	sbi->sb_uid = uid;
	sbi->sb_gid = gid;
	sbi->sb_mode = 0777 & ~umask;
	sbi->sb_n_free = -1;
	sbi->sb_n_free_dnodes = -1;
	sbi->sb_lowercase = lowercase;
	sbi->sb_eas = eas;
	sbi->sb_chk = chk;
	sbi->sb_chkdsk = chkdsk;
	sbi->sb_err = errs;
	sbi->sb_timeshift = timeshift;
	sbi->sb_was_error = 0;
	sbi->sb_cp_table = NULL;
	sbi->sb_c_bitmap = -1;
	sbi->sb_max_fwd_alloc = 0xffffff;
	
	/* Load bitmap directory */
	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
		goto bail4;
	
	/* Check for general fs errors*/
	if (spareblock->dirty && !spareblock->old_wrote) {
		if (errs == 2) {
			printk("HPFS: Improperly stopped, not mounted\n");
			goto bail4;
		}
		hpfs_error(s, "improperly stopped");
	}

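	/* flag the volume as dirty while it is mounted read-write */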
	if (!(s->s_flags & MS_RDONLY)) {
		spareblock->dirty = 1;
		spareblock->old_wrote = 0;
		mark_buffer_dirty(bh2);
	}

	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
		if (errs >= 2) {
			printk("HPFS: Hotfixes not supported here, try chkdsk\n");
			mark_dirty(s, 0);
			goto bail4;
		}
		hpfs_error(s, "hotfixes not supported here, try chkdsk");
		if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n");
		else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n");
	}
	if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
		if (errs >= 2) {
			printk("HPFS: Spare dnodes used, try chkdsk\n");
			mark_dirty(s, 0);
			goto bail4;
		}
		hpfs_error(s, "warning: spare dnodes used, try chkdsk");
		if (errs == 0) printk("HPFS: Proceeding, but your filesystem could be corrupted if you delete files or directories\n");
	}
	if (chk) {
		unsigned a;
		if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) ||
		    le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) {
			hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x",
				le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band));
			goto bail4;
		}
		a = sbi->sb_dirband_size;
		sbi->sb_dirband_size = 0;
		if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") ||
		    hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") ||
		    hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) {
			mark_dirty(s, 0);
			goto bail4;
		}
		sbi->sb_dirband_size = a;
	} else printk("HPFS: You really don't want any checks? You are crazy...\n");

	/* Load code page table */
	if (le32_to_cpu(spareblock->n_code_pages))
		if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir))))
			printk("HPFS: Warning: code page support is disabled\n");

	brelse(bh2);
	brelse(bh1);
	brelse(bh0);

	root = iget_locked(s, sbi->sb_root);
	if (!root)
		goto bail0;
	hpfs_init_inode(root);
	hpfs_read_inode(root);
	unlock_new_inode(root);
	s->s_root = d_make_root(root);
	if (!s->s_root)
		goto bail0;

	/*
	 * find the root directory's . pointer & finish filling in the inode
	 */

	root_dno = hpfs_fnode_dno(s, sbi->sb_root);
	if (root_dno)
		de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh);
	if (!de)
		hpfs_error(s, "unable to find root dir");
	else {
		root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
		root->i_atime.tv_nsec = 0;
		root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
		root->i_mtime.tv_nsec = 0;
		root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
		root->i_ctime.tv_nsec = 0;
		hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
		hpfs_i(root)->i_parent_dir = root->i_ino;
		if (root->i_size == -1)
			root->i_size = 2048;
		if (root->i_blocks == -1)
			root->i_blocks = 5;
		hpfs_brelse4(&qbh);
	}
	hpfs_unlock(s);
	return 0;

bail4:	brelse(bh2);
bail3:	brelse(bh1);
bail2:	brelse(bh0);
bail1:
bail0:
	hpfs_unlock(s);
	kfree(sbi->sb_bmp_dir);
	kfree(sbi->sb_cp_table);
	s->s_fs_info = NULL;
	kfree(sbi);
	return -EINVAL;
}
Example #7
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi) {
		ret = -ENOMEM;
		goto failed_unlock;
	}

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		ret = -ENOMEM;
		kfree(sbi);
		goto failed_unlock;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.  
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
	
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR,	"error: couldn't mount because of "
		       "unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
		       "unsupported optional features (%x)",
		       le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for xip");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read"
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	sb->s_max_links = EXT2_LINK_MAX;

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu"
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
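	/* read every block group descriptor block; descriptor_loc() tells us
	 * where the i-th descriptor block lives on disk */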
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		ret = err;
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
failed_unlock:
	return ret;
}
Example #8
0
static int bfs_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head * bh;
	struct bfs_super_block * bfs_sb;
	struct inode * inode;
	unsigned i, imap_len;
	struct bfs_sb_info * info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	s->s_fs_info = info;

	sb_set_blocksize(s, BFS_BSIZE);

	bh = sb_bread(s, 0);
	if(!bh)
		goto out;
	bfs_sb = (struct bfs_super_block *)bh->b_data;
	if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
		if (!silent)
			printf("No BFS filesystem on %s (magic=%08x)\n", 
				s->s_id,  le32_to_cpu(bfs_sb->s_magic));
		goto out;
	}
	if (BFS_UNCLEAN(bfs_sb, s) && !silent)
		printf("%s is unclean, continuing\n", s->s_id);

	s->s_magic = BFS_MAGIC;
	info->si_sbh = bh;
	info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE)/sizeof(struct bfs_inode)
			+ BFS_ROOT_INO - 1;

	imap_len = info->si_lasti/8 + 1;
	info->si_imap = kzalloc(imap_len, GFP_KERNEL);
	if (!info->si_imap)
		goto out;
	for (i=0; i<BFS_ROOT_INO; i++) 
		set_bit(i, info->si_imap);

	s->s_op = &bfs_sops;
	inode = iget(s, BFS_ROOT_INO);
	if (!inode) {
		kfree(info->si_imap);
		goto out;
	}
	s->s_root = d_alloc_root(inode);
	if (!s->s_root) {
		iput(inode);
		kfree(info->si_imap);
		goto out;
	}

	info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */
	info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 -  le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS;
	info->si_freei = 0;
	info->si_lf_eblk = 0;
	info->si_lf_sblk = 0;
	info->si_lf_ioff = 0;
	bh = NULL;
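	/* walk the inode table to rebuild the inode bitmap, the free counts
	   and the location of the last file on disk */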
	for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) {
		struct bfs_inode *di;
		int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
		int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
		unsigned long sblock, eblock;

		if (!off) {
			brelse(bh);
			bh = sb_bread(s, block);
		}

		if (!bh)
			continue;

		di = (struct bfs_inode *)bh->b_data + off;

		if (!di->i_ino) {
			info->si_freei++;
			continue;
		}
		set_bit(i, info->si_imap);
		info->si_freeb -= BFS_FILEBLOCKS(di);

		sblock =  le32_to_cpu(di->i_sblock);
		eblock =  le32_to_cpu(di->i_eblock);
		if (eblock > info->si_lf_eblk) {
			info->si_lf_eblk = eblock;
			info->si_lf_sblk = sblock;
			info->si_lf_ioff = BFS_INO2OFF(i);
		}
	}
	brelse(bh);
	if (!(s->s_flags & MS_RDONLY)) {
		mark_buffer_dirty(info->si_sbh);
		s->s_dirt = 1;
	} 
	dump_imap("read_super", s);
	return 0;

out:
	brelse(bh);
	kfree(info);
	s->s_fs_info = NULL;
	return -EINVAL;
}
Example #9
0
/* Takes in super block, returns true if good data read */
int hfsplus_read_wrapper(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_wd wd;
	sector_t part_start, part_size;
	u32 blocksize;
	int error = 0;

	error = -EINVAL;
	blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
	if (!blocksize)
		goto out;

	if (hfsplus_get_last_session(sb, &part_start, &part_size))
		goto out;
	if ((u64)part_start + part_size > 0x100000000ULL) {
		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
		goto out;
	}

	error = -ENOMEM;
	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
	if (!sbi->s_vhdr)
		goto out;
	sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
	if (!sbi->s_backup_vhdr)
		goto out_free_vhdr;

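	/* re-read the volume header after part_start has been adjusted for an
	 * HFS wrapper or a partition map entry */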
reread:
	error = hfsplus_submit_bio(sb->s_bdev,
				   part_start + HFSPLUS_VOLHEAD_SECTOR,
				   sbi->s_vhdr, READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	switch (sbi->s_vhdr->signature) {
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
		set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
		/*FALLTHRU*/
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
		break;
	case cpu_to_be16(HFSP_WRAP_MAGIC):
		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
			goto out_free_backup_vhdr;
		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
		part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
		part_size = wd.embed_count * wd.ablk_size;
		goto reread;
	default:
		/*
		 * Check for a partition block.
		 *
		 * (should do this only for cdrom/loop though)
		 */
		if (hfs_part_find(sb, &part_start, &part_size))
			goto out_free_backup_vhdr;
		goto reread;
	}

	error = hfsplus_submit_bio(sb->s_bdev,
				   part_start + part_size - 2,
				   sbi->s_backup_vhdr, READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
		printk(KERN_WARNING
			"hfs: invalid secondary volume header\n");
		goto out_free_backup_vhdr;
	}

	blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);

	/*
	 * Block size must be at least as large as a sector and a power of 2.
	 */
	if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
		goto out_free_backup_vhdr;
	sbi->alloc_blksz = blocksize;
	sbi->alloc_blksz_shift = 0;
	while ((blocksize >>= 1) != 0)
		sbi->alloc_blksz_shift++;
	blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);

	/*
	 * Align block size to block offset.
	 */
	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
		blocksize >>= 1;

	if (sb_set_blocksize(sb, blocksize) != blocksize) {
		printk(KERN_ERR "hfs: unable to set blocksize to %u!\n",
			blocksize);
		goto out_free_backup_vhdr;
	}

	sbi->blockoffset =
		part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
	sbi->part_start = part_start;
	sbi->sect_count = part_size;
	sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	return 0;

out_free_backup_vhdr:
	kfree(sbi->s_backup_vhdr);
out_free_vhdr:
	kfree(sbi->s_vhdr);
out:
	return error;
}
Example #10
0
static int minix_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct buffer_head **map;
	struct minix_super_block *ms;
	struct minix3_super_block *m3s = NULL;
	unsigned long i, block;
	struct inode *root_inode;
	struct minix_sb_info *sbi;
	int ret = -EINVAL;

	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	s->s_fs_info = sbi;

	BUILD_BUG_ON(32 != sizeof (struct minix_inode));
	BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

	if (!sb_set_blocksize(s, BLOCK_SIZE))
		goto out_bad_hblock;

	if (!(bh = sb_bread(s, 1)))
		goto out_bad_sb;

	ms = (struct minix_super_block *) bh->b_data;
	sbi->s_ms = ms;
	sbi->s_sbh = bh;
	sbi->s_mount_state = ms->s_state;
	sbi->s_ninodes = ms->s_ninodes;
	sbi->s_nzones = ms->s_nzones;
	sbi->s_imap_blocks = ms->s_imap_blocks;
	sbi->s_zmap_blocks = ms->s_zmap_blocks;
	sbi->s_firstdatazone = ms->s_firstdatazone;
	sbi->s_log_zone_size = ms->s_log_zone_size;
	sbi->s_max_size = ms->s_max_size;
	s->s_magic = ms->s_magic;
	if (s->s_magic == MINIX_SUPER_MAGIC) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
		m3s = (struct minix3_super_block *) bh->b_data;
		s->s_magic = m3s->s_magic;
		sbi->s_imap_blocks = m3s->s_imap_blocks;
		sbi->s_zmap_blocks = m3s->s_zmap_blocks;
		sbi->s_firstdatazone = m3s->s_firstdatazone;
		sbi->s_log_zone_size = m3s->s_log_zone_size;
		sbi->s_max_size = m3s->s_max_size;
		sbi->s_ninodes = m3s->s_ninodes;
		sbi->s_nzones = m3s->s_zones;
		sbi->s_dirsize = 64;
		sbi->s_namelen = 60;
		sbi->s_version = MINIX_V3;
		sbi->s_mount_state = MINIX_VALID_FS;
		sb_set_blocksize(s, m3s->s_blocksize);
		s->s_max_links = MINIX2_LINK_MAX;
	} else
		goto out_no_fs;

	/*
	 * Allocate the buffer map to keep the superblock small.
	 */
	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
		goto out_illegal_sb;
	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
	map = kzalloc(i, GFP_KERNEL);
	if (!map)
		goto out_no_map;
	sbi->s_imap = &map[0];
	sbi->s_zmap = &map[sbi->s_imap_blocks];

	block=2;
	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
		if (!(sbi->s_imap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}
	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}

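	/* inode 0 and zone 0 are never handed out, so bit 0 of each bitmap is
	 * always marked in use */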
	minix_set_bit(0,sbi->s_imap[0]->b_data);
	minix_set_bit(0,sbi->s_zmap[0]->b_data);

	/* Apparently minix can create filesystems that allocate more blocks for
	 * the bitmaps than needed.  We simply ignore that, but verify it didn't
	 * create one with not enough blocks and bail out if so.
	 */
	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
	if (sbi->s_imap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"imap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	block = minix_blocks_needed(
			(sbi->s_nzones - sbi->s_firstdatazone + 1),
			s->s_blocksize);
	if (sbi->s_zmap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"zmap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	/* set up enough so that it can read an inode */
	s->s_op = &minix_sops;
	root_inode = minix_iget(s, MINIX_ROOT_INO);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_no_root;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root_inode);
	if (!s->s_root)
		goto out_no_root;

	if (!(s->s_flags & MS_RDONLY)) {
		if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
			ms->s_state &= ~MINIX_VALID_FS;
		mark_buffer_dirty(bh);
	}
	if (!(sbi->s_mount_state & MINIX_VALID_FS))
		printk("MINIX-fs: mounting unchecked file system, "
			"running fsck is recommended\n");
	else if (sbi->s_mount_state & MINIX_ERROR_FS)
		printk("MINIX-fs: mounting file system with errors, "
			"running fsck is recommended\n");

	return 0;

out_no_root:
	if (!silent)
		printk("MINIX-fs: get root inode failed\n");
	goto out_freemap;

out_no_bitmap:
	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	kfree(sbi->s_imap);
	goto out_release;

out_no_map:
	ret = -ENOMEM;
	if (!silent)
		printk("MINIX-fs: can't allocate map\n");
	goto out_release;

out_illegal_sb:
	if (!silent)
		printk("MINIX-fs: bad superblock\n");
	goto out_release;

out_no_fs:
	if (!silent)
		printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
		       "on device %s.\n", s->s_id);
out_release:
	brelse(bh);
	goto out;

out_bad_hblock:
	printk("MINIX-fs: blocksize too small for device\n");
	goto out;

out_bad_sb:
	printk("MINIX-fs: unable to read superblock\n");
out:
	s->s_fs_info = NULL;
	kfree(sbi);
	return ret;
}
Example #11
0
/* Allocate private field of the superblock, fill it.
 *
 * Finish filling the public superblock fields
 * Make the root directory
 * Load a set of NLS translations if needed.
 */
static int
befs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head *bh;
	struct befs_sb_info *befs_sb;
	befs_super_block *disk_sb;
	struct inode *root;
	long ret = -EINVAL;
	const unsigned long sb_block = 0;
	const off_t x86_sb_off = 512;
	int blocksize;

	sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL);
	if (sb->s_fs_info == NULL)
		goto unacquire_none;

	befs_sb = BEFS_SB(sb);

	if (!parse_options((char *) data, &befs_sb->mount_opts)) {
		if (!silent)
			befs_error(sb, "cannot parse mount options");
		goto unacquire_priv_sbp;
	}

	befs_debug(sb, "---> %s", __func__);

	if (!(sb->s_flags & MS_RDONLY)) {
		befs_warning(sb,
			     "No write support. Marking filesystem read-only");
		sb->s_flags |= MS_RDONLY;
	}

	/*
	 * Set dummy blocksize to read super block.
	 * Will be set to real fs blocksize later.
	 *
	 * Linux 2.4.10 and later refuse to read blocks smaller than
	 * the logical block size for the device. But we also need to read at
	 * least 1k to get the second 512 bytes of the volume.
	 */
	blocksize = sb_min_blocksize(sb, 1024);
	if (!blocksize) {
		if (!silent)
			befs_error(sb, "unable to set blocksize");
		goto unacquire_priv_sbp;
	}

	bh = sb_bread(sb, sb_block);
	if (!bh) {
		if (!silent)
			befs_error(sb, "unable to read superblock");
		goto unacquire_priv_sbp;
	}

	/* account for offset of super block on x86 */
	disk_sb = (befs_super_block *) bh->b_data;
	if ((disk_sb->magic1 == BEFS_SUPER_MAGIC1_LE) ||
	    (disk_sb->magic1 == BEFS_SUPER_MAGIC1_BE)) {
		befs_debug(sb, "Using PPC superblock location");
	} else {
		befs_debug(sb, "Using x86 superblock location");
		disk_sb =
		    (befs_super_block *) ((void *) bh->b_data + x86_sb_off);
	}

	if ((befs_load_sb(sb, disk_sb) != BEFS_OK) ||
	    (befs_check_sb(sb) != BEFS_OK))
		goto unacquire_bh;

	befs_dump_super_block(sb, disk_sb);

	brelse(bh);

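	/* sector_t may be only 32 bits wide; refuse volumes whose block count
	 * the host cannot address */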
	if (befs_sb->num_blocks > ~((sector_t)0)) {
		if (!silent)
			befs_error(sb, "blocks count: %llu is larger than the host can use",
					befs_sb->num_blocks);
		goto unacquire_priv_sbp;
	}

	/*
	 * set up enough so that it can read an inode
	 * Fill in kernel superblock fields from private sb
	 */
	sb->s_magic = BEFS_SUPER_MAGIC;
	/* Set real blocksize of fs */
	sb_set_blocksize(sb, (ulong) befs_sb->block_size);
	sb->s_op = &befs_sops;
	sb->s_export_op = &befs_export_operations;
	root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto unacquire_priv_sbp;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		if (!silent)
			befs_error(sb, "get root inode failed");
		goto unacquire_priv_sbp;
	}

	/* load nls library */
	if (befs_sb->mount_opts.iocharset) {
		befs_debug(sb, "Loading nls: %s",
			   befs_sb->mount_opts.iocharset);
		befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset);
		if (!befs_sb->nls) {
			befs_warning(sb, "Cannot load nls %s"
					" loading default nls",
					befs_sb->mount_opts.iocharset);
			befs_sb->nls = load_nls_default();
		}
	/* load default nls if none is specified  in mount options */
	} else {
		befs_debug(sb, "Loading default nls");
		befs_sb->nls = load_nls_default();
	}

	return 0;

unacquire_bh:
	brelse(bh);

unacquire_priv_sbp:
	kfree(befs_sb->mount_opts.iocharset);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;

unacquire_none:
	return ret;
}
Example #12
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
    struct buffer_head * bh;
    struct ext2_sb_info * sbi;
    struct ext2_super_block * es;
    struct inode *root;
    unsigned long block;
    unsigned long sb_block = get_sb_block(&data);
    unsigned long logic_sb_block;
    unsigned long offset = 0;
    unsigned long def_mount_opts;
    int blocksize = BLOCK_SIZE;
    int db_count;
    int i, j;
    __le32 features;

    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    sb->s_fs_info = sbi;

    /*
     * See what the current blocksize for the device is, and
     * use that as the blocksize.  Otherwise (or if the blocksize
     * is smaller than the default) use the default.
     * This is important for devices that have a hardware
     * sectorsize that is larger than the default.
     */
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        printk ("EXT2-fs: unable to set blocksize\n");
        goto failed_sbi;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        printk ("EXT2-fs: unable to read superblock\n");
        goto failed_sbi;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     *       some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    if (def_mount_opts & EXT2_DEFM_DEBUG)
        set_opt(sbi->s_mount_opt, DEBUG);
    if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
        set_opt(sbi->s_mount_opt, GRPID);
    if (def_mount_opts & EXT2_DEFM_UID16)
        set_opt(sbi->s_mount_opt, NO_UID32);
    if (def_mount_opts & EXT2_DEFM_XATTR_USER)
        set_opt(sbi->s_mount_opt, XATTR_USER);
    if (def_mount_opts & EXT2_DEFM_ACL)
        set_opt(sbi->s_mount_opt, POSIX_ACL);

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
        set_opt(sbi->s_mount_opt, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
        set_opt(sbi->s_mount_opt, ERRORS_RO);
    else
        set_opt(sbi->s_mount_opt, ERRORS_CONT);

    sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
    sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

    if (!parse_options ((char *) data, sbi))
        goto failed_mount;

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                  ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
                   MS_POSIXACL : 0);

    ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
                               EXT2_MOUNT_XIP if not */

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
            (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
        printk("EXT2-fs warning: feature flags set on rev 0 fs, "
               "running e2fsck is recommended\n");
    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
    if (features) {
        printk("EXT2-fs: %s: couldn't mount because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }
    if (!(sb->s_flags & MS_RDONLY) &&
            (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))) {
        printk("EXT2-fs: %s: couldn't mount RDWR because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }

    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

    if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) ||
                               (sb->s_blocksize != blocksize))) {
        if (!silent)
            printk("XIP: Unsupported blocksize\n");
        goto failed_mount;
    }

    /* If the blocksize doesn't match, re-read the thing.. */
    if (sb->s_blocksize != blocksize) {
        brelse(bh);

        if (!sb_set_blocksize(sb, blocksize)) {
            printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
            goto failed_sbi;
        }

        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if(!bh) {
            printk("EXT2-fs: Couldn't read superblock on "
                   "2nd try.\n");
            goto failed_sbi;
        }
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sbi->s_es = es;
        if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
            printk ("EXT2-fs: Magic mismatch, very weird !\n");
            goto failed_mount;
        }
    }

    sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
        sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
        sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
    } else {
        sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
        sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
        if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
                (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
                (sbi->s_inode_size > blocksize)) {
            printk ("EXT2-fs: unsupported inode size: %d\n",
                    sbi->s_inode_size);
            goto failed_mount;
        }
    }

    sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
                       le32_to_cpu(es->s_log_frag_size);
    if (sbi->s_frag_size == 0)
        goto cantfind_ext2;
    sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

    if (EXT2_INODE_SIZE(sb) == 0)
        goto cantfind_ext2;
    sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
    if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
        goto cantfind_ext2;
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                           sbi->s_inodes_per_block;
    sbi->s_desc_per_block = sb->s_blocksize /
                            sizeof (struct ext2_group_desc);
    sbi->s_sbh = bh;
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits =
        ilog2 (EXT2_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits =
        ilog2 (EXT2_DESC_PER_BLOCK(sb));

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    if (sb->s_blocksize != bh->b_size) {
        if (!silent)
            printk ("VFS: Unsupported blocksize on dev "
                    "%s.\n", sb->s_id);
        goto failed_mount;
    }

    if (sb->s_blocksize != sbi->s_frag_size) {
        printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
                sbi->s_frag_size, sb->s_blocksize);
        goto failed_mount;
    }

    if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #blocks per group too big: %lu\n",
                sbi->s_blocks_per_group);
        goto failed_mount;
    }
    if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #fragments per group too big: %lu\n",
                sbi->s_frags_per_group);
        goto failed_mount;
    }
    if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #inodes per group too big: %lu\n",
                sbi->s_inodes_per_group);
        goto failed_mount;
    }

    if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
        goto cantfind_ext2;
    sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                            le32_to_cpu(es->s_first_data_block) - 1)
                           / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
    db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
               EXT2_DESC_PER_BLOCK(sb);
    sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    if (sbi->s_group_desc == NULL) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount;
    }
    bgl_lock_init(&sbi->s_blockgroup_lock);
    sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
                           GFP_KERNEL);
    if (!sbi->s_debts) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount_group_desc;
    }
    memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logic_sb_block, i);
        sbi->s_group_desc[i] = sb_bread(sb, block);
        if (!sbi->s_group_desc[i]) {
            for (j = 0; j < i; j++)
                brelse (sbi->s_group_desc[j]);
            printk ("EXT2-fs: unable to read group descriptors\n");
            goto failed_mount_group_desc;
        }
    }
    if (!ext2_check_descriptors (sb)) {
        printk ("EXT2-fs: group descriptors corrupted!\n");
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;
    get_random_bytes(&sbi->s_next_generation, sizeof(u32));
    spin_lock_init(&sbi->s_next_gen_lock);

    percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext2_count_free_blocks(sb));
    percpu_counter_init(&sbi->s_freeinodes_counter,
                        ext2_count_free_inodes(sb));
    percpu_counter_init(&sbi->s_dirs_counter,
                        ext2_count_dirs(sb));
    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext2_sops;
    sb->s_export_op = &ext2_export_ops;
    sb->s_xattr = ext2_xattr_handlers;
    root = iget(sb, EXT2_ROOT_INO);
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        printk(KERN_ERR "EXT2-fs: get root inode failed\n");
        goto failed_mount3;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        dput(sb->s_root);
        sb->s_root = NULL;
        printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
        goto failed_mount3;
    }
    if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
        ext2_warning(sb, __FUNCTION__,
                     "mounting ext3 filesystem as ext2");
    ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
    return 0;

cantfind_ext2:
    if (!silent)
        printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
               sb->s_id);
    goto failed_mount;
failed_mount3:
    percpu_counter_destroy(&sbi->s_freeblocks_counter);
    percpu_counter_destroy(&sbi->s_freeinodes_counter);
    percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
    for (i = 0; i < db_count; i++)
        brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
    kfree(sbi->s_group_desc);
    kfree(sbi->s_debts);
failed_mount:
    brelse(bh);
failed_sbi:
    sb->s_fs_info = NULL;
    kfree(sbi);
    return -EINVAL;
}
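
The offset arithmetic above (and repeated after the blocksize change) locates the 1 KiB ext2 superblock inside a device block that may be larger than 1 KiB. The following is a small standalone sketch of that calculation; the values are chosen for illustration, not read from a real device.

#include <stdio.h>

#define BLOCK_SIZE 1024		/* the ext2 superblock lives in 1 KiB units */

/* Compute which device block holds the superblock and at what byte
 * offset inside that block, as ext2_fill_super() does above. */
static void locate_super(unsigned long sb_block, int blocksize)
{
	unsigned long logic_sb_block;
	unsigned long offset;

	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize;
		offset = (sb_block * BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
		offset = 0;
	}
	printf("sb_block=%lu blocksize=%d -> block %lu, offset %lu\n",
	       sb_block, blocksize, logic_sb_block, offset);
}

int main(void)
{
	locate_super(1, 1024);	/* default case: block 1, offset 0 */
	locate_super(1, 4096);	/* 4 KiB device blocks: block 0, offset 1024 */
	return 0;
}
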
Example #13
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
		brelse(raw_super_buf);
		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
			goto free_sb_buf;
	}
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
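
All of the fill_super routines in this collection unwind failures the same way: resources are acquired in a fixed order and released through a ladder of goto labels in reverse order, so each failure site only jumps to the rung that frees what has been set up so far. The sketch below is a minimal userspace illustration of that convention; the three allocations are placeholders and do not correspond to any structure in the filesystems above.

#include <stdlib.h>
#include <errno.h>

/* Acquire three resources in order; on failure, fall down the label
 * ladder so that only what was actually acquired gets released. */
static int fill_super_sketch(void)
{
	char *info, *cache, *bitmap;
	int err = -ENOMEM;

	info = malloc(64);
	if (!info)
		goto fail;
	cache = malloc(256);
	if (!cache)
		goto free_info;
	bitmap = malloc(128);
	if (!bitmap)
		goto free_cache;

	/* success: a real fill_super would keep these and hang them off
	 * the superblock; freed here only to keep the sketch leak-free */
	free(bitmap);
	free(cache);
	free(info);
	return 0;

free_cache:
	free(cache);
free_info:
	free(info);
fail:
	return err;
}

int main(void)
{
	return fill_super_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}
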
Example #14
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	bool retry = true;
	int i;

try_onemore:
	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	sbi->por_doing = false;
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err) {
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (!f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
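
The later f2fs versions add a "give only one another chance" retry: if the fill fails, everything is torn down, the dentry cache for the superblock is shrunk, and the whole sequence is attempted once more from the top. Below is a minimal standalone sketch of that control flow; try_setup() is a stand-in for the real mount work and fails on the first attempt purely to exercise the retry path.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the real setup work: fail once, then succeed. */
static int try_setup(int attempt)
{
	return attempt == 0 ? -1 : 0;
}

/* Retry the whole setup exactly once, mirroring the try_onemore label
 * and retry flag in the f2fs examples above. */
static int fill_with_retry(void)
{
	bool retry = true;
	int attempt = 0;
	int err;

try_onemore:
	err = try_setup(attempt++);
	if (err) {
		/* the kernel code frees everything and calls
		 * shrink_dcache_sb() here before retrying */
		if (retry) {
			retry = false;
			goto try_onemore;
		}
		return err;
	}
	return 0;
}

int main(void)
{
	printf("fill_with_retry() = %d\n", fill_with_retry());
	return 0;
}
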
Example #15
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
	     const char *dev_name, void *data)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	struct dentry *root_dentry;
	int err, s_new = false;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return ERR_CAST(sd.bdev);

	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
	if (sd.bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
		err = -EBUSY;
		goto failed;
	}
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		s_new = true;

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (err)
			goto failed_super;

		s->s_flags |= MS_ACTIVE;
	} else if (!sd.cno) {
		int busy = false;

		if (nilfs_tree_was_touched(s->s_root)) {
			busy = nilfs_try_to_shrink_tree(s->s_root);
			if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
				printk(KERN_ERR "NILFS: the device already "
				       "has a %s mount.\n",
				       (s->s_flags & MS_RDONLY) ?
				       "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		}
		if (!busy) {
			/*
			 * Try remount to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 */
			err = nilfs_remount(s, &flags, data);
			if (err)
				goto failed_super;
		}
	}

	if (sd.cno) {
		err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
		if (err)
			goto failed_super;
	} else {
		root_dentry = dget(s->s_root);
	}

	if (!s_new)
		blkdev_put(sd.bdev, mode);

	return root_dentry;

 failed_super:
	deactivate_locked_super(s);

 failed:
	if (!s_new)
		blkdev_put(sd.bdev, mode);
	return ERR_PTR(err);
}
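
One detail worth calling out in nilfs_mount() is the reference balance on the block device: blkdev_get_by_path() takes a reference up front, and every exit path drops it with blkdev_put() except when a freshly created superblock (tracked by s_new) has adopted the device. The userspace analogue below sketches only that ownership rule, with an ordinary FILE handle standing in for the bdev; it is an illustration, not a model of the real block-device API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_super { FILE *bdev; };

/* Return the single cached "superblock"; the first caller's handle is
 * adopted (s_new becomes true), later callers keep ownership of theirs. */
static struct fake_super *get_or_create_super(FILE *bdev, bool *s_new)
{
	static struct fake_super cached;

	if (!cached.bdev) {
		cached.bdev = bdev;
		*s_new = true;
	}
	return &cached;
}

int main(void)
{
	for (int i = 0; i < 2; i++) {
		bool s_new = false;
		FILE *bdev = fopen("/dev/null", "r");

		if (!bdev)
			return EXIT_FAILURE;
		get_or_create_super(bdev, &s_new);
		if (!s_new)		/* existing super reused: drop our handle */
			fclose(bdev);
		printf("attempt %d: s_new=%d\n", i, s_new);
	}
	return EXIT_SUCCESS;
}
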
Example #16
#ifdef OLD_KERNEL
static struct super_block * cdfs_mount(struct super_block *sb, void *data, int silent){
  kdev_t dev = sb->s_dev;
  int i, j, t;
  struct cdrom_tochdr  hdr;
#else
static int cdfs_fill_super(struct super_block *sb, void *data, int silent){
  int i, t;
#endif
  struct cdrom_tocentry   entry;   
  int no_audio=0, no_data=0;
  cd * this_cd;
  struct inode *retinode;

  PRINT("cdfs_mount\n");

#ifdef OLD_KERNEL
  MOD_INC_USE_COUNT;

  set_blocksize(dev, CD_FRAMESIZE);  // for bread with ide-cd
#else
  sb_set_blocksize(sb, CD_FRAMESIZE);  // for bread with ide-cd
#endif

  sb->s_blocksize = CD_FRAMESIZE;
  sb->s_blocksize_bits = 11;

  if (!(this_cd = cdfs_info(sb) = kmalloc(sizeof(cd), GFP_KERNEL))){
#ifdef OLD_KERNEL
    MOD_DEC_USE_COUNT;     
    return NULL;
#else
    return -ENOMEM;
#endif
  }

  this_cd->mode           = MODE;
  this_cd->gid            = GID;
  this_cd->uid            = UID;
  this_cd->single         = FALSE;
  this_cd->raw_audio      = 0;
  this_cd->toc_scsi       = FALSE;

  // Initialize cache for maximum sector size
  if (!(this_cd->cache = kmalloc(CD_FRAMESIZE_RAWER*CACHE_SIZE, GFP_KERNEL))) {
#ifdef OLD_KERNEL
    MOD_DEC_USE_COUNT;
    return NULL;
#else
    kfree(cdfs_info(sb));
    return -ENOMEM;
#endif
  }

  // Cache is still invalid
  this_cd->cache_sector = -CACHE_SIZE;

  cdfs_parse_options((char *) data, this_cd);

  /* Populate CD info with '.' and '..' */
  strcpy(this_cd->track[1].name, ".");  this_cd->track[1].start_lba=0;
  strcpy(this_cd->track[2].name, ".."); this_cd->track[2].start_lba=0;
  this_cd->nr_iso_sessions = 0;
  this_cd->size            = 0;

  if (this_cd->toc_scsi){
    if (cdfs_toc_read_full(sb)){
      printk("TOC read failed\n");
#ifdef OLD_KERNEL
      MOD_DEC_USE_COUNT;
      return NULL;
#else
      goto invalid;
#endif
    }
  } else {
    //if (cdfs_ioctl(sb, CDROMREADTOCHDR, (unsigned long)&hdr)){
    if (cdfs_toc_read(sb)){
      printk("cdfs_toc_read failed\n");
#ifdef OLD_KERNEL
      MOD_DEC_USE_COUNT;
      return NULL;
#else
      goto invalid;
#endif
    }
  }

  PRINT("CD contains %d tracks\n", this_cd->tracks);

  /* Collect track info */
  entry.cdte_format = CDROM_LBA;

  for (t=this_cd->tracks; t>=0; t--) {

    i = T2I(t);
//    j = this_cd->tracks-i;

 //   entry.cdte_track = (t==this_cd->tracks) ? CDROM_LEADOUT : t+1;
 //   PRINT("Read track %d/%d/%d\n", entry.cdte_track, t, i);

 //   if (cdfs_ioctl(sb, CDROMREADTOCENTRY, (unsigned long)&entry)){
   //   printk("ioctl(CDROMREADTOCENTRY) failed\n");
     // MOD_DEC_USE_COUNT;
  //    return NULL;
   // }

 //   this_cd->track[i].start_lba  = entry.cdte_addr.lba;
 //   this_cd->track[i].stop_lba   = this_cd->track[i+1].start_lba - 1;
    this_cd->track[i].track_size = this_cd->track[i+1].start_lba - this_cd->track[i].start_lba;  /* in sectors! */

    PRINT("Start[%d]: %d\n", i, this_cd->track[i].start_lba);

    if (t!=this_cd->tracks) {                 /* all tracks but the LEADOUT */
      if (this_cd->track[i].type==DATA) {
	//int track=i;
	no_data++;
	this_cd->track[i].iso_info  = cdfs_get_iso_info(sb, i);
	if (this_cd->track[i].iso_info) {
	  this_cd->track[i].time      = cdfs_constructtime((char*)&(this_cd->track[i].iso_info->creation_date));
	  this_cd->track[i].iso_size  = cdfs_constructsize((char*)&(this_cd->track[i].iso_info->volume_space_size)) * CD_FRAMESIZE;
	  if (!this_cd->single) this_cd->track[i].iso_size += this_cd->track[i].start_lba * CD_FRAMESIZE;
	  this_cd->track[i].track_size *= CD_FRAMESIZE;
	  this_cd->track[i].size = this_cd->track[i+1].start_lba * CD_FRAMESIZE;
	  sprintf(this_cd->track[i].name, this_cd->single ? DATA_NAME_SINGLE : DATA_NAME_ISO, t+1);
	  this_cd->lba_iso_sessions[this_cd->nr_iso_sessions].start = this_cd->track[i].start_lba;
	  this_cd->lba_iso_sessions[this_cd->nr_iso_sessions].stop  = this_cd->track[i].iso_size/CD_FRAMESIZE;
	  this_cd->nr_iso_sessions++;
	  cdfs_get_hfs_info(sb, i);  // possibly also a HFS
	} else {  // DATA, but no ISO -> either HFS or VideoCD
	  if (cdfs_get_hfs_info(sb, i)==-1){
	    printk("CHECKING VIDEOCD!!\n");
	    cdfs_get_XA_info(sb, i);
	    this_cd->track[i].time       = 0;
	    this_cd->track[i].iso_size   = 0;
	    this_cd->track[i].track_size = (this_cd->track[i].track_size-1) * this_cd->track[i].xa_data_size;
	    this_cd->track[i].size       = this_cd->track[i].track_size;
	    sprintf(this_cd->track[i].name, DATA_NAME_VCD, no_data);
	  } else { // HFS, no ISO, no VideoCD -> remove track
	    this_cd->track[i].iso_info  = NULL;
	    this_cd->track[i].type      = 0;
	  }
	}
      } else {
	no_audio++;
	this_cd->track[i].iso_info    = NULL;
	this_cd->track[i].type        = AUDIO;
	this_cd->track[i].time        = get_seconds();
	this_cd->track[i].iso_size    = 0;
	this_cd->track[i].track_size  = this_cd->track[i].track_size * CD_FRAMESIZE_RAW + ((this_cd->raw_audio==0)?WAV_HEADER_SIZE:0);
	this_cd->track[i].size        = this_cd->track[i].track_size;
	this_cd->track[i].avi         = 0;
	sprintf(this_cd->track[i].name, (this_cd->raw_audio)? RAW_AUDIO_NAME:AUDIO_NAME, t+1);
	if (this_cd->raw_audio) {
	  /* read the first sector. */
	  struct cdrom_read_audio cdda;
	  int status,k,j,prevk=0;
	  char* buf;
	  buf=kmalloc(CD_FRAMESIZE_RAW*2,GFP_KERNEL);
	  if(buf==NULL) {
		printk(FSNAME ": kmalloc failed in root.c !\n");
		kfree(this_cd->cache);   /* don't leak the sector cache */
		kfree(cdfs_info(sb));    /* ...or the cd info itself */
		return(-ENOMEM);
	  }
	  for (j=0;j<10;j++) {
	    cdda.addr_format = CDROM_LBA;
	    cdda.nframes     = 1;
	    cdda.buf         = buf+CD_FRAMESIZE_RAW;
	    cdda.addr.lba = this_cd->track[i].start_lba+j;
	    status = cdfs_ioctl(sb,CDROMREADAUDIO,(unsigned long)&cdda);
	    if (status) {
	      printk("cdfs_ioctl(CDROMREADAUDIO,%d) ioctl failed: %d\n", cdda.addr.lba, status);
	      goto out;
	    }
	    /* search the first non-zero byte */
	    for (k=0;k<CD_FRAMESIZE_RAW;k++)
	      if (buf[k+CD_FRAMESIZE_RAW]) break;
	    if (k<=CD_FRAMESIZE_RAW-4) break;
	    prevk=k;
	    if (k<CD_FRAMESIZE_RAW)
	      for (k=0;k<CD_FRAMESIZE_RAW;k++)
		buf[k]=buf[k+CD_FRAMESIZE_RAW];
	  }
	  if (j==10) goto out;
	  if ((j!=0)&&(prevk!=CD_FRAMESIZE_RAW)) {
	    k=prevk;
	    j--;
	  }
	  else k+=CD_FRAMESIZE_RAW;
	  this_cd->track[i].avi_offset = j*CD_FRAMESIZE_RAW+k-CD_FRAMESIZE_RAW;
	  if ((buf[k]=='R')&&(buf[k+1]=='I')&&
	      (buf[k+2]=='F')&&(buf[k+3]=='F')) {
	    this_cd->track[i].avi = 1;
	    this_cd->track[i].avi_swab = 0;
	  } else if ((buf[k]=='I')&&(buf[k+1]=='R')&&
	      (buf[k+2]=='F')&&(buf[k+3]=='F')) {
	    this_cd->track[i].avi = 1;
	    this_cd->track[i].avi_swab = 1;
	  }
	  if (this_cd->track[i].avi) {
	    if ((this_cd->track[i].avi_offset&1)!=0) {
	      printk("AVI offset is not even, error\n");
	      this_cd->track[i].avi=0;
	    } else {
	      this_cd->track[i].track_size -= this_cd->track[i].avi_offset;
	      sprintf(this_cd->track[i].name, AVI_AUDIO_NAME, t+1);
	    }
	  }
out:
    kfree(buf);
	}
      }
      // Calculate total CD size
      this_cd->size += this_cd->track[i].track_size;

      PRINT("Track %2d: (%dB)\n", t,  this_cd->track[i].size);

    } // else CDROM_LEADOUT

  }

  PRINT("CD ends at %d\n", this_cd->track[this_cd->tracks].start_lba);


  /* take care to get disc id after the toc has been read. JP, 29-12-2001 */
  this_cd->discid = discid(this_cd);

  ////////////////////////////////

  /* Check if CD is bootable */
  if (this_cd->track[T2I(0)].type==DATA) cdfs_check_bootable(sb);

  /* Check for an HFS partition in the first data track */
  /*if (no_data) {
    i=T2I(0);
    while (i<T2I(this_cd->tracks)) {
      if (this_cd->track[i].type==DATA)
        break;
      i++;
    }
    cdfs_get_hfs_info(sb, i);
  }
  */
  
  PRINT("%d audio tracks and %d data tracks => %d bytes\n", 
        no_audio, no_data, this_cd->size);
  
  sb->s_magic  = CDFS_MAGIC;
  sb->s_flags |= MS_RDONLY;
  sb->s_op     = &cdfs_ops;
  /* always get inode status */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24)
  retinode=cdfs_iget(sb, 0);
#else
  retinode=iget(sb, 0);
#endif
  if ( IS_ERR(retinode) )
    return PTR_ERR(retinode);

  PRINT("retinode = %ld\n", retinode->i_ino);

  sb->s_root   = d_alloc_root(retinode);

  cdfs_proc_cd = this_cd;

#ifdef OLD_KERNEL
  return sb;
#else
  return 0;

invalid:
  kfree(this_cd->cache);
  kfree(cdfs_info(sb));
  return -EINVAL;
#endif
}
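
For audio tracks the cdfs example sizes the exported file as the raw frame count times CD_FRAMESIZE_RAW, plus a WAV header when raw audio was not requested. The sketch below redoes that arithmetic standalone; CD_FRAMESIZE_RAW is the standard 2352-byte raw CD frame, while the 44-byte value used for WAV_HEADER_SIZE is an assumption based on the canonical RIFF/WAVE header and may differ from the constant in the cdfs sources.

#include <stdio.h>

#define CD_FRAMESIZE_RAW 2352	/* bytes in one raw audio frame */
#define WAV_HEADER_SIZE  44	/* assumed canonical WAV header size */

/* Size of the file cdfs exposes for an audio track of `frames` frames. */
static unsigned long audio_track_bytes(unsigned long frames, int raw_audio)
{
	return frames * CD_FRAMESIZE_RAW + (raw_audio ? 0 : WAV_HEADER_SIZE);
}

int main(void)
{
	unsigned long frames = 3 * 60 * 75;	/* a 3-minute track, 75 frames/s */

	printf("wav: %lu bytes\n", audio_track_bytes(frames, 0));
	printf("raw: %lu bytes\n", audio_track_bytes(frames, 1));
	return 0;
}
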
Example #17
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not made by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
Example #18
static int romfs_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct romfs_super_block *rsb;
	struct inode *root;
	int sz;

	/* I would parse the options here, but there are none.. :) */

	sb_set_blocksize(s, ROMBSIZE);
	s->s_maxbytes = 0xFFFFFFFF;

	bh = sb_bread(s, 0);
	if (!bh) {
		/* XXX merge with other printk? */
                printk ("romfs: unable to read superblock\n");
		goto outnobh;
	}

	rsb = (struct romfs_super_block *)bh->b_data;
	sz = be32_to_cpu(rsb->size);
	if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1
	   || sz < ROMFH_SIZE) {
		if (!silent)
			printk ("VFS: Can't find a romfs filesystem on dev "
				"%s.\n", s->s_id);
		goto out;
	}
	if (romfs_checksum(rsb, min_t(int, sz, 512))) {
		printk ("romfs: bad initial checksum on dev "
			"%s.\n", s->s_id);
		goto out;
	}

	s->s_magic = ROMFS_MAGIC;
	s->s_fs_info = (void *)(long)sz;

	s->s_flags |= MS_RDONLY;

	/* Find the start of the fs */
	sz = (ROMFH_SIZE +
	      strnlen(rsb->name, ROMFS_MAXFN) + 1 + ROMFH_PAD)
	     & ROMFH_MASK;

	s->s_op	= &romfs_ops;
	root = iget(s, sz);
	if (!root)
		goto out;

	s->s_root = d_alloc_root(root);
	if (!s->s_root)
		goto outiput;

	brelse(bh);
	return 0;

outiput:
	iput(root);
out:
	brelse(bh);
outnobh:
	return -EINVAL;
}
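
The "find the start of the fs" step above rounds the fixed volume header plus the NUL-terminated volume name up to a 16-byte boundary. The standalone sketch below assumes the usual romfs constants (ROMFH_SIZE 16, ROMFH_PAD 15, ROMFH_MASK ~15) and shows the rounding for two volume names.

#include <stdio.h>
#include <string.h>

#define ROMFH_SIZE 16
#define ROMFH_PAD  (ROMFH_SIZE - 1)
#define ROMFH_MASK (~ROMFH_PAD)

/* Byte offset of the first file header, as computed in romfs_fill_super(). */
static size_t fs_start(const char *volume_name)
{
	return (ROMFH_SIZE + strlen(volume_name) + 1 + ROMFH_PAD) & ROMFH_MASK;
}

int main(void)
{
	printf("%zu\n", fs_start("rom 12345678"));		/* -> 32 */
	printf("%zu\n", fs_start("a-longer-volume-name"));	/* -> 48 */
	return 0;
}
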
Example #19
static int qnx4_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct inode *root;
	const char *errmsg;
	struct qnx4_sb_info *qs;
	int ret = -EINVAL;

	qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	s->s_fs_info = qs;

	sb_set_blocksize(s, QNX4_BLOCK_SIZE);

	/* Check the superblock signature. Since the qnx4 code is
	   dangerous, we should leave as quickly as possible
	   if we don't belong here... */
	bh = sb_bread(s, 1);
	if (!bh) {
		printk(KERN_ERR "qnx4: unable to read the superblock\n");
		goto outnobh;
	}
	if ( le32_to_cpup((__le32*) bh->b_data) != QNX4_SUPER_MAGIC ) {
		if (!silent)
			printk(KERN_ERR "qnx4: wrong fsid in superblock.\n");
		goto out;
	}
	s->s_op = &qnx4_sops;
	s->s_magic = QNX4_SUPER_MAGIC;
	s->s_flags |= MS_RDONLY;	/* Yup, read-only yet */
	qnx4_sb(s)->sb_buf = bh;
	qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data;


 	/* check before allocating dentries, inodes, .. */
	errmsg = qnx4_checkroot(s);
	if (errmsg != NULL) {
 		if (!silent)
			printk(KERN_ERR "qnx4: %s\n", errmsg);
		goto out;
	}

 	/* does root not have inode number QNX4_ROOT_INO ?? */
	root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
	if (IS_ERR(root)) {
		printk(KERN_ERR "qnx4: get inode failed\n");
		ret = PTR_ERR(root);
 		goto outb;
 	}

	ret = -ENOMEM;
 	s->s_root = d_make_root(root);
 	if (s->s_root == NULL)
 		goto outb;

	brelse(bh);
	return 0;

      outb:
	kfree(qs->BitMap);
      out:
	brelse(bh);
      outnobh:
	kfree(qs);
	s->s_fs_info = NULL;
	return ret;
}
Example #20
/*
 * hfs_mdb_get()
 *
 * Build the in-core MDB for a filesystem, including
 * the B-trees and the volume bitmap.
 */
int hfs_mdb_get(struct super_block *sb)
{
	struct buffer_head *bh;
	struct hfs_mdb *mdb, *mdb2;
	unsigned int block;
	char *ptr;
	int off2, len, size, sect;
	sector_t part_start, part_size;
	loff_t off;
	__be16 attrib;

	/* set the device driver to 512-byte blocks */
	size = sb_min_blocksize(sb, HFS_SECTOR_SIZE);
	if (!size)
		return -EINVAL;

	if (hfs_get_last_session(sb, &part_start, &part_size))
		return -EINVAL;
	while (1) {
		/* See if this is an HFS filesystem */
		bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb);
		if (!bh)
			goto out;

		if (mdb->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC))
			break;
		brelse(bh);

		/* check for a partition block
		 * (should do this only for cdrom/loop though)
		 */
		if (hfs_part_find(sb, &part_start, &part_size))
			goto out;
	}

	HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz);
	if (!size || (size & (HFS_SECTOR_SIZE - 1))) {
		printk(KERN_ERR "hfs: bad allocation block size %d\n", size);
		goto out_bh;
	}

	size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE);
	/* size must be a multiple of 512 */
	while (size & (size - 1))
		size -= HFS_SECTOR_SIZE;
	sect = be16_to_cpu(mdb->drAlBlSt) + part_start;
	/* align block size to first sector */
	while (sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS))
		size >>= 1;
	/* align block size to weird alloc size */
	while (HFS_SB(sb)->alloc_blksz & (size - 1))
		size >>= 1;
	brelse(bh);
	if (!sb_set_blocksize(sb, size)) {
		printk(KERN_ERR "hfs: unable to set blocksize to %u\n", size);
		goto out;
	}

	bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb);
	if (!bh)
		goto out;
	if (mdb->drSigWord != cpu_to_be16(HFS_SUPER_MAGIC))
		goto out_bh;

	HFS_SB(sb)->mdb_bh = bh;
	HFS_SB(sb)->mdb = mdb;

	/* These parameters are read from the MDB, and never written */
	HFS_SB(sb)->part_start = part_start;
	HFS_SB(sb)->fs_ablocks = be16_to_cpu(mdb->drNmAlBlks);
	HFS_SB(sb)->fs_div = HFS_SB(sb)->alloc_blksz >> sb->s_blocksize_bits;
	HFS_SB(sb)->clumpablks = be32_to_cpu(mdb->drClpSiz) /
				 HFS_SB(sb)->alloc_blksz;
	if (!HFS_SB(sb)->clumpablks)
		HFS_SB(sb)->clumpablks = 1;
	HFS_SB(sb)->fs_start = (be16_to_cpu(mdb->drAlBlSt) + part_start) >>
			       (sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS);

	/* These parameters are read from and written to the MDB */
	HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks);
	HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID);
	HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls);
	HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs);
	HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt);
	HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt);

	/* TRY to get the alternate (backup) MDB. */
	sect = part_start + part_size - 2;
	bh = sb_bread512(sb, sect, mdb2);
	if (bh) {
		if (mdb2->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) {
			HFS_SB(sb)->alt_mdb_bh = bh;
			HFS_SB(sb)->alt_mdb = mdb2;
		} else
			brelse(bh);
	}

	if (!HFS_SB(sb)->alt_mdb) {
		printk(KERN_WARNING "hfs: unable to locate alternate MDB\n");
		printk(KERN_WARNING "hfs: continuing without an alternate MDB\n");
	}

	HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0);
	if (!HFS_SB(sb)->bitmap)
		goto out;

	/* read in the bitmap */
	block = be16_to_cpu(mdb->drVBMSt) + part_start;
	off = (loff_t)block << HFS_SECTOR_SIZE_BITS;
	size = (HFS_SB(sb)->fs_ablocks + 8) / 8;
	ptr = (u8 *)HFS_SB(sb)->bitmap;
	while (size) {
		bh = sb_bread(sb, off >> sb->s_blocksize_bits);
		if (!bh) {
			printk(KERN_ERR "hfs: unable to read volume bitmap\n");
			goto out;
		}
		off2 = off & (sb->s_blocksize - 1);
		len = min((int)sb->s_blocksize - off2, size);
		memcpy(ptr, bh->b_data + off2, len);
		brelse(bh);
		ptr += len;
		off += len;
		size -= len;
	}

	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
	if (!HFS_SB(sb)->ext_tree) {
		printk(KERN_ERR "hfs: unable to open extent tree\n");
		goto out;
	}
	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
	if (!HFS_SB(sb)->cat_tree) {
		printk(KERN_ERR "hfs: unable to open catalog tree\n");
		goto out;
	}

	attrib = mdb->drAtrb;
	if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
		printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
			 "running fsck.hfs is recommended.  mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	}
	if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
		printk(KERN_WARNING "hfs: filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	}
	if (!(sb->s_flags & MS_RDONLY)) {
		/* Mark the volume uncleanly unmounted in case we crash */
		attrib &= cpu_to_be16(~HFS_SB_ATTRIB_UNMNT);
		attrib |= cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT);
		mdb->drAtrb = attrib;
		be32_add_cpu(&mdb->drWrCnt, 1);
		mdb->drLsMod = hfs_mtime();

		mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
		hfs_buffer_sync(HFS_SB(sb)->mdb_bh);
	}

	return 0;

out_bh:
	brelse(bh);
out:
	hfs_mdb_put(sb);
	return -EIO;
}
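
The block-size derivation in hfs_mdb_get() is the least obvious part of that routine: starting from the allocation block size (capped at one page), the code forces a power of two in 512-byte steps, then halves the candidate until it is aligned to the sector of the first allocation block and divides the allocation block size evenly. The standalone sketch below replays those three loops; PAGE_SIZE is assumed to be 4096 for the example.

#include <stdio.h>

#define HFS_SECTOR_SIZE      512
#define HFS_SECTOR_SIZE_BITS 9
#define PAGE_SIZE            4096	/* assumed for this sketch */

/* Derive a usable VFS block size from the HFS allocation block size and
 * the sector number of the first allocation block. */
static unsigned pick_blocksize(unsigned alloc_blksz, unsigned first_sect)
{
	unsigned size = alloc_blksz < PAGE_SIZE ? alloc_blksz : PAGE_SIZE;

	while (size & (size - 1))			/* power of two */
		size -= HFS_SECTOR_SIZE;
	while (first_sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS))
		size >>= 1;				/* align to first sector */
	while (alloc_blksz & (size - 1))
		size >>= 1;				/* must divide alloc size */
	return size;
}

int main(void)
{
	printf("%u\n", pick_blocksize(4096, 16));	/* -> 4096 */
	printf("%u\n", pick_blocksize(1536, 3));	/* -> 512 */
	return 0;
}
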
Example #21
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
    struct ufs_sb_info * sbi;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_super_block_second * usb2;
    struct ufs_super_block_third * usb3;
    struct ufs_buffer_head * ubh;
    struct inode *inode;
    unsigned block_size, super_block_size;
    unsigned flags;
    unsigned super_block_offset;

    uspi = NULL;
    ubh = NULL;
    flags = 0;

    UFSD("ENTER\n");

    sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
    if (!sbi)
        goto failed_nomem;
    sb->s_fs_info = sbi;
    memset(sbi, 0, sizeof(struct ufs_sb_info));

    UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));

#ifndef CONFIG_UFS_FS_WRITE
    if (!(sb->s_flags & MS_RDONLY)) {
        printk("ufs was compiled with read-only support, "
               "can't be mounted as read-write\n");
        goto failed;
    }
#endif
    /*
     * Set default mount options
     * Parse mount options
     */
    sbi->s_mount_opt = 0;
    ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK);
    if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) {
        printk("wrong mount options\n");
        goto failed;
    }
    if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) {
        if (!silent)
            printk("You didn't specify the type of your ufs filesystem\n\n"
                   "mount -t ufs -o ufstype="
                   "sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|nextstep-cd|openstep ...\n\n"
                   ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
                   "default is ufstype=old\n");
        ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD);
    }

    sbi->s_uspi = uspi =
                      kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL);
    if (!uspi)
        goto failed;

    super_block_offset=UFS_SBLOCK;

    /* Keep 2Gig file limit. Some UFS variants need to override
       this but as I don't know which I'll let those in the know loosen
       the rules */
    switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
    case UFS_MOUNT_UFSTYPE_44BSD:
        UFSD("ufstype=44bsd\n");
        uspi->s_fsize = block_size = 512;
        uspi->s_fmask = ~(512 - 1);
        uspi->s_fshift = 9;
        uspi->s_sbsize = super_block_size = 1536;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
        break;
    case UFS_MOUNT_UFSTYPE_UFS2:
        UFSD("ufstype=ufs2\n");
        super_block_offset=SBLOCK_UFS2;
        uspi->s_fsize = block_size = 512;
        uspi->s_fmask = ~(512 - 1);
        uspi->s_fshift = 9;
        uspi->s_sbsize = super_block_size = 1536;
        uspi->s_sbbase =  0;
        flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
        if (!(sb->s_flags & MS_RDONLY)) {
            printk(KERN_INFO "ufstype=ufs2 is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;

    case UFS_MOUNT_UFSTYPE_SUN:
        UFSD("ufstype=sun\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        uspi->s_maxsymlinklen = 56;
        flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN;
        break;

    case UFS_MOUNT_UFSTYPE_SUNx86:
        UFSD("ufstype=sunx86\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        uspi->s_maxsymlinklen = 56;
        flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN;
        break;

    case UFS_MOUNT_UFSTYPE_OLD:
        UFSD("ufstype=old\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
        if (!(sb->s_flags & MS_RDONLY)) {
            if (!silent)
                printk(KERN_INFO "ufstype=old is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;

    case UFS_MOUNT_UFSTYPE_NEXTSTEP:
        UFSD("ufstype=nextstep\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
        if (!(sb->s_flags & MS_RDONLY)) {
            if (!silent)
                printk(KERN_INFO "ufstype=nextstep is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;

    case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
        UFSD("ufstype=nextstep-cd\n");
        uspi->s_fsize = block_size = 2048;
        uspi->s_fmask = ~(2048 - 1);
        uspi->s_fshift = 11;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
        if (!(sb->s_flags & MS_RDONLY)) {
            if (!silent)
                printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;

    case UFS_MOUNT_UFSTYPE_OPENSTEP:
        UFSD("ufstype=openstep\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
        if (!(sb->s_flags & MS_RDONLY)) {
            if (!silent)
                printk(KERN_INFO "ufstype=openstep is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;

    case UFS_MOUNT_UFSTYPE_HP:
        UFSD("ufstype=hp\n");
        uspi->s_fsize = block_size = 1024;
        uspi->s_fmask = ~(1024 - 1);
        uspi->s_fshift = 10;
        uspi->s_sbsize = super_block_size = 2048;
        uspi->s_sbbase = 0;
        flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
        if (!(sb->s_flags & MS_RDONLY)) {
            if (!silent)
                printk(KERN_INFO "ufstype=hp is supported read-only\n");
            sb->s_flags |= MS_RDONLY;
        }
        break;
    default:
        if (!silent)
            printk("unknown ufstype\n");
        goto failed;
    }

again:
    if (!sb_set_blocksize(sb, block_size)) {
        printk(KERN_ERR "UFS: failed to set blocksize\n");
        goto failed;
    }

    /*
     * read ufs super block from device
     */

    ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + super_block_offset/block_size, super_block_size);

    if (!ubh)
        goto failed;


    usb1 = ubh_get_usb_first(uspi);
    usb2 = ubh_get_usb_second(uspi);
    usb3 = ubh_get_usb_third(uspi);

    /*
     * Check ufs magic number
     */
    sbi->s_bytesex = BYTESEX_LE;
    switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
    case UFS_MAGIC:
    case UFS2_MAGIC:
    case UFS_MAGIC_LFN:
    case UFS_MAGIC_FEA:
    case UFS_MAGIC_4GB:
        goto magic_found;
    }
    sbi->s_bytesex = BYTESEX_BE;
    switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
    case UFS_MAGIC:
    case UFS2_MAGIC:
    case UFS_MAGIC_LFN:
    case UFS_MAGIC_FEA:
    case UFS_MAGIC_4GB:
        goto magic_found;
    }

    if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP)
            || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD)
            || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP))
            && uspi->s_sbbase < 256) {
        ubh_brelse_uspi(uspi);
        ubh = NULL;
        uspi->s_sbbase += 8;
        goto again;
    }
    if (!silent)
        printk("ufs_read_super: bad magic number\n");
    goto failed;

magic_found:
    /*
     * Check block and fragment sizes
     */
    uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize);
    uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize);
    uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize);
    uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
    uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);

    if (uspi->s_fsize & (uspi->s_fsize - 1)) {
        printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n",
               uspi->s_fsize);
        goto failed;
    }
    if (uspi->s_fsize < 512) {
        printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n",
               uspi->s_fsize);
        goto failed;
    }
    if (uspi->s_fsize > 4096) {
        printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n",
               uspi->s_fsize);
        goto failed;
    }
    if (uspi->s_bsize & (uspi->s_bsize - 1)) {
        printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n",
               uspi->s_bsize);
        goto failed;
    }
    if (uspi->s_bsize < 4096) {
        printk(KERN_ERR "ufs_read_super: block size %u is too small\n",
               uspi->s_bsize);
        goto failed;
    }
    if (uspi->s_bsize / uspi->s_fsize > 8) {
        printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n",
               uspi->s_bsize / uspi->s_fsize);
        goto failed;
    }
    if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
        ubh_brelse_uspi(uspi);
        ubh = NULL;
        block_size = uspi->s_fsize;
        super_block_size = uspi->s_sbsize;
        UFSD("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size);
        goto again;
    }


    ufs_print_super_stuff(sb, flags, usb1, usb2, usb3);

    /*
     * Check, if file system was correctly unmounted.
     * If not, make it read only.
     */
    if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
            ((flags & UFS_ST_MASK) == UFS_ST_OLD) ||
            (((flags & UFS_ST_MASK) == UFS_ST_SUN ||
              (flags & UFS_ST_MASK) == UFS_ST_SUNx86) &&
             (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
        switch(usb1->fs_clean) {
        case UFS_FSCLEAN:
            UFSD("fs is clean\n");
            break;
        case UFS_FSSTABLE:
            UFSD("fs is stable\n");
            break;
        case UFS_FSOSF1:
            UFSD("fs is DEC OSF/1\n");
            break;
        case UFS_FSACTIVE:
            printk("ufs_read_super: fs is active\n");
            sb->s_flags |= MS_RDONLY;
            break;
        case UFS_FSBAD:
            printk("ufs_read_super: fs is bad\n");
            sb->s_flags |= MS_RDONLY;
            break;
        default:
            printk("ufs_read_super: can't grok fs_clean 0x%x\n", usb1->fs_clean);
            sb->s_flags |= MS_RDONLY;
            break;
        }
    } else {
        printk("ufs_read_super: fs needs fsck\n");
        sb->s_flags |= MS_RDONLY;
    }

    /*
     * Read ufs_super_block into internal data structures
     */
    sb->s_op = &ufs_super_ops;
    sb->dq_op = NULL; /***/
    sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);

    uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno);
    uspi->s_cblkno = fs32_to_cpu(sb, usb1->fs_cblkno);
    uspi->s_iblkno = fs32_to_cpu(sb, usb1->fs_iblkno);
    uspi->s_dblkno = fs32_to_cpu(sb, usb1->fs_dblkno);
    uspi->s_cgoffset = fs32_to_cpu(sb, usb1->fs_cgoffset);
    uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);

    if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
        uspi->s_u2_size  = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
        uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
    } else {
        uspi->s_size  =  fs32_to_cpu(sb, usb1->fs_size);
        uspi->s_dsize =  fs32_to_cpu(sb, usb1->fs_dsize);
    }

    uspi->s_ncg = fs32_to_cpu(sb, usb1->fs_ncg);
    /* s_bsize already set */
    /* s_fsize already set */
    uspi->s_fpb = fs32_to_cpu(sb, usb1->fs_frag);
    uspi->s_minfree = fs32_to_cpu(sb, usb1->fs_minfree);
    uspi->s_bmask = fs32_to_cpu(sb, usb1->fs_bmask);
    uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
    uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift);
    uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
    UFSD("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift,
         uspi->s_fshift);
    uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift);
    uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb);
    /* s_sbsize already set */
    uspi->s_csmask = fs32_to_cpu(sb, usb1->fs_csmask);
    uspi->s_csshift = fs32_to_cpu(sb, usb1->fs_csshift);
    uspi->s_nindir = fs32_to_cpu(sb, usb1->fs_nindir);
    uspi->s_inopb = fs32_to_cpu(sb, usb1->fs_inopb);
    uspi->s_nspf = fs32_to_cpu(sb, usb1->fs_nspf);
    uspi->s_npsect = ufs_get_fs_npsect(sb, usb1, usb3);
    uspi->s_interleave = fs32_to_cpu(sb, usb1->fs_interleave);
    uspi->s_trackskew = fs32_to_cpu(sb, usb1->fs_trackskew);
    uspi->s_csaddr = fs32_to_cpu(sb, usb1->fs_csaddr);
    uspi->s_cssize = fs32_to_cpu(sb, usb1->fs_cssize);
    uspi->s_cgsize = fs32_to_cpu(sb, usb1->fs_cgsize);
    uspi->s_ntrak = fs32_to_cpu(sb, usb1->fs_ntrak);
    uspi->s_nsect = fs32_to_cpu(sb, usb1->fs_nsect);
    uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc);
    uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg);
    uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg);
    uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_un.fs_u1.fs_cpc);
    uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_contigsumsize);
    uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3);
    uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3);
    uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat);
    uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos);
    uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
    uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);

    /*
     * Compute other frequently used values
     */
    uspi->s_fpbmask = uspi->s_fpb - 1;
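    /*
     * s_apbshift is log2 of the number of block addresses per block:
     * UFS2 uses 64-bit (8-byte) block pointers and UFS1 32-bit (4-byte)
     * ones, hence bshift - 3 and bshift - 2 respectively.
     */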
    if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
        uspi->s_apbshift = uspi->s_bshift - 3;
    else
        uspi->s_apbshift = uspi->s_bshift - 2;

    uspi->s_2apbshift = uspi->s_apbshift * 2;
    uspi->s_3apbshift = uspi->s_apbshift * 3;
    uspi->s_apb = 1 << uspi->s_apbshift;
    uspi->s_2apb = 1 << uspi->s_2apbshift;
    uspi->s_3apb = 1 << uspi->s_3apbshift;
    uspi->s_apbmask = uspi->s_apb - 1;
    uspi->s_nspfshift = uspi->s_fshift - UFS_SECTOR_BITS;
    uspi->s_nspb = uspi->s_nspf << uspi->s_fpbshift;
    uspi->s_inopf = uspi->s_inopb >> uspi->s_fpbshift;
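    /* s_bpf: bits per fragment (fragment size in bytes times 8) */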
    uspi->s_bpf = uspi->s_fsize << 3;
    uspi->s_bpfshift = uspi->s_fshift + 3;
    uspi->s_bpfmask = uspi->s_bpf - 1;
    if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) ==
            UFS_MOUNT_UFSTYPE_44BSD)
        uspi->s_maxsymlinklen =
            fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);

    sbi->s_flags = flags;

    inode = iget(sb, UFS_ROOTINO);
    if (!inode)
        goto failed;
    if (is_bad_inode(inode)) {
        iput(inode);
        goto failed;
    }
    sb->s_root = d_alloc_root(inode);
    if (!sb->s_root)
        goto dalloc_failed;

    ufs_setup_cstotal(sb);
    /*
     * Read cylinder group structures
     */
    if (!(sb->s_flags & MS_RDONLY))
        if (!ufs_read_cylinder_structures(sb))
            goto failed;

    UFSD("EXIT\n");
    return 0;

dalloc_failed:
    iput(inode);
failed:
    if (ubh)
        ubh_brelse_uspi(uspi);
    kfree(uspi);
    kfree(sbi);
    sb->s_fs_info = NULL;
    UFSD("EXIT (FAILED)\n");
    return -EINVAL;

failed_nomem:
    UFSD("EXIT (NOMEM)\n");
    return -ENOMEM;
}
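The block of assignments above turns the on-disk geometry into shift/mask pairs that the rest of the driver uses for address arithmetic. The following is a rough, self-contained userspace sketch of that arithmetic; the structure name and sample values are invented (a 1024-byte fragment size is assumed), only the field meanings follow the code above.

/*
 * Minimal sketch (not from the UFS sources): turn a byte offset into a
 * fragment number and an offset inside that fragment using shift/mask
 * pairs like s_fshift, s_fmask and s_qfmask computed above.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_uspi {
	uint32_t s_fshift;	/* log2(fragment size) */
	uint32_t s_fmask;	/* ~(fragment size - 1) */
	uint32_t s_qfmask;	/* fragment size - 1 */
};

int main(void)
{
	struct sketch_uspi uspi = {
		.s_fshift = 10,
		.s_fmask  = ~0x3ffu,
		.s_qfmask = 0x3ffu,
	};
	uint32_t byte_off = 5000;

	uint32_t frag  = byte_off >> uspi.s_fshift;	/* fragment number: 4 */
	uint32_t inoff = byte_off & uspi.s_qfmask;	/* offset in fragment: 904 */

	printf("fragment %u, offset %u\n", frag, inoff);
	return 0;
}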
/*
 * fill in the superblock
 */
static int romfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct romfs_super_block *rsb;
	struct inode *root;
	unsigned long pos, img_size;
	const char *storage;
	size_t len;
	int ret;

#ifdef CONFIG_BLOCK
	if (!sb->s_mtd) {
		sb_set_blocksize(sb, ROMBSIZE);
	} else {
		sb->s_blocksize = ROMBSIZE;
		sb->s_blocksize_bits = blksize_bits(ROMBSIZE);
	}
#endif

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_magic = ROMFS_MAGIC;
	sb->s_flags |= MS_RDONLY | MS_NOATIME;
	sb->s_op = &romfs_super_ops;

	/* read the image superblock and check it */
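	/*
	 * Only the first 512 bytes are needed here: they cover the fixed
	 * header fields and the volume name, and they bound the region the
	 * checksum below is computed over.  s_fs_info carries the image
	 * size consulted by romfs_dev_read(), so it is provisionally set
	 * to 512 until the real size has been read from the header.
	 */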
	rsb = kmalloc(512, GFP_KERNEL);
	if (!rsb)
		return -ENOMEM;

	sb->s_fs_info = (void *) 512;
	ret = romfs_dev_read(sb, 0, rsb, 512);
	if (ret < 0)
		goto error_rsb;

	img_size = be32_to_cpu(rsb->size);

	if (sb->s_mtd && img_size > sb->s_mtd->size)
		goto error_rsb_inval;

	sb->s_fs_info = (void *) img_size;

	if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1 ||
	    img_size < ROMFH_SIZE) {
		if (!silent)
			printk(KERN_WARNING "VFS:"
			       " Can't find a romfs filesystem on dev %s.\n",
			       sb->s_id);
		goto error_rsb_inval;
	}

	if (romfs_checksum(rsb, min_t(size_t, img_size, 512))) {
		printk(KERN_ERR "ROMFS: bad initial checksum on dev %s.\n",
		       sb->s_id);
		goto error_rsb_inval;
	}

	storage = sb->s_mtd ? "MTD" : "the block layer";

	len = strnlen(rsb->name, ROMFS_MAXFN);
	if (!silent)
		printk(KERN_NOTICE "ROMFS: Mounting image '%*.*s' through %s\n",
		       (unsigned) len, (unsigned) len, rsb->name, storage);

	kfree(rsb);
	rsb = NULL;

	/* find the root directory */
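	/*
	 * The root directory header follows the 16-byte image header and
	 * the NUL-terminated volume name, rounded up to the next 16-byte
	 * boundary by ROMFH_PAD/ROMFH_MASK.
	 */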
	pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK;

	root = romfs_iget(sb, pos);
	if (IS_ERR(root))
		goto error;

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root)
		goto error_i;

	return 0;

error_i:
	iput(root);
error:
	return -EINVAL;
error_rsb_inval:
	ret = -EINVAL;
error_rsb:
	kfree(rsb);
	return ret;
}
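For reference, the header check performed by romfs_checksum() above can be reproduced outside the kernel. The sketch below is not part of the kernel sources; the summing convention follows the code above (the big-endian 32-bit words of the first min(image size, 512) bytes must total zero), while the helper name and the file handling are purely illustrative.

/*
 * Sketch: verify a romfs image header in userspace the same way
 * romfs_checksum() does - sum the leading bytes as big-endian 32-bit
 * words; a valid image sums to zero.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint32_t sum_be32(const uint32_t *ptr, int size)
{
	uint32_t sum = 0;

	for (size >>= 2; size > 0; size--)
		sum += ntohl(*ptr++);
	return sum;
}

int main(int argc, char **argv)
{
	uint32_t buf[128] = { 0 };	/* first 512 bytes of the image */
	size_t n;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	printf("header checksum: %s\n",
	       sum_be32(buf, (int)n) == 0 ? "ok" : "bad");
	return 0;
}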