Example #1
/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
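A minimal sketch of how a filesystem of that era could plug the helper above into its file_operations (the "examplefs" name is made up, and the generic_file_* helpers are assumed to be available; the .fsync signature matches the one shown above):

/*
 * Hypothetical wiring, for illustration only: every fsync(2) on a
 * regular file then flushes the inode, the superblock and finally
 * the underlying block device, in that order.
 */
static struct file_operations examplefs_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,		/* generic helper shown above */
};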
Example #2
void ext2_check_inodes_bitmap (struct super_block * sb)
{
	struct ext2_super_block * es;
	unsigned long desc_count, bitmap_count, x;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	int i, bs;

	lock_super (sb);
	bs = BYTE_SWAP(sb->u.ext2_sb.s_byte_swapped);
	es = sb->u.ext2_sb.s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) {
		gdp = get_group_desc (sb, i, NULL);
		desc_count += e_swab (bs, gdp->bg_free_inodes_count);
		bitmap_nr = load_inode_bitmap (sb, i);
		if (bitmap_nr < 0)
			continue;
		
		x = ext2_count_free (sb->u.ext2_sb.s_inode_bitmap[bitmap_nr],
				     EXT2_INODES_PER_GROUP(sb) / 8);
		if (e_swab (bs, gdp->bg_free_inodes_count) != x)
			ext2_error (sb, "ext2_check_inodes_bitmap",
				    "Wrong free inodes count in group %d, "
				    "stored = %d, counted = %lu", i,
				    e_swab (bs, gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	if (e_swab (bs, es->s_free_inodes_count) != bitmap_count)
		ext2_error (sb, "ext2_check_inodes_bitmap",
			    "Wrong free inodes count in super block, "
			    "stored = %lu, counted = %lu",
			    (unsigned long) e_swab (bs, es->s_free_inodes_count),
			    bitmap_count);
	unlock_super (sb);
}
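The check compares two independently maintained totals: the per-group free-inode counters in the group descriptors and the number of clear bits in each group's inode bitmap. A standalone sketch of what ext2_count_free() is assumed to compute (free inodes are represented by zero bits in the bitmap); this is illustrative, not the kernel routine:

/* Count the zero (free) bits in the first 'numbytes' bytes of a bitmap. */
static unsigned long count_free_bits(const unsigned char *map, unsigned numbytes)
{
	unsigned long nfree = 0;
	unsigned i;
	int b;

	for (i = 0; i < numbytes; i++)
		for (b = 0; b < 8; b++)
			if (!(map[i] & (1u << b)))
				nfree++;
	return nfree;
}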
Example #3
File: ialloc.c  Project: hugh712/Jollen
unsigned long ext3_count_free_inodes (struct super_block * sb)
{
#ifdef EXT3FS_DEBUG
	struct ext3_super_block * es;
	unsigned long desc_count, bitmap_count, x;
	int bitmap_nr;
	struct ext3_group_desc * gdp;
	int i;

	lock_super (sb);
	es = sb->u.ext3_sb.s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) {
		gdp = ext3_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		bitmap_nr = load_inode_bitmap (sb, i);
		if (bitmap_nr < 0)
			continue;

		x = ext3_count_free (sb->u.ext3_sb.s_inode_bitmap[bitmap_nr],
				     EXT3_INODES_PER_GROUP(sb) / 8);
		printk ("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	printk("ext3_count_free_inodes: stored = %lu, computed = %lu, %lu\n",
		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	unlock_super (sb);
	return desc_count;
#else
	return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_inodes_count);
#endif
}
Example #4
unsigned long ext2_count_free_inodes (struct super_block * sb)
{
#ifdef EXT2FS_DEBUG
	struct ext2_super_block * es;
	unsigned long desc_count, bitmap_count, x;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	int i, bs;

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	bs = BYTE_SWAP(sb->u.ext2_sb.s_byte_swapped);
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) {
		gdp = get_group_desc (sb, i, NULL);
		desc_count += e_swab (bs, gdp->bg_free_inodes_count);
		bitmap_nr = load_inode_bitmap (sb, i);
		if (bitmap_nr < 0)
			continue;
		
		x = ext2_count_free (sb->u.ext2_sb.s_inode_bitmap[bitmap_nr],
				     EXT2_INODES_PER_GROUP(sb) / 8);
		printk ("group %d: stored = %d, counted = %lu\n",
			i, e_swab (bs, gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	printk("ext2_count_free_inodes: stored = %lu, computed = %lu, %lu\n",
		e_swab (bs, es->s_free_inodes_count), desc_count, bitmap_count);
	unlock_super (sb);
	return desc_count;
#else
	return e_swab (BYTE_SWAP(sb->u.ext2_sb.s_byte_swapped),
		       sb->u.ext2_sb.s_es->s_free_inodes_count);
#endif
}
Example #5
void udf_free_inode(struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;

	ino = inode->i_ino;

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	lock_super(sb);

	is_directory = S_ISDIR(inode->i_mode);

	clear_inode(inode);

	if (UDF_SB_LVIDBH(sb))
	{
		if (is_directory)
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
		
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	unlock_super(sb);

	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
}
Example #6
static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct fat_slot_info sinfo;
	struct inode *inode;
	int err;

	lock_super(sb);
	err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
	switch (err) {
	case -ENOENT:
		inode = NULL;
		break;
	case 0:
		inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
		brelse(sinfo.bh);
		break;
	default:
		inode = ERR_PTR(err);
	}
	unlock_super(sb);
	return d_splice_alias(inode, dentry);
}
Example #7
/* Hmm, should we do this like the NFS mount command does? Guess so.. */
struct super_block *
smb_read_super(struct super_block *sb, void *raw_data, int silent)
{
	struct smb_mount_data *data = (struct smb_mount_data *) raw_data;
	struct smb_server *server;
        struct smb_sb_info *smb_sb;
	unsigned int fd;
	struct file *filp;
	kdev_t dev = sb->s_dev;
	int error;

	if (!data) {
		printk("smb_read_super: missing data argument\n");
		sb->s_dev = 0;
		return NULL;
	}
	fd = data->fd;
	if (data->version != SMB_MOUNT_VERSION) {
		printk("smb warning: mount version %s than kernel\n",
		       (data->version < SMB_MOUNT_VERSION) ?
                       "older" : "newer");
	}
	if (fd >= NR_OPEN || !(filp = current->files->fd[fd])) {
		printk("smb_read_super: invalid file descriptor\n");
		sb->s_dev = 0;
		return NULL;
	}
	if (!S_ISSOCK(filp->f_inode->i_mode)) {
		printk("smb_read_super: not a socket!\n");
		sb->s_dev = 0;
		return NULL;
	}

        /* We must malloc our own super-block info */
        smb_sb = (struct smb_sb_info *)smb_kmalloc(sizeof(struct smb_sb_info),
                                                   GFP_KERNEL);

        if (smb_sb == NULL) {
                printk("smb_read_super: could not alloc smb_sb_info\n");
                return NULL;
        }

	filp->f_count += 1; 

	lock_super(sb);

        SMB_SBP(sb) = smb_sb;
        
	sb->s_blocksize = 1024; /* Eh...  Is this correct? */
	sb->s_blocksize_bits = 10;
	sb->s_magic = SMB_SUPER_MAGIC;
	sb->s_dev = dev;
	sb->s_op = &smb_sops;

	server = &(SMB_SBP(sb)->s_server);
	server->sock_file = filp;
	server->lock = 0;
	server->wait = NULL;
        server->packet = NULL;
	server->max_xmit = data->max_xmit;
	if (server->max_xmit <= 0)
		server->max_xmit = SMB_DEF_MAX_XMIT;
   
	server->tid = 0;
	server->pid = current->pid;
	server->mid = current->pid + 20;

        server->m = *data;
	server->m.file_mode = (server->m.file_mode &
                             (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	server->m.dir_mode  = (server->m.dir_mode &
                             (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

        smb_init_root(server);

        /*
         * Make the connection to the server
         */
        
	error = smb_proc_connect(server);

	unlock_super(sb);

	if (error < 0) {
		sb->s_dev = 0;
		printk("smb_read_super: Failed connection, bailing out "
                       "(error = %d).\n", -error);
                goto fail;
	}

        if (server->protocol >= PROTOCOL_LANMAN2)
                server->case_handling = CASE_DEFAULT;
        else
                server->case_handling = CASE_LOWER;

	if ((error = smb_proc_dskattr(sb, &(SMB_SBP(sb)->s_attr))) < 0) {
		sb->s_dev = 0;
		printk("smb_read_super: could not get super block "
                       "attributes\n");
                smb_kfree_s(server->packet, server->max_xmit);
                goto fail;
	}

	if ((error = smb_stat_root(server)) < 0) {
		sb->s_dev = 0;
		printk("smb_read_super: could not get root dir attributes\n");
                smb_kfree_s(server->packet, server->max_xmit);
                goto fail;
	}

	DPRINTK("smb_read_super : %u %u %u %u\n",
                SMB_SBP(sb)->s_attr.total,
                SMB_SBP(sb)->s_attr.blocksize,
                SMB_SBP(sb)->s_attr.allocblocks,
                SMB_SBP(sb)->s_attr.free);

        DPRINTK("smb_read_super: SMB_SBP(sb) = %x\n", (int)SMB_SBP(sb));

	if (!(sb->s_mounted = iget(sb, (int)&(server->root)))) {
		sb->s_dev = 0;
		printk("smb_read_super: get root inode failed\n");
                smb_kfree_s(server->packet, server->max_xmit);
                goto fail;
	}

        MOD_INC_USE_COUNT;
	return sb;

 fail:
	filp->f_count -= 1; 
        smb_kfree_s(SMB_SBP(sb), sizeof(struct smb_sb_info));
        return NULL;
}
Example #8
static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
{
    uid_t uid;
    gid_t gid;
    umode_t umask;
    int lowercase, eas, chk, errs, chkdsk, timeshift;
    int o;
    struct hpfs_sb_info *sbi = hpfs_sb(s);
    char *new_opts = kstrdup(data, GFP_KERNEL);

    *flags |= MS_NOATIME;

    hpfs_lock(s);
    lock_super(s);
    uid = sbi->sb_uid;
    gid = sbi->sb_gid;
    umask = 0777 & ~sbi->sb_mode;
    lowercase = sbi->sb_lowercase;
    eas = sbi->sb_eas;
    chk = sbi->sb_chk;
    chkdsk = sbi->sb_chkdsk;
    errs = sbi->sb_err;
    timeshift = sbi->sb_timeshift;

    if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase,
                         &eas, &chk, &errs, &chkdsk, &timeshift))) {
        printk("HPFS: bad mount options.\n");
        goto out_err;
    }
    if (o == 2) {
        hpfs_help();
        goto out_err;
    }
    if (timeshift != sbi->sb_timeshift) {
        printk("HPFS: timeshift can't be changed using remount.\n");
        goto out_err;
    }

    unmark_dirty(s);

    sbi->sb_uid = uid;
    sbi->sb_gid = gid;
    sbi->sb_mode = 0777 & ~umask;
    sbi->sb_lowercase = lowercase;
    sbi->sb_eas = eas;
    sbi->sb_chk = chk;
    sbi->sb_chkdsk = chkdsk;
    sbi->sb_err = errs;
    sbi->sb_timeshift = timeshift;

    if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);

    replace_mount_options(s, new_opts);

    unlock_super(s);
    hpfs_unlock(s);
    return 0;

out_err:
    unlock_super(s);
    hpfs_unlock(s);
    kfree(new_opts);
    return -EINVAL;
}
Example #9
File: ialloc.c  Project: 274914765/C
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ufs_free_inode (struct inode * inode)
{
    struct super_block * sb;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    int is_directory;
    unsigned ino, cg, bit;
    
    UFSD("ENTER, ino %lu\n", inode->i_ino);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    
    ino = inode->i_ino;

    lock_super (sb);

    if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
        ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
        unlock_super (sb);
        return;
    }
    
    cg = ufs_inotocg (ino);
    bit = ufs_inotocgoff (ino);
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        unlock_super (sb);
        return;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg))
        ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");

    ucg->cg_time = cpu_to_fs32(sb, get_seconds());

    is_directory = S_ISDIR(inode->i_mode);

    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode (inode);

    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
    else {
        ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
        if (ino < ucpi->c_irotor)
            ucpi->c_irotor = ino;
        fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
        uspi->cs_total.cs_nifree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

        if (is_directory) {
            fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
            uspi->cs_total.cs_ndir--;
            fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
        }
    }

    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    
    sb->s_dirt = 1;
    unlock_super (sb);
    UFSD("EXIT\n");
}
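The cg/bit pair above is just the inode number split across cylinder groups (s_ipg inodes per group). A small self-contained model of that arithmetic, assuming the usual definitions of ufs_inotocg()/ufs_inotocgoff(); the numbers in main() are made up:

#include <stdio.h>

static void split_ino(unsigned long ino, unsigned s_ipg,
		      unsigned *cg, unsigned *bit)
{
	*cg  = ino / s_ipg;	/* ufs_inotocg(): which cylinder group */
	*bit = ino % s_ipg;	/* ufs_inotocgoff(): offset in that
				 * group's inode-used bitmap           */
}

int main(void)
{
	unsigned cg, bit;

	split_ino(5000, 1920, &cg, &bit);
	printf("cg=%u bit=%u\n", cg, bit);	/* cg=2 bit=1160 */
	return 0;
}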
Example #10
struct super_block *
affs_read_super(struct super_block *s,void *data, int silent)
{
	struct buffer_head	*bh = NULL;
	struct buffer_head	*bb;
	kdev_t			 dev = s->s_dev;
	int			 root_block;
	int			 size;
	__u32			 chksum;
	__u32			*bm;
	int			 ptype, stype;
	int			 mapidx;
	int			 num_bm;
	int			 i, j;
	int			 key;
	int			 blocksize;
	uid_t			 uid;
	gid_t			 gid;
	int			 reserved;
	int			 az_no;
	unsigned long		 mount_flags;
	unsigned long		 offset;

	pr_debug("affs_read_super(%s)\n",data ? (const char *)data : "no options");

	MOD_INC_USE_COUNT;

	if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
	    &blocksize,&s->u.affs_sb.s_prefix,s->u.affs_sb.s_volume,&mount_flags)) {
		s->s_dev = 0;
		printk("AFFS: error parsing options.\n");
		MOD_DEC_USE_COUNT;
		return NULL;
	}
	lock_super(s);

	/* Get the size of the device in 512-byte blocks.
	 * If we later see that the partition uses bigger
	 * blocks, we will have to change it.
	 */

	size = blksize_size[MAJOR(dev)][MINOR(dev)];
	size = (size ? size : BLOCK_SIZE) / 512 * blk_size[MAJOR(dev)][MINOR(dev)];

	s->u.affs_sb.s_bitmap  = NULL;
	s->u.affs_sb.s_root_bh = NULL;
	s->u.affs_sb.s_flags   = mount_flags;
	s->u.affs_sb.s_mode    = i;
	s->u.affs_sb.s_uid     = uid;
	s->u.affs_sb.s_gid     = gid;

	if (size == 0) {
		s->s_dev = 0;
		unlock_super(s);
		printk("affs_read_super: could not determine device size\n");
		goto out;
	}
	s->u.affs_sb.s_partition_size = size;
	s->u.affs_sb.s_reserved       = reserved;

	/* Try to find root block. Its location may depend on the block size. */

	s->u.affs_sb.s_hashsize = 0;
	if (blocksize > 0) {
		i = blocksize;
		j = blocksize;
	} else {
		i = 512;
		j = 4096;
	}
	for (blocksize = i, key = 0; blocksize <= j; blocksize <<= 1, size >>= 1) {
		if (root_block < 0)
			s->u.affs_sb.s_root_block = (reserved + size - 1) / 2;
		else
			s->u.affs_sb.s_root_block = root_block;
		set_blocksize(dev,blocksize);

		/* The root block location that was calculated above is not
		 * correct if the partition size is an odd number of 512-
		 * byte blocks, which will be rounded down to a number of
		 * 1024-byte blocks, and if there were an even number of
		 * reserved blocks. Ideally, all partition checkers should
		 * report the real number of blocks of the real blocksize,
		 * but since this just cannot be done, we have to try to
		 * find the root block anyways. In the above case, it is one
		 * block behind the calculated one. So we check this one, too.
		 */
		for (num_bm = 0; num_bm < 2; num_bm++) {
			pr_debug("AFFS: Dev %s - trying bs=%d bytes, root at %d, "
				 "size=%d blocks, %d reserved\n",kdevname(dev),blocksize,
				 s->u.affs_sb.s_root_block + num_bm,size,reserved);
			bh = affs_bread(dev,s->u.affs_sb.s_root_block + num_bm,blocksize);
			if (!bh) {
				printk("AFFS: unable to read root block\n");
				goto out;
			}
			if (!affs_checksum_block(blocksize,bh->b_data,&ptype,&stype) &&
			    ptype == T_SHORT && stype == ST_ROOT) {
				s->s_blocksize             = blocksize;
				s->u.affs_sb.s_hashsize    = blocksize / 4 - 56;
				s->u.affs_sb.s_root_block += num_bm;
				key                        = 1;
				break;
			}
		}
		if (key)
			break;
		affs_brelse(bh);
		bh = NULL;
	}
	if (!key) {
		affs_brelse(bh);
		if (!silent)
			printk("AFFS: Can't find a valid root block on device %s\n",kdevname(dev));
		goto out;
	}
	root_block = s->u.affs_sb.s_root_block;

	s->u.affs_sb.s_partition_size   = size;
	s->s_blocksize_bits             = blocksize == 512 ? 9 :
					  blocksize == 1024 ? 10 :
					  blocksize == 2048 ? 11 : 12;

	/* Find out which kind of FS we have */
	bb = affs_bread(dev,0,s->s_blocksize);
	if (bb) {
		chksum = htonl(*(__u32 *)bb->b_data);
		switch (chksum) {
			case MUFS_FS:
			case MUFS_INTLFFS:
				s->u.affs_sb.s_flags |= SF_MUFS;
				/* fall thru */
			case FS_INTLFFS:
				s->u.affs_sb.s_flags |= SF_INTL;
				break;
			case MUFS_DCFFS:
			case MUFS_FFS:
				s->u.affs_sb.s_flags |= SF_MUFS;
				break;
			case FS_DCFFS:
			case FS_FFS:
				break;
			case MUFS_OFS:
				s->u.affs_sb.s_flags |= SF_MUFS;
				/* fall thru */
			case FS_OFS:
				s->u.affs_sb.s_flags |= SF_OFS;
				break;
			case MUFS_DCOFS:
				if (!(s->s_flags & MS_RDONLY)) {
					printk("AFFS: Dircache FS - mounting %s read only.\n",
					       kdevname(dev));
				}
				/* fall thru */
			case MUFS_INTLOFS:
				s->u.affs_sb.s_flags |= SF_MUFS;
				if (0)
				/* fall thru */
			case FS_DCOFS:
				if (!(s->s_flags & MS_RDONLY)) {
					printk("AFFS: Dircache FS - mounting %s read only.\n",
					       kdevname(dev));
				}
				/* fall thru */
			case FS_INTLOFS:
				s->u.affs_sb.s_flags |= SF_INTL | SF_OFS;
				break;
			default:
				printk("AFFS: Unknown filesystem on device %s: %08X\n",
				       kdevname(dev),chksum);
				affs_brelse(bb);
				goto out;
		}
		affs_brelse(bb);
	} else {
		printk("AFFS: Can't get boot block.\n");
		goto out;
	}
	if (mount_flags & SF_VERBOSE) {
		chksum = ntohl(chksum);
		printk("AFFS: Mounting volume \"%*s\": Type=%.3s\\%c, Blocksize=%d\n",
		       GET_END_PTR(struct root_end,bh->b_data,blocksize)->disk_name[0],
		       &GET_END_PTR(struct root_end,bh->b_data,blocksize)->disk_name[1],
		       (char *)&chksum,((char *)&chksum)[3] + '0',blocksize);
	}
Example #11
struct super_block *hpfs_read_super(struct super_block *s,
				    void *options, int silent)
{
	struct hpfs_boot_block *bootblock;
	struct hpfs_super_block *superblock;
	struct hpfs_spare_block *spareblock;
	struct hpfs_dirent *de;
	struct buffer_head *bh0, *bh1, *bh2;
	struct quad_buffer_head qbh;
	dnode_secno root_dno;
	dev_t dev;
	uid_t uid;
	gid_t gid;
	umode_t umask;
	int lowercase;
	int conv;
	int dubious;

	/*
	 * Get the mount options
	 */

	if (!parse_opts(options, &uid, &gid, &umask, &lowercase, &conv)) {
		printk("HPFS: syntax error in mount options.  Not mounted.\n");
		s->s_dev = 0;
		return 0;
	}

	/*
	 * Fill in the super block struct
	 */

	lock_super(s);
	dev = s->s_dev;
	set_blocksize(dev, 512);

	/*
	 * fetch sectors 0, 16, 17
	 */

	bootblock = map_sector(dev, 0, &bh0);
	if (!bootblock)
		goto bail;

	superblock = map_sector(dev, 16, &bh1);
	if (!superblock)
		goto bail0;

	spareblock = map_sector(dev, 17, &bh2);
	if (!spareblock)
		goto bail1;

	/*
	 * Check that this fs looks enough like a known one that we can find
	 * and read the root directory.
	 */

	if (bootblock->magic != 0xaa55
	    || superblock->magic != SB_MAGIC
	    || spareblock->magic != SP_MAGIC
	    || bootblock->sig_28h != 0x28
	    || memcmp(&bootblock->sig_hpfs, "HPFS    ", 8)
	    || little_ushort(bootblock->bytes_per_sector) != 512) {
		printk("HPFS: hpfs_read_super: Not HPFS\n");
		goto bail2;
	}

	/*
	 * Check for inconsistencies -- possibly wrong guesses here, possibly
	 * filesystem problems.
	 */

	dubious = 0;

	dubious |= check_warn(spareblock->dirty != 0,
		       "`Improperly stopped'", "flag is set", "run CHKDSK");
	dubious |= check_warn(spareblock->n_spares_used != 0,
			      "Spare blocks", "may be in use", "run CHKDSK");

	/*
	 * Above errors mean we could get wrong answers if we proceed,
	 * so don't
	 */

	if (dubious)
		goto bail2;

	dubious |= check_warn((spareblock->n_dnode_spares !=
			       spareblock->n_dnode_spares_free),
			      "Spare dnodes", "may be in use", "run CHKDSK");
	dubious |= check_warn(superblock->zero1 != 0,
			      "#1", "unknown word nonzero", "investigate");
	dubious |= check_warn(superblock->zero3 != 0,
			      "#3", "unknown word nonzero", "investigate");
	dubious |= check_warn(superblock->zero4 != 0,
			      "#4", "unknown word nonzero", "investigate");
	dubious |= check_warn(!zerop(superblock->zero5,
				     sizeof superblock->zero5),
			      "#5", "unknown word nonzero", "investigate");
	dubious |= check_warn(!zerop(superblock->zero6,
				     sizeof superblock->zero6),
			      "#6", "unknown word nonzero", "investigate");

	if (dubious)
		printk("HPFS: Proceeding, but operation may be unreliable\n");

	/*
	 * set fs read only
	 */

	s->s_flags |= MS_RDONLY;

	/*
	 * fill in standard stuff
	 */

	s->s_magic = HPFS_SUPER_MAGIC;
	s->s_blocksize = 512;
	s->s_blocksize_bits = 9;
	s->s_op = (struct super_operations *) &hpfs_sops;

	/*
	 * fill in hpfs stuff
	 */

	s->s_hpfs_root = dir_ino(superblock->root);
	s->s_hpfs_fs_size = superblock->n_sectors;
	s->s_hpfs_dirband_size = superblock->n_dir_band / 4;
	s->s_hpfs_dmap = superblock->dir_band_bitmap;
	s->s_hpfs_bitmaps = superblock->bitmaps;
	s->s_hpfs_uid = uid;
	s->s_hpfs_gid = gid;
	s->s_hpfs_mode = 0777 & ~umask;
	s->s_hpfs_n_free = -1;
	s->s_hpfs_n_free_dnodes = -1;
	s->s_hpfs_lowercase = lowercase;
	s->s_hpfs_conv = conv;

	/*
	 * done with the low blocks
	 */

	brelse(bh2);
	brelse(bh1);
	brelse(bh0);

	/*
	 * all set.  try it out.
	 */

	s->s_mounted = iget(s, s->s_hpfs_root);
	unlock_super(s);

	if (!s->s_mounted) {
		printk("HPFS: hpfs_read_super: inode get failed\n");
		s->s_dev = 0;
		return 0;
	}

	/*
	 * find the root directory's . pointer & finish filling in the inode
	 */

	root_dno = fnode_dno(dev, s->s_hpfs_root);
	if (root_dno)
		de = map_dirent(s->s_mounted, root_dno, "\001\001", 2, &qbh);
	if (!root_dno || !de) {
		printk("HPFS: "
		       "hpfs_read_super: root dir isn't in the root dir\n");
		s->s_dev = 0;
		return 0;
	}

	s->s_mounted->i_atime = local_to_gmt(de->read_date);
	s->s_mounted->i_mtime = local_to_gmt(de->write_date);
	s->s_mounted->i_ctime = local_to_gmt(de->creation_date);

	brelse4(&qbh);
	return s;

 bail2:
	brelse(bh2);
 bail1:
	brelse(bh1);
 bail0:
	brelse(bh0);
 bail:
	s->s_dev = 0;
	unlock_super(s);
	return 0;
}
Example #12
struct super_block *sysv_read_super(struct super_block *sb,void *data,
				     int silent)
{
	struct buffer_head *bh;
	const char *found;
	kdev_t dev = sb->s_dev;
	struct inode *root_inode;
	unsigned long blocknr;
	
	if (1024 != sizeof (struct xenix_super_block))
		panic("Xenix FS: bad super-block size");
	if ((512 != sizeof (struct sysv4_super_block))
            || (512 != sizeof (struct sysv2_super_block)))
		panic("SystemV FS: bad super-block size");
	if (500 != sizeof (struct coh_super_block))
		panic("Coherent FS: bad super-block size");
	if (64 != sizeof (struct sysv_inode))
		panic("sysv fs: bad i-node size");
	MOD_INC_USE_COUNT;
	lock_super(sb);
	set_blocksize(dev,BLOCK_SIZE);
	sb->sv_block_base = 0;

	/* Try to read Xenix superblock */
	if ((bh = bread(dev, 1, BLOCK_SIZE)) != NULL) {
		if ((found = detect_xenix(sb,bh)) != NULL)
			goto ok;
		brelse(bh);
	}
	if ((bh = bread(dev, 0, BLOCK_SIZE)) != NULL) {
		/* Try to recognize SystemV superblock */
		if ((found = detect_sysv4(sb,bh)) != NULL)
			goto ok;
		if ((found = detect_sysv2(sb,bh)) != NULL)
			goto ok;
		/* Try to recognize Coherent superblock */
		if ((found = detect_coherent(sb,bh)) != NULL)
			goto ok;
		brelse(bh);
	}
	/* Try to recognize SystemV superblock */
	/* Offset by 1 track, i.e. most probably 9, 15, or 18 kilobytes. */
	/* 2kB blocks with offset of 9 and 15 kilobytes are not supported. */
	/* Maybe we should also check the device geometry ? */
	{	static int offsets[] = { 9, 15, 18, };
		int i;
		for (i = 0; i < sizeof(offsets)/sizeof(offsets[0]); i++)
			if ((bh = bread(dev, offsets[i], BLOCK_SIZE)) != NULL) {
				/* Try to recognize SystemV superblock */
				if ((found = detect_sysv4(sb,bh)) != NULL) {
					if (sb->sv_block_size>BLOCK_SIZE && (offsets[i] % 2))
						goto bad_shift;
					sb->sv_block_base = (offsets[i] << sb->sv_block_size_dec_bits) >> sb->sv_block_size_inc_bits;
					goto ok;
				}
				if ((found = detect_sysv2(sb,bh)) != NULL) {
					if (sb->sv_block_size>BLOCK_SIZE && (offsets[i] % 2))
						goto bad_shift;
					sb->sv_block_base = (offsets[i] << sb->sv_block_size_dec_bits) >> sb->sv_block_size_inc_bits;
					goto ok;
				}
				brelse(bh);
			}
	}
	bad_shift:
	sb->s_dev = 0;
	unlock_super(sb);
	if (!silent)
		printk("VFS: unable to read Xenix/SystemV/Coherent superblock on device "
		       "%s\n", kdevname(dev));
	failed:
	MOD_DEC_USE_COUNT;
	return NULL;

	ok:
	if (sb->sv_block_size >= BLOCK_SIZE) {
		if (sb->sv_block_size != BLOCK_SIZE) {
			brelse(bh);
			set_blocksize(dev, sb->sv_block_size);
			blocknr = (bh->b_blocknr << sb->sv_block_size_dec_bits) >> sb->sv_block_size_inc_bits;
			if ((bh = bread(dev, blocknr, sb->sv_block_size)) == NULL)
				goto bad_superblock;
		}
		switch (sb->sv_type) {
			case FSTYPE_XENIX:
				if (!detected_xenix(sb,bh,bh))
					goto bad_superblock;
				break;
			case FSTYPE_SYSV4:
				if (!detected_sysv4(sb,bh))
					goto bad_superblock;
				break;
			case FSTYPE_SYSV2:
				if (!detected_sysv2(sb,bh))
					goto bad_superblock;
				break;
			default: goto bad_superblock;
		goto superblock_ok;
		bad_superblock:
			brelse(bh);
			sb->s_dev = 0;
			unlock_super(sb);
			printk("SysV FS: cannot read superblock in %d byte mode\n", sb->sv_block_size);
			goto failed;
		superblock_ok:
		}
	} else {
Example #13
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	
	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	
	lock_super(sb);
	
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else 
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	
	DQUOT_FREE_BLOCK (inode, count);

	
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);
	
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
Example #14
File: ialloc.c  Project: hugh712/Jollen
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	unsigned long block_group;
	unsigned long bit;
	int bitmap_nr;
	struct ext3_group_desc * gdp;
	struct ext3_super_block * es;
	int fatal = 0, err;

	if (!inode->i_dev) {
		printk ("ext3_free_inode: inode has no device\n");
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext3_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext3_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext3_free_inode: inode on nonexistent device\n");
		return;
	}

	ino = inode->i_ino;
	ext3_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	lock_super (sb);
	es = sb->u.ext3_sb.s_es;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
	bitmap_nr = load_inode_bitmap (sb, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];

	BUFFER_TRACE(bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext3_clear_bit (bit, bh->b_data))
		ext3_error (sb, "ext3_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext3_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext3_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get write access");
		fatal = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
		if (fatal) goto error_return;

		if (gdp) {
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
		}
		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
		es->s_free_inodes_count =
			cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
		BUFFER_TRACE(sb->u.ext3_sb.s_sbh,
					"call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	ext3_std_error(sb, fatal);
	unlock_super(sb);
}
Example #15
/*
 * This function has been reduced to the actual 'find the inode number' part.
 */
ino_t
ext2_new_inode(const struct inode *dir, int mode)
{
	struct ext2_sb_info *sb;
	struct buf *bh;
	struct buf *bh2;
	int i, j, avefreei;
	int bitmap_nr;
	struct ext2_group_desc *gdp;
	struct ext2_group_desc *tmp;
	struct ext2_super_block *es;

	if (!dir)
		return 0;
	sb = dir->i_e2fs;

        lock_super (DEVVP(dir));
        es = sb->s_es;
repeat:
        gdp = NULL; i=0;

        if (S_ISDIR(mode)) {
		avefreei = es->s_free_inodes_count /
			sb->s_groups_count;
/* I am not yet convinced that this next bit is necessary.
		i = dir->u.ext2_i.i_block_group;
		for (j = 0; j < sb->u.ext2_sb.s_groups_count; j++) {
			tmp = get_group_desc (sb, i, &bh2);
			if ((tmp->bg_used_dirs_count << 8) <
			    tmp->bg_free_inodes_count) {
				gdp = tmp;
				break;
			}
			else
			i = ++i % sb->u.ext2_sb.s_groups_count;
		}
*/
		if (!gdp) {
			for (j = 0; j < sb->s_groups_count; j++) {
				tmp = get_group_desc(ITOV(dir)->v_mount,j,&bh2);
				if (tmp->bg_free_inodes_count &&
					tmp->bg_free_inodes_count >= avefreei) {
					if (!gdp ||
					    (tmp->bg_free_blocks_count >
					     gdp->bg_free_blocks_count)) {
						i = j;
						gdp = tmp;
					}
				}
			}
		}
	}
	else
	{
		/*
		 * Try to place the inode in its parent directory
		 */
		i = dir->i_block_group;
		tmp = get_group_desc (ITOV(dir)->v_mount, i, &bh2);
		if (tmp->bg_free_inodes_count)
			gdp = tmp;
		else
		{
			/*
			 * Use a quadratic hash to find a group with a
			 * free inode
			 */
			for (j = 1; j < sb->s_groups_count; j <<= 1) {
				i += j;
				if (i >= sb->s_groups_count)
					i -= sb->s_groups_count;
				tmp = get_group_desc(ITOV(dir)->v_mount,i,&bh2);
				if (tmp->bg_free_inodes_count) {
					gdp = tmp;
					break;
				}
			}
		}
		if (!gdp) {
			/*
			 * That failed: try linear search for a free inode
			 */
			i = dir->i_block_group + 1;
			for (j = 2; j < sb->s_groups_count; j++) {
				if (++i >= sb->s_groups_count)
					i = 0;
				tmp = get_group_desc(ITOV(dir)->v_mount,i,&bh2);
				if (tmp->bg_free_inodes_count) {
					gdp = tmp;
					break;
				}
			}
		}
	}

	if (!gdp) {
		unlock_super (DEVVP(dir));
		return 0;
	}
	bitmap_nr = load_inode_bitmap (ITOV(dir)->v_mount, i);
	bh = sb->s_inode_bitmap[bitmap_nr];
	if ((j = find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb))) <
	    EXT2_INODES_PER_GROUP(sb)) {
		if (set_bit (j, bh->b_data)) {
			kprintf ( "ext2_new_inode:"
				      "bit already set for inode %d", j);
			goto repeat;
		}
/* Linux now does the following:
		mark_buffer_dirty(bh);
		if (sb->s_flags & MS_SYNCHRONOUS) {
			ll_rw_block (WRITE, 1, &bh);
			wait_on_buffer (bh);
		}
*/
		mark_buffer_dirty(bh);
	} else {
		if (gdp->bg_free_inodes_count != 0) {
			kprintf ( "ext2_new_inode:"
				    "Free inodes count corrupted in group %d",
				    i);
			unlock_super (DEVVP(dir));
			return 0;
		}
		goto repeat;
	}
	j += i * EXT2_INODES_PER_GROUP(sb) + 1;
	if (j < EXT2_FIRST_INO(sb) || j > es->s_inodes_count) {
		kprintf ( "ext2_new_inode:"
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%d", i, j);
		unlock_super (DEVVP(dir));
		return 0;
	}
	gdp->bg_free_inodes_count--;
	if (S_ISDIR(mode))
		gdp->bg_used_dirs_count++;
	mark_buffer_dirty(bh2);
	es->s_free_inodes_count--;
	/* mark_buffer_dirty(sb->u.ext2_sb.s_sbh, 1); */
	sb->s_dirt = 1;
	unlock_super (DEVVP(dir));
	return j;
}
Example #16
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))
	
	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}
	
	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}
	
	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
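The group search above (and the ext2/ext3 variants elsewhere in this listing) follows the same three-step strategy: try the parent's group, then probe with a quadratic hash, then fall back to a linear scan. A standalone model of that strategy, with has_free() standing in for the cs_nifree / bg_free_inodes_count test (purely illustrative, not kernel code):

/* Return the index of a group reported free by has_free(), or -1. */
static int find_group(unsigned parent, unsigned ncg,
		      int (*has_free)(unsigned group))
{
	unsigned i = parent, j;

	if (has_free(i))			/* 1. parent's own group   */
		return i;
	for (j = 1; j < ncg; j <<= 1) {		/* 2. quadratic hash probe */
		i += j;
		if (i >= ncg)
			i -= ncg;
		if (has_free(i))
			return i;
	}
	i = parent + 1;
	for (j = 2; j < ncg; j++) {		/* 3. linear fallback      */
		if (++i >= ncg)
			i = 0;
		if (has_free(i))
			return i;
	}
	return -1;				/* nothing free anywhere   */
}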
Example #17
/*
 * Allocate a block and return its absolute blocknr. Zeroes out the
 * block if 'zero' is set.
 */
int pram_new_block (struct super_block * sb, int* blocknr, int zero)
{
	struct pram_super_block * ps;
	pram_off_t bitmap_block;
	unsigned long flags;
	int bnr, bitmap_bnr, errval;
	void* bitmap;
	void* bp;
	
	lock_super (sb);
	ps = pram_get_super(sb);
	bitmap = pram_get_bitmap(sb);

	if (ps->s_free_blocks_count) {
		/* find the oldest unused block */
		bnr = find_next_zero_bit(bitmap,
					 ps->s_blocks_count,
					 ps->s_free_blocknr_hint);
		
		if (bnr < ps->s_bitmap_blocks || bnr >= ps->s_blocks_count) {
			pram_err("no free blocks found!\n");
			errval = -ENOSPC;
			goto fail;
		}

		pram_dbg ("allocating blocknr %d\n", bnr);
		pram_lock_super(ps);
		ps->s_free_blocks_count--;
		ps->s_free_blocknr_hint =
			(bnr < ps->s_blocks_count-1) ? bnr+1 : 0;
                pram_unlock_super(ps);
	} else {
		pram_err("all blocks allocated\n");
		errval = -ENOSPC;
		goto fail;
	}

	/*
	 * find the block within the bitmap that contains the inuse bit
	 * for the unused block we just found. We need to unlock it to
	 * set the inuse bit.
	 */
	bitmap_bnr = bnr >> (3 + sb->s_blocksize_bits);
	bitmap_block = pram_get_block_off(sb, bitmap_bnr);
	bp = pram_get_block(sb, bitmap_block);
	
	pram_lock_block(sb, bp);
	set_bit(bnr, bitmap); // mark the new block in use
	pram_unlock_block(sb, bp);

	if (zero) {
		bp = pram_get_block(sb, pram_get_block_off(sb, bnr));
		pram_lock_block(sb, bp);
		memset(bp, 0, sb->s_blocksize);
		pram_unlock_block(sb, bp);
	}
		
	*blocknr = bnr;
	errval = 0;
 fail:
	unlock_super (sb);
	return errval;
}
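The shift in the comment above is "bits per bitmap block" arithmetic: one bitmap block of s_blocksize bytes covers s_blocksize * 8 = 1 << (3 + s_blocksize_bits) block bits. A worked example with an assumed 4096-byte block size and an arbitrary block number:

#include <assert.h>

int main(void)
{
	unsigned bnr = 100000;			/* arbitrary block number   */
	unsigned bitmap_bnr = bnr >> (3 + 12);	/* 4096-byte blocks: 32768
						 * bits per bitmap block    */

	assert(bitmap_bnr == 100000 / 32768);	/* == 3 */
	return 0;
}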
Example #18
struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
{
	struct super_block *sb;
	struct inode * inode;
	int block;
	Uint32 start = UDF_I_LOCATION(dir).logicalBlockNum;

	sb = dir->i_sb;
	inode = new_inode(sb);

	if (!inode)
	{
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
		start, err);
	if (*err)
	{
		iput(inode);
		return NULL;
	}
	lock_super(sb);

	if (UDF_SB_LVIDBH(sb))
	{
		struct LogicalVolHeaderDesc *lvhd;
		Uint64 uniqueID;
		lvhd = (struct LogicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
		if (S_ISDIR(mode))
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID)
	{
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	}
	else
		inode->i_gid = current->fsgid;

	UDF_I_LOCATION(inode).logicalBlockNum = block;
	UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
	{
		UDF_I_EXTENDED_FE(inode) = 1;
		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
	}
	else
		UDF_I_EXTENDED_FE(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		UDF_I_CRTIME(inode) = CURRENT_TIME;
	UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) =
		UDF_I_UCRTIME(inode) = CURRENT_UTIME;
	UDF_I_NEW_INODE(inode) = 1;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super(sb);
	if (DQUOT_ALLOC_INODE(inode))
	{
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}

	*err = 0;
	return inode;
}
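The uniqueID update above increments the 64-bit value and, when its low 32 bits wrap to zero, skips ahead by 16 (apparently keeping the lowest ID values reserved). A worked example with a hypothetical starting value:

#include <assert.h>

int main(void)
{
	unsigned long long id = 0xFFFFFFFFULL;	/* low 32 bits all set      */

	if (!(++id & 0x00000000FFFFFFFFULL))	/* ++id == 0x100000000      */
		id += 16;			/* skip reserved low values */
	assert(id == 0x100000010ULL);
	return 0;
}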
Example #19
struct super_block *capifs_read_super(struct super_block *s, void *data,
				      int silent)
{
	struct inode * root_inode;
	struct dentry * root;
	struct capifs_sb_info *sbi;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,51)
	MOD_INC_USE_COUNT;
	lock_super(s);
#endif
	/* Super block already completed? */
	if (s->s_root)
		goto out;

	sbi = (struct capifs_sb_info *) kmalloc(sizeof(struct capifs_sb_info), GFP_KERNEL);
	if ( !sbi )
		goto fail;

	memset(sbi, 0, sizeof(struct capifs_sb_info));
	sbi->magic  = CAPIFS_SBI_MAGIC;

	if ( capifs_parse_options(data,sbi) ) {
		kfree(sbi);
		printk("capifs: called with bogus options\n");
		goto fail;
	}

	sbi->nccis = kmalloc(sizeof(struct capifs_ncci) * sbi->max_ncci, GFP_KERNEL);
	if ( !sbi->nccis ) {
		kfree(sbi);
		goto fail;
	}
	memset(sbi->nccis, 0, sizeof(struct capifs_ncci) * sbi->max_ncci);

	s->u.generic_sbp = (void *) sbi;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = CAPIFS_SUPER_MAGIC;
	s->s_op = &capifs_sops;
	s->s_root = NULL;

	/*
	 * Get the root inode and dentry, but defer checking for errors.
	 */
	root_inode = iget(s, 1); /* inode 1 == root directory */
	root = d_alloc_root(root_inode);

	/*
	 * Check whether somebody else completed the super block.
	 */
	if (s->s_root) {
		if (root) dput(root);
		else iput(root_inode);
		goto out;
	}

	if (!root) {
		printk("capifs: get root dentry failed\n");
		/*
	 	* iput() can block, so we clear the super block first.
	 	*/
		iput(root_inode);
		kfree(sbi->nccis);
		kfree(sbi);
		goto fail;
	}

	/*
	 * Check whether somebody else completed the super block.
	 */
	if (s->s_root)
		goto out;
	
	/*
	 * Success! Install the root dentry now to indicate completion.
	 */
	s->s_root = root;

	sbi->next = mounts;
	if ( sbi->next )
		SBI(sbi->next)->back = &(sbi->next);
	sbi->back = &mounts;
	mounts = s;

out:	/* Success ... somebody else completed the super block for us. */ 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,51)
	unlock_super(s);
#endif
	return s;
fail:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,51)
	unlock_super(s);
	MOD_DEC_USE_COUNT;
#endif
	return NULL;
}
Example #20
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
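The do_more loop above handles a free range that crosses a cylinder-group boundary: only the part that fits in the current group (s_fpg fragments per group) is freed, then the loop continues with the overflow. A standalone model of that splitting, with free_in_group() as a placeholder for the per-group bitmap work:

static void free_range(unsigned long fragment, unsigned count, unsigned s_fpg,
		       void (*free_in_group)(unsigned cg, unsigned bit, unsigned n))
{
	while (count) {
		unsigned cg  = fragment / s_fpg;	/* ufs_dtog()  */
		unsigned bit = fragment % s_fpg;	/* ufs_dtogd() */
		unsigned n   = count;

		if (bit + n > s_fpg)		/* range spills into next cg */
			n = s_fpg - bit;
		free_in_group(cg, bit, n);
		fragment += n;
		count    -= n;
	}
}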
Example #21
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;
	
	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment); 
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size) 
		goal = 0;
	if (goal == 0) 
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);
	 
	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}		
Example #22
File: ialloc.c  Project: hugh712/Jollen
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ext3_new_inode (handle_t *handle,
				const struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int i, j, avefreei;
	struct inode * inode;
	int bitmap_nr;
	struct ext3_group_desc * gdp;
	struct ext3_group_desc * tmp;
	struct ext3_super_block * es;
	int err = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	init_rwsem(&inode->u.ext3_i.truncate_sem);

	lock_super (sb);
	es = sb->u.ext3_sb.s_es;
repeat:
	gdp = NULL;
	i = 0;

	if (S_ISDIR(mode)) {
		avefreei = le32_to_cpu(es->s_free_inodes_count) /
			sb->u.ext3_sb.s_groups_count;
		if (!gdp) {
			for (j = 0; j < sb->u.ext3_sb.s_groups_count; j++) {
				struct buffer_head *temp_buffer;
				tmp = ext3_get_group_desc (sb, j, &temp_buffer);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count) &&
				    le16_to_cpu(tmp->bg_free_inodes_count) >=
							avefreei) {
					if (!gdp || (le16_to_cpu(tmp->bg_free_blocks_count) >
						le16_to_cpu(gdp->bg_free_blocks_count))) {
						i = j;
						gdp = tmp;
						bh2 = temp_buffer;
					}
				}
			}
		}
	} else {
		/*
		 * Try to place the inode in its parent directory
		 */
		i = dir->u.ext3_i.i_block_group;
		tmp = ext3_get_group_desc (sb, i, &bh2);
		if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
			gdp = tmp;
		else
		{
			/*
			 * Use a quadratic hash to find a group with a
			 * free inode
			 */
			for (j = 1; j < sb->u.ext3_sb.s_groups_count; j <<= 1) {
				i += j;
				if (i >= sb->u.ext3_sb.s_groups_count)
					i -= sb->u.ext3_sb.s_groups_count;
				tmp = ext3_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
		if (!gdp) {
			/*
			 * That failed: try linear search for a free inode
			 */
			i = dir->u.ext3_i.i_block_group + 1;
			for (j = 2; j < sb->u.ext3_sb.s_groups_count; j++) {
				if (++i >= sb->u.ext3_sb.s_groups_count)
					i = 0;
				tmp = ext3_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
	}

	err = -ENOSPC;
	if (!gdp)
		goto fail;

	err = -EIO;
	bitmap_nr = load_inode_bitmap (sb, i);
	if (bitmap_nr < 0)
		goto fail;

	bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];

	if ((j = ext3_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT3_INODES_PER_GROUP(sb))) <
	    EXT3_INODES_PER_GROUP(sb)) {
		BUFFER_TRACE(bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err) goto fail;
		
		if (ext3_set_bit (j, bh->b_data)) {
			ext3_error (sb, "ext3_new_inode",
				      "bit already set for inode %d", j);
			goto repeat;
		}
		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err) goto fail;
	} else {
		if (le16_to_cpu(gdp->bg_free_inodes_count) != 0) {
			ext3_error (sb, "ext3_new_inode",
				    "Free inodes count corrupted in group %d",
				    i);
			/* Is it really ENOSPC? */
			err = -ENOSPC;
			if (sb->s_flags & MS_RDONLY)
				goto fail;

			BUFFER_TRACE(bh2, "get_write_access");
			err = ext3_journal_get_write_access(handle, bh2);
			if (err) goto fail;
			gdp->bg_free_inodes_count = 0;
			BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh2);
			if (err) goto fail;
		}
		goto repeat;
	}
	j += i * EXT3_INODES_PER_GROUP(sb) + 1;
	if (j < EXT3_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%d", i, j);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode))
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;
	
	BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
	err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
	if (err) goto fail;
	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
	sb->s_dirt = 1;
	if (err) goto fail;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = j;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext3_i.i_flags = dir->u.ext3_i.i_flags & ~EXT3_INDEX_FL;
	if (S_ISLNK(mode))
		inode->u.ext3_i.i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
#ifdef EXT3_FRAGMENTS
	inode->u.ext3_i.i_faddr = 0;
	inode->u.ext3_i.i_frag_no = 0;
	inode->u.ext3_i.i_frag_size = 0;
#endif
	inode->u.ext3_i.i_file_acl = 0;
	inode->u.ext3_i.i_dir_acl = 0;
	inode->u.ext3_i.i_dtime = 0;
	INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
#ifdef EXT3_PREALLOCATE
	inode->u.ext3_i.i_prealloc_count = 0;
#endif
	inode->u.ext3_i.i_block_group = i;
	
	if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (IS_SYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	inode->i_generation = sb->u.ext3_sb.s_next_generation++;

	inode->u.ext3_i.i_state = EXT3_STATE_NEW;
	err = ext3_mark_inode_dirty(handle, inode);
	if (err) goto fail;
	
	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}
	ext3_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail:
	unlock_super(sb);
	iput(inode);
	ext3_std_error(sb, err);
	return ERR_PTR(err);
}
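
As a side note, the directory-placement policy described in the header comment can be modeled outside the kernel. The following is a minimal standalone sketch with invented names (demo_group and demo_find_group_dir are not kernel API): it scans forward for groups holding at least the average number of free inodes and keeps the candidate with the most free blocks, mirroring the S_ISDIR branch above.

/*
 * Illustration only: a simplified model of the directory placement
 * policy, not kernel code.  Scan all groups, require at least the
 * average free-inode count, prefer the group with most free blocks.
 */
#include <stdio.h>

struct demo_group {
	unsigned free_inodes;
	unsigned free_blocks;
};

/* Returns the chosen group index, or -1 if no group qualifies. */
static int demo_find_group_dir(const struct demo_group *g, int ngroups,
			       unsigned long total_free_inodes)
{
	unsigned avefreei = total_free_inodes / ngroups;
	int best = -1;
	int j;

	for (j = 0; j < ngroups; j++) {
		if (g[j].free_inodes && g[j].free_inodes >= avefreei &&
		    (best < 0 || g[j].free_blocks > g[best].free_blocks))
			best = j;
	}
	return best;
}

int main(void)
{
	struct demo_group groups[] = {
		{ 10, 100 }, { 50, 300 }, { 40, 900 }, { 0, 5000 },
	};

	/* average is 100 / 4 = 25; groups 1 and 2 qualify, 2 has more free blocks */
	printf("chosen group: %d\n", demo_find_group_dir(groups, 4, 100));
	return 0;
}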
Example #23
static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_super_block_third * usb3;
	unsigned new_mount_opt, ufstype;
	unsigned flags;

	lock_ufs(sb);
	lock_super(sb);
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;
	usb1 = ubh_get_usb_first(uspi);
	usb3 = ubh_get_usb_third(uspi);
	
	ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
	new_mount_opt = 0;
	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
	if (!ufs_parse_options (data, &new_mount_opt)) {
		unlock_super(sb);
		unlock_ufs(sb);
		return -EINVAL;
	}
	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
		new_mount_opt |= ufstype;
	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
		printk("ufstype can't be changed during remount\n");
		unlock_super(sb);
		unlock_ufs(sb);
		return -EINVAL;
	}

	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
		UFS_SB(sb)->s_mount_opt = new_mount_opt;
		unlock_super(sb);
		unlock_ufs(sb);
		return 0;
	}
	
	if (*mount_flags & MS_RDONLY) {
		ufs_put_super_internal(sb);
		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
		if ((flags & UFS_ST_MASK) == UFS_ST_SUN
		  || (flags & UFS_ST_MASK) == UFS_ST_SUNOS
		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) 
			ufs_set_fs_state(sb, usb1, usb3,
				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
		ubh_mark_buffer_dirty (USPI_UBH(uspi));
		sb->s_dirt = 0;
		sb->s_flags |= MS_RDONLY;
	} else {
#ifndef CONFIG_UFS_FS_WRITE
		printk("ufs was compiled with read-only support, "
		"can't be mounted as read-write\n");
		unlock_super(sb);
		unlock_ufs(sb);
		return -EINVAL;
#else
		if (ufstype != UFS_MOUNT_UFSTYPE_SUN && 
		    ufstype != UFS_MOUNT_UFSTYPE_SUNOS &&
		    ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
			printk("this ufstype is read-only supported\n");
			unlock_super(sb);
			unlock_ufs(sb);
			return -EINVAL;
		}
		if (!ufs_read_cylinder_structures(sb)) {
			printk("failed during remounting\n");
			unlock_super(sb);
			unlock_ufs(sb);
			return -EPERM;
		}
		sb->s_flags &= ~MS_RDONLY;
#endif
	}
	UFS_SB(sb)->s_mount_opt = new_mount_opt;
	unlock_super(sb);
	unlock_ufs(sb);
	return 0;
}
Example #24
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext2_free_inode (struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;

	ino = inode->i_ino;
	ext2_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	if (!is_bad_inode(inode)) {
		/* Quota is already initialized in iput() */
	    	DQUOT_FREE_INODE(inode);
		DQUOT_DROP(inode);
	}

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	if (ino < EXT2_FIRST_INO(sb) ||
	    ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
	bh = load_inode_bitmap (sb, block_group);
	if (IS_ERR(bh))
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext2_clear_bit (bit, bh->b_data))
		ext2_error (sb, "ext2_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		desc = ext2_get_group_desc (sb, block_group, &bh2);
		if (desc) {
			desc->bg_free_inodes_count =
				cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
			if (is_directory)
				desc->bg_used_dirs_count =
					cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
		}
		mark_buffer_dirty(bh2);
		es->s_free_inodes_count =
			cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
		mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	}
	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}
	sb->s_dirt = 1;
error_return:
	unlock_super (sb);
}
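
The group/bit arithmetic above (block_group and bit derived from ino - 1) is the part most prone to off-by-one mistakes. Here is a tiny standalone illustration of the same computation, using an arbitrary example value for the inodes-per-group count.

/*
 * Standalone illustration, not kernel code: inode numbers start at 1,
 * so inode `ino` lives in group (ino - 1) / inodes_per_group, at bit
 * (ino - 1) % inodes_per_group of that group's inode bitmap.
 */
#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_group = 1976;	/* example value, fs-dependent */
	unsigned long ino = 4000;

	unsigned long block_group = (ino - 1) / inodes_per_group;
	unsigned long bit = (ino - 1) % inodes_per_group;

	/* for ino 4000 this prints: group 2, bit 47 */
	printf("inode %lu: group %lu, bit %lu\n", ino, block_group, bit);
	return 0;
}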
Example #25
/*
 * The way this works is that the mount process passes a structure
 * in the data argument which contains the server's IP address
 * and the root file handle obtained from the server's mount
 * daemon. We stash these away in the private superblock fields.
 */
struct super_block *
nfs_read_super(struct super_block *sb, void *raw_data, int silent)
{
    struct nfs_mount_data	*data = (struct nfs_mount_data *) raw_data;
    struct nfs_server	*server;
    struct rpc_xprt		*xprt;
    struct rpc_clnt		*clnt;
    struct nfs_fh		*root_fh;
    struct inode		*root_inode;
    unsigned int		authflavor;
    int			tcp;
    struct sockaddr_in	srvaddr;
    struct rpc_timeout	timeparms;
    struct nfs_fattr	fattr;

    MOD_INC_USE_COUNT;
    if (!data)
        goto out_miss_args;

    /* No NFS V3. */
    if (data->flags & NFS_MOUNT_VER3)
        goto out_fail;

    /* Don't complain if "mount" is newer. */
    if (data->version < NFS_MOUNT_VERSION) {
        printk("nfs warning: mount version %s than kernel\n",
               data->version < NFS_MOUNT_VERSION ? "older" : "newer");
        if (data->version < 2)
            data->namlen = 0;
        if (data->version < 3)
            data->bsize  = 0;
    }

    /* We now require that the mount process passes the remote address */
    memcpy(&srvaddr, &data->addr, sizeof(srvaddr));
    if (srvaddr.sin_addr.s_addr == INADDR_ANY)
        goto out_no_remote;

    lock_super(sb);

    sb->s_flags |= MS_ODD_RENAME; /* This should go away */

    sb->s_magic      = NFS_SUPER_MAGIC;
    sb->s_op         = &nfs_sops;
    sb->s_blocksize  = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
    sb->u.nfs_sb.s_root = data->root;
    server           = &sb->u.nfs_sb.s_server;
    server->rsize    = nfs_block_size(data->rsize, NULL);
    server->wsize    = nfs_block_size(data->wsize, NULL);
    server->flags    = data->flags;

    if (data->flags & NFS_MOUNT_NOAC) {
        data->acregmin = data->acregmax = 0;
        data->acdirmin = data->acdirmax = 0;
    }
    server->acregmin = data->acregmin*HZ;
    server->acregmax = data->acregmax*HZ;
    server->acdirmin = data->acdirmin*HZ;
    server->acdirmax = data->acdirmax*HZ;

    server->hostname = kmalloc(strlen(data->hostname) + 1, GFP_KERNEL);
    if (!server->hostname)
        goto out_unlock;
    strcpy(server->hostname, data->hostname);

    /* Which protocol do we use? */
    tcp   = (data->flags & NFS_MOUNT_TCP);

    /* Initialize timeout values */
    timeparms.to_initval = data->timeo * HZ / 10;
    timeparms.to_retries = data->retrans;
    timeparms.to_maxval  = tcp? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
    timeparms.to_exponential = 1;

    /* Now create transport and client */
    xprt = xprt_create_proto(tcp? IPPROTO_TCP : IPPROTO_UDP,
                             &srvaddr, &timeparms);
    if (xprt == NULL)
        goto out_no_xprt;

    /* Choose authentication flavor */
    authflavor = RPC_AUTH_UNIX;
    if (data->flags & NFS_MOUNT_SECURE)
        authflavor = RPC_AUTH_DES;
    else if (data->flags & NFS_MOUNT_KERBEROS)
        authflavor = RPC_AUTH_KRB;

    clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
                             NFS_VERSION, authflavor);
    if (clnt == NULL)
        goto out_no_client;

    clnt->cl_intr     = (data->flags & NFS_MOUNT_INTR)? 1 : 0;
    clnt->cl_softrtry = (data->flags & NFS_MOUNT_SOFT)? 1 : 0;
    clnt->cl_chatty   = 1;
    server->client    = clnt;

    /* Fire up rpciod if not yet running */
    if (rpciod_up() != 0)
        goto out_no_iod;

    /*
     * Keep the super block locked while we try to get
     * the root fh attributes.
     */
    root_fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
    if (!root_fh)
        goto out_no_fh;
    *root_fh = data->root;

    if (nfs_proc_getattr(server, root_fh, &fattr) != 0)
        goto out_no_fattr;

    clnt->cl_to_err = 1;

    root_inode = __nfs_fhget(sb, &fattr);
    if (!root_inode)
        goto out_no_root;
    sb->s_root = d_alloc_root(root_inode, NULL);
    if (!sb->s_root)
        goto out_no_root;
    sb->s_root->d_op = &nfs_dentry_operations;
    sb->s_root->d_fsdata = root_fh;

    /* We're airborne */
    unlock_super(sb);

    /* Check whether to start the lockd process */
    if (!(server->flags & NFS_MOUNT_NONLM))
        lockd_up();
    return sb;

    /* Yargs. It didn't work out. */
out_no_root:
    printk("nfs_read_super: get root inode failed\n");
    iput(root_inode);
    goto out_free_fh;

out_no_fattr:
    printk("nfs_read_super: get root fattr failed\n");
out_free_fh:
    kfree(root_fh);
out_no_fh:
    rpciod_down();
    goto out_shutdown;

out_no_iod:
    printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
out_shutdown:
    rpc_shutdown_client(server->client);
    goto out_free_host;

out_no_client:
    printk(KERN_WARNING "NFS: cannot create RPC client.\n");
    xprt_destroy(xprt);
    goto out_free_host;

out_no_xprt:
    printk(KERN_WARNING "NFS: cannot create RPC transport.\n");

out_free_host:
    kfree(server->hostname);
out_unlock:
    unlock_super(sb);
    goto out_fail;

out_no_remote:
    printk("NFS: mount program didn't pass remote address!\n");
    goto out_fail;

out_miss_args:
    printk("nfs_read_super: missing data argument\n");

out_fail:
    sb->s_dev = 0;
    MOD_DEC_USE_COUNT;
    return NULL;
}
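
The conversion from mount options to RPC timeout settings above is compact, so the sketch below pulls it into a standalone helper with simplified, invented types and placeholder caps (this is not the kernel's struct rpc_timeout or nfs_mount_data, and the real RPC_MAX_UDP_TIMEOUT/RPC_MAX_TCP_TIMEOUT values come from the sunrpc headers). It only shows that data->timeo is given in tenths of a second and becomes timeo * HZ / 10 jiffies, with a transport-dependent upper bound.

/*
 * Sketch only: simplified stand-ins for the kernel types, with
 * placeholder timeout caps chosen for the demo.
 */
#include <stdio.h>

#define DEMO_HZ			100
#define DEMO_MAX_UDP_TIMEOUT	(60 * DEMO_HZ)	/* placeholder cap */
#define DEMO_MAX_TCP_TIMEOUT	(600 * DEMO_HZ)	/* placeholder cap */

struct demo_timeout {
	unsigned long initval;	/* initial timeout, in jiffies */
	unsigned long maxval;	/* upper bound for retry backoff, in jiffies */
	unsigned int retries;
	int exponential;	/* double the timeout after each retry */
};

static void demo_init_timeout(struct demo_timeout *to, unsigned int timeo,
			      unsigned int retrans, int tcp)
{
	to->initval = timeo * DEMO_HZ / 10;	/* timeo is in tenths of a second */
	to->retries = retrans;
	to->maxval = tcp ? DEMO_MAX_TCP_TIMEOUT : DEMO_MAX_UDP_TIMEOUT;
	to->exponential = 1;
}

int main(void)
{
	struct demo_timeout to;

	demo_init_timeout(&to, 7, 3, 0);	/* timeo = 0.7 s, 3 retries, UDP */
	printf("initval=%lu maxval=%lu retries=%u\n",
	       to.initval, to.maxval, to.retries);
	return 0;
}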
Example #26
File: ialloc.c  Project: 274914765/C
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then among
 * the groups with above-average free space, the one with the fewest
 * existing directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
    struct super_block * sb;
    struct ufs_sb_info * sbi;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    struct inode * inode;
    unsigned cg, bit, i, j, start;
    struct ufs_inode_info *ufsi;
    int err = -ENOSPC;

    UFSD("ENTER\n");
    
    /* Cannot create files in a deleted directory */
    if (!dir || !dir->i_nlink)
        return ERR_PTR(-EPERM);
    sb = dir->i_sb;
    inode = new_inode(sb);
    if (!inode)
        return ERR_PTR(-ENOMEM);
    ufsi = UFS_I(inode);
    sbi = UFS_SB(sb);
    uspi = sbi->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    lock_super (sb);

    /*
     * Try to place the inode in its parent directory
     */
    i = ufs_inotocg(dir->i_ino);
    if (sbi->fs_cs(i).cs_nifree) {
        cg = i;
        goto cg_found;
    }

    /*
     * Use a quadratic hash to find a group with a free inode
     */
    for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
        i += j;
        if (i >= uspi->s_ncg)
            i -= uspi->s_ncg;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    /*
     * That failed: try linear search for a free inode
     */
    i = ufs_inotocg(dir->i_ino) + 1;
    for (j = 2; j < uspi->s_ncg; j++) {
        i++;
        if (i >= uspi->s_ncg)
            i = 0;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    goto failed;

cg_found:
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        err = -EIO;
        goto failed;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) 
        ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

    start = ucpi->c_irotor;
    bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
    if (!(bit < uspi->s_ipg)) {
        bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
        if (!(bit < start)) {
            ufs_error (sb, "ufs_new_inode",
                "cylinder group %u corrupted - error in inode bitmap\n", cg);
            err = -EIO;
            goto failed;
        }
    }
    UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
    else {
        ufs_panic (sb, "ufs_new_inode", "internal error");
        err = -EIO;
        goto failed;
    }

    if (uspi->fs_magic == UFS2_MAGIC) {
        u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

        if (bit + uspi->s_inopb > initediblk &&
            initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
            ufs2_init_inodes_chunk(sb, ucpi, ucg);
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
    uspi->cs_total.cs_nifree--;
    fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
    
    if (S_ISDIR(mode)) {
        fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
        uspi->cs_total.cs_ndir++;
        fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
    }
    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    inode->i_ino = cg * uspi->s_ipg + bit;
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            inode->i_mode |= S_ISGID;
    } else
        inode->i_gid = current->fsgid;

    inode->i_blocks = 0;
    inode->i_generation = 0;
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
    ufsi->i_flags = UFS_I(dir)->i_flags;
    ufsi->i_lastfrag = 0;
    ufsi->i_shadow = 0;
    ufsi->i_osync = 0;
    ufsi->i_oeftflag = 0;
    ufsi->i_dir_start_lookup = 0;
    memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
    insert_inode_hash(inode);
    mark_inode_dirty(inode);

    if (uspi->fs_magic == UFS2_MAGIC) {
        struct buffer_head *bh;
        struct ufs2_inode *ufs2_inode;

        /*
         * set up the birth date here: there is no point in keeping it in
         * struct ufs_inode_info only to truncate the 64-bit value
         */
        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
            ufs_warning(sb, "ufs_read_inode",
                    "unable to read inode %lu\n",
                    inode->i_ino);
            err = -EIO;
            goto fail_remove_inode;
        }
        lock_buffer(bh);
        ufs2_inode = (struct ufs2_inode *)bh->b_data;
        ufs2_inode += ufs_inotofsbo(inode->i_ino);
        ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
        ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
            sync_dirty_buffer(bh);
        brelse(bh);
    }

    unlock_super (sb);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        err = -EDQUOT;
        goto fail_without_unlock;
    }

    UFSD("allocating inode %lu\n", inode->i_ino);
    UFSD("EXIT\n");
    return inode;

fail_remove_inode:
    unlock_super(sb);
fail_without_unlock:
    inode->i_flags |= S_NOQUOTA;
    inode->i_nlink = 0;
    iput(inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
failed:
    unlock_super (sb);
    make_bad_inode(inode);
    iput (inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
}
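
The quadratic-hash probe used above (and in the ext2/ext3 allocators earlier in this list) can be shown as a standalone loop: starting from the parent directory's group, offsets 1, 2, 4, 8, ... are added modulo the number of cylinder groups until a group with a free inode turns up. The array and function names below are invented for the illustration.

/*
 * Illustration only, not kernel code: quadratic probing over a table of
 * per-group free-inode counts.
 */
#include <stdio.h>

/* Returns the first probed group with free inodes, or -1 if none is found. */
static int demo_quadratic_probe(const unsigned *free_inodes, unsigned ncg,
				unsigned start)
{
	unsigned i = start, j;

	if (free_inodes[i])
		return (int)i;
	for (j = 1; j < ncg; j <<= 1) {
		i += j;
		if (i >= ncg)
			i -= ncg;
		if (free_inodes[i])
			return (int)i;
	}
	return -1;
}

int main(void)
{
	unsigned free_inodes[8] = { 0, 0, 0, 0, 0, 9, 0, 0 };

	/* starting at group 2 the probe visits 2, 3, 5 and stops at 5 */
	printf("chosen group: %d\n", demo_quadratic_probe(free_inodes, 8, 2));
	return 0;
}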
Example #27
static void hpfs_put_super(struct super_block *s)
{
	lock_super(s);
	s->s_dev = 0;
	unlock_super(s);
}
Example #28
void proc_put_super(struct super_block *sb)
{
	lock_super(sb);
	sb->s_dev = 0;
	unlock_super(sb);
}
Example #29
struct inode * ext2_new_inode (const struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int group, i;
	ino_t ino;
	struct inode * inode;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;
	int err;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
repeat:
	if (S_ISDIR(mode))
		group = find_group_dir(sb, dir->u.ext2_i.i_block_group);
	else 
		group = find_group_other(sb, dir->u.ext2_i.i_block_group);

	err = -ENOSPC;
	if (group == -1)
		goto fail;

	err = -EIO;
	bh = load_inode_bitmap (sb, group);
	if (IS_ERR(bh))
		goto fail2;

	i = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb));
	if (i >= EXT2_INODES_PER_GROUP(sb))
		goto bad_count;
	ext2_set_bit (i, bh->b_data);

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}

	ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
	if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%ld", group, ino);
		err = -EIO;
		goto fail2;
	}

	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	sb->s_dirt = 1;
	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext2_i.i_new_inode = 1;
	inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags & ~EXT2_BTREE_FL;
	if (S_ISLNK(mode))
		inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
	inode->u.ext2_i.i_block_group = group;
	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	insert_inode_hash(inode);
	inode->i_generation = event++;
	mark_inode_dirty(inode);

	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}
	ext2_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail2:
	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
	if (S_ISDIR(mode))
		desc->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
	mark_buffer_dirty(bh2);
fail:
	unlock_super(sb);
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);

bad_count:
	ext2_error (sb, "ext2_new_inode",
		    "Free inodes count corrupted in group %d",
		    group);
	/* Is it really ENOSPC? */
	err = -ENOSPC;
	if (sb->s_flags & MS_RDONLY)
		goto fail;

	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count = 0;
	mark_buffer_dirty(bh2);
	goto repeat;
}
Example #30
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
        struct reiserfs_bitmap_info *bitmap;
	struct buffer_head * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}	
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}
       
	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
	
	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new) 
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr     = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {	    
	    /* reallocate journal bitmaps */
	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
		unlock_super(s) ;
		return -ENOMEM ;
	    }
	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
	    ** node pointers from the old journal bitmap structs, and then
	    ** transfer the new data structures into the journal struct.
	    **
	    ** using the copy_size var below allows this code to work for
	    ** both shrinking and expanding the FS.
	    */
	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
		struct reiserfs_bitmap_node **node_tmp ;
		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

		/* just in case vfree schedules on us, copy the new
		** pointer into the journal struct before freeing the 
		** old one
		*/
		node_tmp = jb->bitmaps ;
		jb->bitmaps = jbitmap[i].bitmaps ;
		vfree(node_tmp) ;
	    }	
	
	    /* allocate additional bitmap blocks, reallocate array of bitmap
	     * block pointers */
	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
	    if (!bitmap) {
		printk("reiserfs_resize: unable to allocate memory.\n");
		return -ENOMEM;
	    }
	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
	    for (i = 0; i < bmap_nr; i++)
		bitmap[i] = SB_AP_BITMAP(s)[i];
	    for (i = bmap_nr; i < bmap_nr_new; i++) {
		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

		set_buffer_uptodate(bitmap[i].bh);
		mark_buffer_dirty(bitmap[i].bh) ;
		sync_dirty_buffer(bitmap[i].bh);
		// update bitmap_info stuff
		bitmap[i].first_zero_hint=1;
		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
	    }	
	    /* free old bitmap blocks array */
	    vfree(SB_AP_BITMAP(s));
	    SB_AP_BITMAP(s) = bitmap;
	}
	
	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_clear_le_bit(i, 
					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_set_le_bit(i,
					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
 
	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
 	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
	
	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
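
The bitmap sizing at the top of reiserfs_resize is plain arithmetic: each bitmap block tracks blocksize * 8 filesystem blocks, so the new bitmap count is the block count divided by that, rounded up, and block_r_new is how many blocks the last (possibly partial) bitmap block covers. A standalone illustration with invented names and an arbitrary 4 KiB block size:

/*
 * Illustration only, not kernel code: the bitmap-block counting done at
 * the start of the resize path.
 */
#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;			/* bytes per block */
	unsigned long bits_per_bitmap = blocksize * 8;	/* blocks per bitmap block */
	unsigned long block_count_new = 100000;

	unsigned long bmap_nr_new = block_count_new / bits_per_bitmap;
	unsigned long block_r_new = block_count_new - bmap_nr_new * bits_per_bitmap;

	if (block_r_new)
		bmap_nr_new++;			/* partial last bitmap block */
	else
		block_r_new = bits_per_bitmap;	/* last bitmap block is full */

	/* 100000 blocks at 4 KiB gives 4 bitmap blocks, the last covering 1696 */
	printf("bitmap blocks: %lu, blocks covered by the last one: %lu\n",
	       bmap_nr_new, block_r_new);
	return 0;
}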