Example #1
void udf_free_inode(struct inode *inode)
{
    struct super_block *sb = inode->i_sb;
    struct udf_sb_info *sbi = UDF_SB(sb);

    /*
     * Note: we must free any quota before locking the superblock,
     * as writing the quota to disk may need the lock as well.
     */
    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode(inode);

    mutex_lock(&sbi->s_alloc_mutex);
    if (sbi->s_lvid_bh) {
        struct logicalVolIntegrityDescImpUse *lvidiu =
                            udf_sb_lvidiu(sbi);
        if (S_ISDIR(inode->i_mode))
            le32_add_cpu(&lvidiu->numDirs, -1);
        else
            le32_add_cpu(&lvidiu->numFiles, -1);

        mark_buffer_dirty(sbi->s_lvid_bh);
    }
    mutex_unlock(&sbi->s_alloc_mutex);

    udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1);
}
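The udf_free_inode() example above shows the ordering that recurs throughout these examples: the inode is uncharged from quota and its dquot references are released before any filesystem-internal lock is taken, since writing quota back to disk may itself need that lock. A condensed sketch of that ordering follows (a sketch only, using the old DQUOT_* macros from <linux/quotaops.h>; s_alloc_mutex stands in for whatever lock the filesystem takes around its on-disk counters):

	DQUOT_FREE_INODE(inode);          /* uncharge the inode from its owner's quota */
	DQUOT_DROP(inode);                /* release the inode's dquot references */
	clear_inode(inode);               /* tear down remaining VFS state */

	mutex_lock(&sbi->s_alloc_mutex);  /* only now take the allocation lock ... */
	/* ... update the on-disk usage counters ... */
	mutex_unlock(&sbi->s_alloc_mutex);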
Example #2
/* quota utility function, call if you've had to abort after calling
** new_inode_init, and have not called reiserfs_new_inode yet.
** This should only be called on inodes that do not have stat data
** inserted into the tree yet.
*/
static int drop_new_inode(struct inode *inode) {
    DQUOT_DROP(inode);
    make_bad_inode(inode) ;
    inode->i_flags |= S_NOQUOTA;
    iput(inode) ;
    return 0 ;
}
Example #3
void udf_free_inode(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode(inode);

	lock_udf_alloc_sem(sbi);
	if (sbi->s_lvidbh) {
		if (S_ISDIR(inode->i_mode))
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
		
		mark_buffer_dirty(sbi->s_lvidbh);
	}
	unlock_udf_alloc_sem(sbi);

	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
}
Example #4
/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	if (inode->i_nrpages)
		truncate_inode_pages(inode, 0);
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);

	inode->i_state = 0;
}
Example #5
/*
 * Truncate a file.
 * The calling routines must make sure to update the ctime
 * field and call notify_change.
 *
 * XXX Nobody calls this thing? -DaveM
 * N.B. After this call fhp needs an fh_put
 */
int
nfsd_truncate(struct svc_rqst *rqstp, struct svc_fh *fhp, unsigned long size)
{
	struct dentry	*dentry;
	struct inode	*inode;
	struct iattr	newattrs;
	int		err;
	kernel_cap_t	saved_cap;

	err = fh_verify(rqstp, fhp, S_IFREG, MAY_WRITE | MAY_TRUNC);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;

	err = get_write_access(inode);
	if (err)
		goto out_nfserr;

	/* Things look sane, lock and do it. */
	fh_lock(fhp);
	DQUOT_INIT(inode);
	newattrs.ia_size = size;
	newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
	if (current->fsuid != 0) {
		saved_cap = current->cap_effective;
		cap_clear(current->cap_effective);
	}
	err = notify_change(dentry, &newattrs);
	if (current->fsuid != 0)
		current->cap_effective = saved_cap;
	if (!err) {
		vmtruncate(inode, size);
		if (inode->i_op && inode->i_op->truncate)
			inode->i_op->truncate(inode);
	}
	put_write_access(inode);
	DQUOT_DROP(inode);
	fh_unlock(fhp);
out_nfserr:
	if (err)
		err = nfserrno(-err);
out:
	return err;
}
Example #6
/*
 * Close a file.
 */
void
nfsd_close(struct file *filp)
{
	struct dentry	*dentry = filp->f_dentry;
	struct inode	*inode = dentry->d_inode;

	if (!inode->i_count)
		printk(KERN_WARNING "nfsd: inode count == 0!\n");
	if (!dentry->d_count)
		printk(KERN_WARNING "nfsd: wheee, %s/%s d_count == 0!\n",
			dentry->d_parent->d_name.name, dentry->d_name.name);
	if (filp->f_op && filp->f_op->release)
		filp->f_op->release(inode, filp);
	if (filp->f_mode & FMODE_WRITE) {
		put_write_access(inode);
		DQUOT_DROP(inode);
	}
}
Example #7
void jfs_delete_inode(struct inode *inode)
{
	jfs_info("In jfs_delete_inode, inode = 0x%p", inode);

	if (test_cflag(COMMIT_Freewmap, inode))
		freeZeroLink(inode);

	diFree(inode);

	/*
	 * Free the inode from the quota allocation.
	 */
	DQUOT_INIT(inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode(inode);
}
Example #8
void jfs_delete_inode(struct inode *inode)
{
	jfs_info("In jfs_delete_inode, inode = 0x%p", inode);

	if (!is_bad_inode(inode) &&
	    (JFS_IP(inode)->fileset == FILESYSTEM_I)) {
		truncate_inode_pages(&inode->i_data, 0);

		if (test_cflag(COMMIT_Freewmap, inode))
			jfs_free_zero_link(inode);

		diFree(inode);

		/*
		 * Free the inode from the quota allocation.
		 */
		DQUOT_INIT(inode);
		DQUOT_FREE_INODE(inode);
		DQUOT_DROP(inode);
	}

	clear_inode(inode);
}
Example #9
void udf_free_inode(struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;

	ino = inode->i_ino;

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	lock_super(sb);

	is_directory = S_ISDIR(inode->i_mode);

	clear_inode(inode);

	if (UDF_SB_LVIDBH(sb))
	{
		if (is_directory)
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
		
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	unlock_super(sb);

	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
}
Example #10
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext3_group_desc * gdp = NULL;
	struct ext3_super_block * es;
	struct ext3_inode_info *ei;
	struct ext3_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT3_I(inode);

	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else 
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext3_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext3_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
		if (ino < EXT3_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext3_journal_dirty_metadata");
				err = ext3_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT3_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d, inode=%lu", group, ino);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT3_DIRSYNC_FL;
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = 0;
	ei->i_frag_no = 0;
	ei->i_frag_size = 0;
#endif
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext3_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT3_STATE_NEW;
	ei->i_extra_isize =
		(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;

	ret = inode;
	if(DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext3_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_init_security(handle,inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_mark_inode_dirty(handle, inode);
	if (err) {
		ext3_std_error(sb, err);
		goto fail_free_drop;
	}

	ext3_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext3_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
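The fail_drop path of ext3_new_inode() above is the standard rollback when charging the new inode to quota fails; the same sequence appears in the ufs, udf and jfs allocators below. A minimal sketch of that rollback (old DQUOT_* macro interface; the surrounding error handling varies by filesystem):

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);		/* release any dquot references just taken */
		inode->i_flags |= S_NOQUOTA;	/* keep later code from charging quota again */
		inode->i_nlink = 0;		/* make iput() discard the half-built inode */
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}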
Example #11
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ufs_free_inode (struct inode * inode)
{
    struct super_block * sb;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    int is_directory;
    unsigned ino, cg, bit;
    
    UFSD("ENTER, ino %lu\n", inode->i_ino);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    
    ino = inode->i_ino;

    lock_super (sb);

    if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
        ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
        unlock_super (sb);
        return;
    }
    
    cg = ufs_inotocg (ino);
    bit = ufs_inotocgoff (ino);
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        unlock_super (sb);
        return;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg))
        ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");

    ucg->cg_time = cpu_to_fs32(sb, get_seconds());

    is_directory = S_ISDIR(inode->i_mode);

    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode (inode);

    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
    else {
        ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
        if (ino < ucpi->c_irotor)
            ucpi->c_irotor = ino;
        fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
        uspi->cs_total.cs_nifree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

        if (is_directory) {
            fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
            uspi->cs_total.cs_ndir--;
            fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
        }
    }

    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    
    sb->s_dirt = 1;
    unlock_super (sb);
    UFSD("EXIT\n");
}
Example #12
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
    struct super_block * sb;
    struct ufs_sb_info * sbi;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    struct inode * inode;
    unsigned cg, bit, i, j, start;
    struct ufs_inode_info *ufsi;
    int err = -ENOSPC;

    UFSD("ENTER\n");
    
    /* Cannot create files in a deleted directory */
    if (!dir || !dir->i_nlink)
        return ERR_PTR(-EPERM);
    sb = dir->i_sb;
    inode = new_inode(sb);
    if (!inode)
        return ERR_PTR(-ENOMEM);
    ufsi = UFS_I(inode);
    sbi = UFS_SB(sb);
    uspi = sbi->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    lock_super (sb);

    /*
     * Try to place the inode in its parent directory
     */
    i = ufs_inotocg(dir->i_ino);
    if (sbi->fs_cs(i).cs_nifree) {
        cg = i;
        goto cg_found;
    }

    /*
     * Use a quadratic hash to find a group with a free inode
     */
    for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
        i += j;
        if (i >= uspi->s_ncg)
            i -= uspi->s_ncg;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    /*
     * That failed: try linear search for a free inode
     */
    i = ufs_inotocg(dir->i_ino) + 1;
    for (j = 2; j < uspi->s_ncg; j++) {
        i++;
        if (i >= uspi->s_ncg)
            i = 0;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    goto failed;

cg_found:
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        err = -EIO;
        goto failed;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) 
        ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

    start = ucpi->c_irotor;
    bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
    if (!(bit < uspi->s_ipg)) {
        bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
        if (!(bit < start)) {
            ufs_error (sb, "ufs_new_inode",
                "cylinder group %u corrupted - error in inode bitmap\n", cg);
            err = -EIO;
            goto failed;
        }
    }
    UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
    else {
        ufs_panic (sb, "ufs_new_inode", "internal error");
        err = -EIO;
        goto failed;
    }

    if (uspi->fs_magic == UFS2_MAGIC) {
        u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

        if (bit + uspi->s_inopb > initediblk &&
            initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
            ufs2_init_inodes_chunk(sb, ucpi, ucg);
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
    uspi->cs_total.cs_nifree--;
    fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
    
    if (S_ISDIR(mode)) {
        fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
        uspi->cs_total.cs_ndir++;
        fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
    }
    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    inode->i_ino = cg * uspi->s_ipg + bit;
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            inode->i_mode |= S_ISGID;
    } else
        inode->i_gid = current->fsgid;

    inode->i_blocks = 0;
    inode->i_generation = 0;
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
    ufsi->i_flags = UFS_I(dir)->i_flags;
    ufsi->i_lastfrag = 0;
    ufsi->i_shadow = 0;
    ufsi->i_osync = 0;
    ufsi->i_oeftflag = 0;
    ufsi->i_dir_start_lookup = 0;
    memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
    insert_inode_hash(inode);
    mark_inode_dirty(inode);

    if (uspi->fs_magic == UFS2_MAGIC) {
        struct buffer_head *bh;
        struct ufs2_inode *ufs2_inode;

        /*
         * setup birth date, we do it here because of there is no sense
         * to hold it in struct ufs_inode_info, and lose 64 bit
         */
        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
            ufs_warning(sb, "ufs_read_inode",
                    "unable to read inode %lu\n",
                    inode->i_ino);
            err = -EIO;
            goto fail_remove_inode;
        }
        lock_buffer(bh);
        ufs2_inode = (struct ufs2_inode *)bh->b_data;
        ufs2_inode += ufs_inotofsbo(inode->i_ino);
        ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
        ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
            sync_dirty_buffer(bh);
        brelse(bh);
    }

    unlock_super (sb);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        err = -EDQUOT;
        goto fail_without_unlock;
    }

    UFSD("allocating inode %lu\n", inode->i_ino);
    UFSD("EXIT\n");
    return inode;

fail_remove_inode:
    unlock_super(sb);
fail_without_unlock:
    inode->i_flags |= S_NOQUOTA;
    inode->i_nlink = 0;
    iput(inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
failed:
    unlock_super (sb);
    make_bad_inode(inode);
    iput (inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
}
Example #13
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ext3_new_inode (handle_t *handle,
				const struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int i, j, avefreei;
	struct inode * inode;
	int bitmap_nr;
	struct ext3_group_desc * gdp;
	struct ext3_group_desc * tmp;
	struct ext3_super_block * es;
	int err = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	init_rwsem(&inode->u.ext3_i.truncate_sem);

	lock_super (sb);
	es = sb->u.ext3_sb.s_es;
repeat:
	gdp = NULL;
	i = 0;

	if (S_ISDIR(mode)) {
		avefreei = le32_to_cpu(es->s_free_inodes_count) /
			sb->u.ext3_sb.s_groups_count;
		if (!gdp) {
			for (j = 0; j < sb->u.ext3_sb.s_groups_count; j++) {
				struct buffer_head *temp_buffer;
				tmp = ext3_get_group_desc (sb, j, &temp_buffer);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count) &&
				    le16_to_cpu(tmp->bg_free_inodes_count) >=
							avefreei) {
					if (!gdp || (le16_to_cpu(tmp->bg_free_blocks_count) >
						le16_to_cpu(gdp->bg_free_blocks_count))) {
						i = j;
						gdp = tmp;
						bh2 = temp_buffer;
					}
				}
			}
		}
	} else {
		/*
		 * Try to place the inode in its parent directory
		 */
		i = dir->u.ext3_i.i_block_group;
		tmp = ext3_get_group_desc (sb, i, &bh2);
		if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
			gdp = tmp;
		else
		{
			/*
			 * Use a quadratic hash to find a group with a
			 * free inode
			 */
			for (j = 1; j < sb->u.ext3_sb.s_groups_count; j <<= 1) {
				i += j;
				if (i >= sb->u.ext3_sb.s_groups_count)
					i -= sb->u.ext3_sb.s_groups_count;
				tmp = ext3_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
		if (!gdp) {
			/*
			 * That failed: try linear search for a free inode
			 */
			i = dir->u.ext3_i.i_block_group + 1;
			for (j = 2; j < sb->u.ext3_sb.s_groups_count; j++) {
				if (++i >= sb->u.ext3_sb.s_groups_count)
					i = 0;
				tmp = ext3_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
	}

	err = -ENOSPC;
	if (!gdp)
		goto fail;

	err = -EIO;
	bitmap_nr = load_inode_bitmap (sb, i);
	if (bitmap_nr < 0)
		goto fail;

	bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];

	if ((j = ext3_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT3_INODES_PER_GROUP(sb))) <
	    EXT3_INODES_PER_GROUP(sb)) {
		BUFFER_TRACE(bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err) goto fail;
		
		if (ext3_set_bit (j, bh->b_data)) {
			ext3_error (sb, "ext3_new_inode",
				      "bit already set for inode %d", j);
			goto repeat;
		}
		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err) goto fail;
	} else {
		if (le16_to_cpu(gdp->bg_free_inodes_count) != 0) {
			ext3_error (sb, "ext3_new_inode",
				    "Free inodes count corrupted in group %d",
				    i);
			/* Is it really ENOSPC? */
			err = -ENOSPC;
			if (sb->s_flags & MS_RDONLY)
				goto fail;

			BUFFER_TRACE(bh2, "get_write_access");
			err = ext3_journal_get_write_access(handle, bh2);
			if (err) goto fail;
			gdp->bg_free_inodes_count = 0;
			BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh2);
			if (err) goto fail;
		}
		goto repeat;
	}
	j += i * EXT3_INODES_PER_GROUP(sb) + 1;
	if (j < EXT3_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%d", i, j);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode))
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;
	
	BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
	err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
	if (err) goto fail;
	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
	sb->s_dirt = 1;
	if (err) goto fail;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = j;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext3_i.i_flags = dir->u.ext3_i.i_flags & ~EXT3_INDEX_FL;
	if (S_ISLNK(mode))
		inode->u.ext3_i.i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
#ifdef EXT3_FRAGMENTS
	inode->u.ext3_i.i_faddr = 0;
	inode->u.ext3_i.i_frag_no = 0;
	inode->u.ext3_i.i_frag_size = 0;
#endif
	inode->u.ext3_i.i_file_acl = 0;
	inode->u.ext3_i.i_dir_acl = 0;
	inode->u.ext3_i.i_dtime = 0;
	INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
#ifdef EXT3_PREALLOCATE
	inode->u.ext3_i.i_prealloc_count = 0;
#endif
	inode->u.ext3_i.i_block_group = i;
	
	if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (IS_SYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	inode->i_generation = sb->u.ext3_sb.s_next_generation++;

	inode->u.ext3_i.i_state = EXT3_STATE_NEW;
	err = ext3_mark_inode_dirty(handle, inode);
	if (err) goto fail;
	
	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}
	ext3_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail:
	unlock_super(sb);
	iput(inode);
	ext3_std_error(sb, err);
	return ERR_PTR(err);
}
Example #14
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))
	
	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}
	
	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}
	
	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
Example #15
struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
{
    struct super_block *sb = dir->i_sb;
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct inode *inode;
    int block;
    uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
    struct udf_inode_info *iinfo;
    struct udf_inode_info *dinfo = UDF_I(dir);

    inode = new_inode(sb);

    if (!inode) {
        *err = -ENOMEM;
        return NULL;
    }
    *err = -ENOSPC;

    iinfo = UDF_I(inode);
    iinfo->i_unique = 0;
    iinfo->i_lenExtents = 0;
    iinfo->i_next_alloc_block = 0;
    iinfo->i_next_alloc_goal = 0;
    iinfo->i_strat4096 = 0;

    block = udf_new_block(dir->i_sb, NULL,
                  dinfo->i_location.partitionReferenceNum,
                  start, err);
    if (*err) {
        iput(inode);
        return NULL;
    }

    mutex_lock(&sbi->s_alloc_mutex);
    if (sbi->s_lvid_bh) {
        struct logicalVolIntegrityDesc *lvid =
            (struct logicalVolIntegrityDesc *)
            sbi->s_lvid_bh->b_data;
        struct logicalVolIntegrityDescImpUse *lvidiu =
                            udf_sb_lvidiu(sbi);
        struct logicalVolHeaderDesc *lvhd;
        uint64_t uniqueID;
        lvhd = (struct logicalVolHeaderDesc *)
                (lvid->logicalVolContentsUse);
        if (S_ISDIR(mode))
            le32_add_cpu(&lvidiu->numDirs, 1);
        else
            le32_add_cpu(&lvidiu->numFiles, 1);
        iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
        if (!(++uniqueID & 0x00000000FFFFFFFFUL))
            uniqueID += 16;
        lvhd->uniqueID = cpu_to_le64(uniqueID);
        mark_buffer_dirty(sbi->s_lvid_bh);
    }
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            mode |= S_ISGID;
    } else {
        inode->i_gid = current->fsgid;
    }

    iinfo->i_location.logicalBlockNum = block;
    iinfo->i_location.partitionReferenceNum =
                dinfo->i_location.partitionReferenceNum;
    inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0);
    inode->i_blocks = 0;
    iinfo->i_lenEAttr = 0;
    iinfo->i_lenAlloc = 0;
    iinfo->i_use = 0;
    if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
        iinfo->i_efe = 1;
        if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
            sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
        iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
                        sizeof(struct extendedFileEntry),
                        GFP_KERNEL);
    } else {
        iinfo->i_efe = 0;
        iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
                        sizeof(struct fileEntry),
                        GFP_KERNEL);
    }
    if (!iinfo->i_ext.i_data) {
        iput(inode);
        *err = -ENOMEM;
        mutex_unlock(&sbi->s_alloc_mutex);
        return NULL;
    }
    if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
    else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
    else
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
    inode->i_mtime = inode->i_atime = inode->i_ctime =
        iinfo->i_crtime = current_fs_time(inode->i_sb);
    insert_inode_hash(inode);
    mark_inode_dirty(inode);
    mutex_unlock(&sbi->s_alloc_mutex);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        iput(inode);
        *err = -EDQUOT;
        return NULL;
    }

    *err = 0;
    return inode;
}
Example #16
/*
 * Rename a file
 * N.B. After this call _both_ ffhp and tfhp need an fh_put
 */
int
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
			    struct svc_fh *tfhp, char *tname, int tlen)
{
	struct dentry	*fdentry, *tdentry, *odentry, *ndentry;
	struct inode	*fdir, *tdir;
	int		err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, MAY_REMOVE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFDIR, MAY_CREATE);
	if (err)
		goto out;

	fdentry = ffhp->fh_dentry;
	fdir = fdentry->d_inode;

	tdentry = tfhp->fh_dentry;
	tdir = tdentry->d_inode;

	/* N.B. We shouldn't need this ... dentry layer handles it */
	err = nfserr_perm;
	if (!flen || (fname[0] == '.' && 
	    (flen == 1 || (flen == 2 && fname[1] == '.'))) ||
	    !tlen || (tname[0] == '.' && 
	    (tlen == 1 || (tlen == 2 && tname[1] == '.'))))
		goto out;

	odentry = lookup_dentry(fname, dget(fdentry), 0);
	err = PTR_ERR(odentry);
	if (IS_ERR(odentry))
		goto out_nfserr;

	err = -ENOENT;
	if (!odentry->d_inode)
		goto out_dput_old;

	ndentry = lookup_dentry(tname, dget(tdentry), 0);
	err = PTR_ERR(ndentry);
	if (IS_ERR(ndentry))
		goto out_dput_old;

	/*
	 * Lock the parent directories.
	 */
	nfsd_double_down(&tdir->i_sem, &fdir->i_sem);
	err = -ENOENT;
	/* GAM3 check for parent changes after locking. */
	if (check_parent(fdir, odentry) &&
	    check_parent(tdir, ndentry)) {

		err = vfs_rename(fdir, odentry, tdir, ndentry);
		if (!err && EX_ISSYNC(tfhp->fh_export)) {
			write_inode_now(fdir);
			write_inode_now(tdir);
		}
	} else
		dprintk("nfsd: Caught race in nfsd_rename");
	DQUOT_DROP(fdir);
	DQUOT_DROP(tdir);

	nfsd_double_up(&tdir->i_sem, &fdir->i_sem);
	dput(ndentry);

out_dput_old:
	dput(odentry);
	if (err)
		goto out_nfserr;
out:
	return err;

out_nfserr:
	err = nfserrno(-err);
	goto out;
}
Example #17
struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
{
	struct super_block *sb;
	struct inode * inode;
	int block;
	Uint32 start = UDF_I_LOCATION(dir).logicalBlockNum;

	sb = dir->i_sb;
	inode = new_inode(sb);

	if (!inode)
	{
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
		start, err);
	if (*err)
	{
		iput(inode);
		return NULL;
	}
	lock_super(sb);

	if (UDF_SB_LVIDBH(sb))
	{
		struct LogicalVolHeaderDesc *lvhd;
		Uint64 uniqueID;
		lvhd = (struct LogicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
		if (S_ISDIR(mode))
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID)
	{
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	}
	else
		inode->i_gid = current->fsgid;

	UDF_I_LOCATION(inode).logicalBlockNum = block;
	UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
	{
		UDF_I_EXTENDED_FE(inode) = 1;
		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
	}
	else
		UDF_I_EXTENDED_FE(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		UDF_I_CRTIME(inode) = CURRENT_TIME;
	UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) =
		UDF_I_UCRTIME(inode) = CURRENT_UTIME;
	UDF_I_NEW_INODE(inode) = 1;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super(sb);
	if (DQUOT_ALLOC_INODE(inode))
	{
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}

	*err = 0;
	return inode;
}
Example #18
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext2_free_inode (struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;

	ino = inode->i_ino;
	ext2_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	if (!is_bad_inode(inode)) {
		/* Quota is already initialized in iput() */
	    	DQUOT_FREE_INODE(inode);
		DQUOT_DROP(inode);
	}

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	if (ino < EXT2_FIRST_INO(sb) ||
	    ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
	bh = load_inode_bitmap (sb, block_group);
	if (IS_ERR(bh))
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext2_clear_bit (bit, bh->b_data))
		ext2_error (sb, "ext2_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		desc = ext2_get_group_desc (sb, block_group, &bh2);
		if (desc) {
			desc->bg_free_inodes_count =
				cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
			if (is_directory)
				desc->bg_used_dirs_count =
					cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
		}
		mark_buffer_dirty(bh2);
		es->s_free_inodes_count =
			cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
		mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	}
	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}
	sb->s_dirt = 1;
error_return:
	unlock_super (sb);
}
Example #19
/*
 * Unlink a file or directory
 * N.B. After this call fhp needs an fh_put
 */
int
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
				char *fname, int flen)
{
	struct dentry	*dentry, *rdentry;
	struct inode	*dirp;
	int		err;

	/* N.B. We shouldn't need this test ... handled by dentry layer */
	err = nfserr_acces;
	if (!flen || isdotent(fname, flen))
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_REMOVE);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	rdentry = lookup_dentry(fname, dget(dentry), 0);
	err = PTR_ERR(rdentry);
	if (IS_ERR(rdentry))
		goto out_nfserr;

	if (!rdentry->d_inode) {
		dput(rdentry);
		err = nfserr_noent;
		goto out;
	}

	if (type != S_IFDIR) {
		/* It's UNLINK */
		err = fh_lock_parent(fhp, rdentry);
		if (err)
			goto out;

		err = vfs_unlink(dirp, rdentry);

		DQUOT_DROP(dirp);
		fh_unlock(fhp);

		dput(rdentry);

	} else {
		/* It's RMDIR */
		/* See comments in fs/namei.c:do_rmdir */
		rdentry->d_count++;
		nfsd_double_down(&dirp->i_sem, &rdentry->d_inode->i_sem);
		if (!fhp->fh_pre_mtime)
			fhp->fh_pre_mtime = dirp->i_mtime;
		fhp->fh_locked = 1;

		err = -ENOENT;
		if (check_parent(dirp, rdentry))
			err = vfs_rmdir(dirp, rdentry);

		rdentry->d_count--;
		DQUOT_DROP(dirp);
		if (!fhp->fh_post_version)
			fhp->fh_post_version = dirp->i_version;
		fhp->fh_locked = 0;
		nfsd_double_up(&dirp->i_sem, &rdentry->d_inode->i_sem);

		dput(rdentry);
	}

	if (err)
		goto out_nfserr;
	if (EX_ISSYNC(fhp->fh_export))
		write_inode_now(dirp);
out:
	return err;

out_nfserr:
	err = nfserrno(-err);
	goto out;
}
Example #20
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext3_group_desc * gdp;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext3_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext3_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext3_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT3_SB(sb);

	ino = inode->i_ino;
	ext3_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext3_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	es = EXT3_SB(sb)->s_es;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext3_error (sb, "ext3_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext3_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext3_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

		}
		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, fatal);
}
Example #21
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext2_free_inode (struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	unsigned long block_group;
	unsigned long bit;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	struct ext2_super_block * es;

	if (!inode->i_dev) {
		printk ("ext2_free_inode: inode has no device\n");
		return;
	}
	if (inode->i_count > 1) {
		printk ("ext2_free_inode: inode has count=%d\n", inode->i_count);
		return;
	}
	if (inode->i_nlink) {
		printk ("ext2_free_inode: inode has nlink=%d\n",
			(int) inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext2_free_inode: inode on nonexistent device\n");
		return;
	}

	ino = inode->i_ino;
	ext2_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(sb, inode);
	DQUOT_DROP(inode);

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	if (ino < EXT2_FIRST_INO(sb) || 
	    ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "free_inode",
			    "reserved inode or nonexistent inode");
		goto error_return;
	}
	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
	bitmap_nr = load_inode_bitmap (sb, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	
	bh = sb->u.ext2_sb.s_inode_bitmap[bitmap_nr];

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use */
	clear_inode (inode);

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext2_clear_bit (bit, bh->b_data))
		ext2_warning (sb, "ext2_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext2_get_group_desc (sb, block_group, &bh2);
		if (gdp) {
			gdp->bg_free_inodes_count =
				cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count =
					cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) - 1);
		}
		mark_buffer_dirty(bh2, 1);
		es->s_free_inodes_count =
			cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
		mark_buffer_dirty(sb->u.ext2_sb.s_sbh, 1);
	}
	mark_buffer_dirty(bh, 1);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}
	sb->s_dirt = 1;
error_return:
	unlock_super (sb);
}
Example #22
/*
 * Create a file (regular, directory, device, fifo); UNIX sockets 
 * not yet implemented.
 * If the response fh has been verified, the parent directory should
 * already be locked. Note that the parent directory is left locked.
 *
 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
 */
int
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
		char *fname, int flen, struct iattr *iap,
		int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dchild;
	struct inode	*dirp;
	nfsd_dirop_t	opfunc = NULL;
	int		err;

	err = nfserr_perm;
	if (!flen)
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_CREATE);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	err = nfserr_notdir;
	if(!dirp->i_op || !dirp->i_op->lookup)
		goto out;
	/*
	 * Check whether the response file handle has been verified yet.
	 * If it has, the parent directory should already be locked.
	 */
	if (!resfhp->fh_dverified) {
		dchild = lookup_dentry(fname, dget(dentry), 0);
		err = PTR_ERR(dchild);
		if (IS_ERR(dchild))
			goto out_nfserr;
		fh_compose(resfhp, fhp->fh_export, dchild);
		/* Lock the parent and check for errors ... */
		err = fh_lock_parent(fhp, dchild);
		if (err)
			goto out;
	} else {
		dchild = resfhp->fh_dentry;
		if (!fhp->fh_locked)
			printk(KERN_ERR
				"nfsd_create: parent %s/%s not locked!\n",
				dentry->d_parent->d_name.name,
				dentry->d_name.name);
	}
	/*
	 * Make sure the child dentry is still negative ...
	 */
	err = nfserr_exist;
	if (dchild->d_inode) {
		printk(KERN_WARNING
			"nfsd_create: dentry %s/%s not negative!\n",
			dentry->d_name.name, dchild->d_name.name);
		goto out; 
	}

	/*
	 * Get the dir op function pointer.
	 */
	err = nfserr_perm;
	switch (type) {
	case S_IFREG:
		opfunc = (nfsd_dirop_t) dirp->i_op->create;
		break;
	case S_IFDIR:
		opfunc = (nfsd_dirop_t) dirp->i_op->mkdir;
		break;
	case S_IFCHR:
	case S_IFBLK:
		/* The client is _NOT_ required to do security enforcement */
		if(!capable(CAP_SYS_ADMIN))
		{
			err = -EPERM;
			goto out;
		}
	case S_IFIFO:
	case S_IFSOCK:
		opfunc = dirp->i_op->mknod;
		break;
	}
	if (!opfunc)
		goto out;

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;

	/*
	 * Call the dir op function to create the object.
	 */
	DQUOT_INIT(dirp);
	err = opfunc(dirp, dchild, iap->ia_mode, rdev);
	DQUOT_DROP(dirp);
	if (err < 0)
		goto out_nfserr;

	if (EX_ISSYNC(fhp->fh_export))
		write_inode_now(dirp);

	/*
	 * Update the file handle to get the new inode info.
	 */
	fh_update(resfhp);

	/* Set file attributes. Mode has already been set and
	 * setting uid/gid works only for root. Irix appears to
	 * send along the gid when it tries to implement setgid
	 * directories via NFS.
	 */
	err = 0;
	if ((iap->ia_valid &= (ATTR_UID|ATTR_GID|ATTR_MODE)) != 0)
		err = nfsd_setattr(rqstp, resfhp, iap);
out:
	return err;

out_nfserr:
	err = nfserrno(-err);
	goto out;
}
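nfsd brackets directory-modifying operations with quota calls: DQUOT_INIT() attaches dquot structures to the parent directory so the operation's usage is accounted, and DQUOT_DROP() releases those transient references once the operation is done. Roughly, using the names from the example above:

	DQUOT_INIT(dirp);				/* attach dquots to the parent directory */
	err = opfunc(dirp, dchild, iap->ia_mode, rdev);	/* create/mkdir/mknod */
	DQUOT_DROP(dirp);				/* drop the transient dquot references */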
Example #23
/*
 * Create a hardlink
 * N.B. After this call _both_ ffhp and tfhp need an fh_put
 */
int
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
				char *fname, int len, struct svc_fh *tfhp)
{
	struct dentry	*ddir, *dnew, *dold;
	struct inode	*dirp, *dest;
	int		err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, MAY_CREATE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFREG, MAY_NOP);
	if (err)
		goto out;

	err = nfserr_perm;
	if (!len)
		goto out;

	ddir = ffhp->fh_dentry;
	dirp = ddir->d_inode;

	dnew = lookup_dentry(fname, dget(ddir), 0);
	err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;
	/*
	 * Lock the parent before checking for existence
	 */
	err = fh_lock_parent(ffhp, dnew);
	if (err)
		goto out_dput;

	err = nfserr_exist;
	if (dnew->d_inode)
		goto out_unlock;

	dold = tfhp->fh_dentry;
	dest = dold->d_inode;

	err = nfserr_acces;
	if (nfsd_iscovered(ddir, ffhp->fh_export))
		goto out_unlock;
	/* FIXME: nxdev for NFSv3 */
	if (dirp->i_dev != dest->i_dev)
		goto out_unlock;

	err = nfserr_perm;
	if (IS_IMMUTABLE(dest) /* || IS_APPEND(dest) */ )
		goto out_unlock;
	if (!dirp->i_op || !dirp->i_op->link)
		goto out_unlock;

	DQUOT_INIT(dirp);
	err = dirp->i_op->link(dold, dirp, dnew);
	DQUOT_DROP(dirp);
	if (!err) {
		if (EX_ISSYNC(ffhp->fh_export)) {
			write_inode_now(dirp);
			write_inode_now(dest);
		}
	} else
		err = nfserrno(-err);

out_unlock:
	fh_unlock(ffhp);
out_dput:
	dput(dnew);
out:
	return err;

out_nfserr:
	err = nfserrno(-err);
	goto out;
}
Example #24
/*
 * Create a symlink and look up its inode
 * N.B. After this call _both_ fhp and resfhp need an fh_put
 */
int
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
				char *fname, int flen,
				char *path,  int plen,
				struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dnew;
	struct inode	*dirp;
	int		err;

	err = nfserr_noent;
	if (!flen || !plen)
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, MAY_CREATE);
	if (err)
		goto out;
	dentry = fhp->fh_dentry;

	err = nfserr_perm;
	if (nfsd_iscovered(dentry, fhp->fh_export))
		goto out;
	dirp = dentry->d_inode;
	if (!dirp->i_op	|| !dirp->i_op->symlink)
		goto out;

	dnew = lookup_dentry(fname, dget(dentry), 0);
	err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;

	/*
	 * Lock the parent before checking for existence
	 */
	err = fh_lock_parent(fhp, dnew);
	if (err)
		goto out_compose;

	err = nfserr_exist;
	if (!dnew->d_inode) {
		DQUOT_INIT(dirp);
		err = dirp->i_op->symlink(dirp, dnew, path);
		DQUOT_DROP(dirp);
		if (!err) {
			if (EX_ISSYNC(fhp->fh_export))
				write_inode_now(dirp);
		} else
			err = nfserrno(-err);
	}
	fh_unlock(fhp);

	/* Compose the fh so the dentry will be freed ... */
out_compose:
	fh_compose(resfhp, fhp->fh_export, dnew);
out:
	return err;

out_nfserr:
	err = nfserrno(-err);
	goto out;
}
Example #25
/*
 * NAME:	ialloc()
 *
 * FUNCTION:	Allocate a new inode
 *
 */
struct inode *ialloc(struct inode *parent, umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode;
	struct jfs_inode_info *jfs_inode;
	int rc;

	inode = new_inode(sb);
	if (!inode) {
		jfs_warn("ialloc: new_inode returned NULL!");
		return ERR_PTR(-ENOMEM);
	}

	jfs_inode = JFS_IP(inode);

	rc = diAlloc(parent, S_ISDIR(mode), inode);
	if (rc) {
		jfs_warn("ialloc: diAlloc returned %d!", rc);
		if (rc == -EIO)
			make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(rc);
	}

	inode->i_uid = current_fsuid();
	if (parent->i_mode & S_ISGID) {
		inode->i_gid = parent->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	/*
	 * New inodes need to save sane values on disk when
	 * uid & gid mount options are used
	 */
	jfs_inode->saved_uid = inode->i_uid;
	jfs_inode->saved_gid = inode->i_gid;

	/*
	 * Allocate inode to quota.
	 */
	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	inode->i_mode = mode;
	/* inherit flags from parent */
	jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;

	if (S_ISDIR(mode)) {
		jfs_inode->mode2 |= IDIRECTORY;
		jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
	}
	else {
		jfs_inode->mode2 |= INLINEEA | ISPARSE;
		if (S_ISLNK(mode))
			jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
	}
	jfs_inode->mode2 |= mode;

	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	jfs_inode->otime = inode->i_ctime.tv_sec;
	inode->i_generation = JFS_SBI(sb)->gengen++;

	jfs_inode->cflag = 0;

	/* Zero remaining fields */
	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
	jfs_inode->next_index = 0;
	jfs_inode->acltype = 0;
	jfs_inode->btorder = 0;
	jfs_inode->btindex = 0;
	jfs_inode->bxflag = 0;
	jfs_inode->blid = 0;
	jfs_inode->atlhead = 0;
	jfs_inode->atltail = 0;
	jfs_inode->xtlid = 0;
	jfs_set_inode_flags(inode);

	jfs_info("ialloc returns inode = 0x%p\n", inode);

	return inode;
}
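The ialloc() above (Example #25) and the allocators that follow (Examples #26–#28) all unwind a refused quota charge with the same sequence. The fragment below isolates that idiom; example_alloc_with_quota() is an invented name and the filesystem-specific on-disk allocation step is elided:

static struct inode *example_alloc_with_quota(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* ... filesystem-specific on-disk inode allocation would go here ... */

	if (DQUOT_ALLOC_INODE(inode)) {
		/* Charge refused: drop the dquot references, forbid any
		 * further quota accounting on this inode, and let iput()
		 * reclaim it as an unlinked inode. */
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	return inode;
}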
Example #26
struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
{
	struct super_block *sb = dir->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct inode * inode;
	int block;
	uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum;

	inode = new_inode(sb);

	if (!inode)
	{
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
		start, err);
	if (*err)
	{
		iput(inode);
		return NULL;
	}

	lock_udf_alloc_sem(sbi);
	UDF_I_UNIQUE(inode) = 0;
	UDF_I_LENEXTENTS(inode) = 0;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
	UDF_I_STRAT4096(inode) = 0;
	if (UDF_SB_LVIDBH(sb))
	{
		struct logicalVolHeaderDesc *lvhd;
		uint64_t uniqueID;
		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
		if (S_ISDIR(mode))
			UDF_SB_LVIDIU(sb)->numDirs =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
		else
			UDF_SB_LVIDIU(sb)->numFiles =
				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
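		/* Advance the logical volume's uniqueID for the next allocation;
		 * when the low 32 bits wrap to 0 the next 16 values are skipped
		 * (assumed to be the UDF-reserved uniqueID range). */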
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID)
	{
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	}
	else
		inode->i_gid = current->fsgid;

	UDF_I_LOCATION(inode).logicalBlockNum = block;
	UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	UDF_I_USE(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
	{
		UDF_I_EFE(inode) = 1;
		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
	}
	else
	{
		UDF_I_EFE(inode) = 0;
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	}
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	unlock_udf_alloc_sem(sbi);

	if (DQUOT_ALLOC_INODE(inode))
	{
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}

	*err = 0;
	return inode;
}
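Unlike the ext2 and JFS allocators in the surrounding examples, udf_new_inode() above (Example #26) reports failure with a NULL inode plus a negative errno in *err. A hypothetical caller sketch of that contract; example_udf_alloc() is an invented wrapper, the real callers being udf_create() and friends:

static struct inode *example_udf_alloc(struct inode *dir, int mode)
{
	int err;
	struct inode *inode = udf_new_inode(dir, mode, &err);

	if (!inode)			/* failure: NULL plus *err */
		return ERR_PTR(err);	/* err is already negative (-ENOSPC, -EDQUOT, ...) */
	return inode;
}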
Example #27
struct inode * ext2_new_inode (const struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int group, i;
	ino_t ino;
	struct inode * inode;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;
	int err;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
repeat:
	if (S_ISDIR(mode))
		group = find_group_dir(sb, dir->u.ext2_i.i_block_group);
	else 
		group = find_group_other(sb, dir->u.ext2_i.i_block_group);

	err = -ENOSPC;
	if (group == -1)
		goto fail;

	err = -EIO;
	bh = load_inode_bitmap (sb, group);
	if (IS_ERR(bh))
		goto fail2;

	i = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb));
	if (i >= EXT2_INODES_PER_GROUP(sb))
		goto bad_count;
	ext2_set_bit (i, bh->b_data);

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}

	ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
	if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%ld", group, ino);
		err = -EIO;
		goto fail2;
	}

	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	sb->s_dirt = 1;
	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext2_i.i_new_inode = 1;
	inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags & ~EXT2_BTREE_FL;
	if (S_ISLNK(mode))
		inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
	inode->u.ext2_i.i_block_group = group;
	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	insert_inode_hash(inode);
	inode->i_generation = event++;
	mark_inode_dirty(inode);

	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}
	ext2_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail2:
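	/* Undo the per-group reservation: find_group_dir()/find_group_other()
	 * are assumed to have pre-decremented bg_free_inodes_count (and bumped
	 * bg_used_dirs_count for directories) when they picked this group. */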
	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
	if (S_ISDIR(mode))
		desc->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
	mark_buffer_dirty(bh2);
fail:
	unlock_super(sb);
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);

bad_count:
	ext2_error (sb, "ext2_new_inode",
		    "Free inodes count corrupted in group %d",
		    group);
	/* Is it really ENOSPC? */
	err = -ENOSPC;
	if (sb->s_flags & MS_RDONLY)
		goto fail;

	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count = 0;
	mark_buffer_dirty(bh2);
	goto repeat;
}
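ext2_new_inode() above (Example #27) uses the ERR_PTR() convention instead, so its callers test with IS_ERR()/PTR_ERR(). A minimal hypothetical sketch; example_ext2_alloc() is an invented wrapper, not ext2 code:

static int example_ext2_alloc(struct inode *dir, int mode, struct inode **res)
{
	struct inode *inode = ext2_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* -ENOSPC, -EIO, -EDQUOT, ... */
	*res = inode;
	return 0;
}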
Example #28
/*
 * NAME:	ialloc()
 *
 * FUNCTION:	Allocate a new inode
 *
 */
struct inode *ialloc(struct inode *parent, umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode;
	struct jfs_inode_info *jfs_inode;
	int rc;

	inode = new_inode(sb);
	if (!inode) {
		jfs_warn("ialloc: new_inode returned NULL!");
		return inode;
	}

	jfs_inode = JFS_IP(inode);

	rc = diAlloc(parent, S_ISDIR(mode), inode);
	if (rc) {
		jfs_warn("ialloc: diAlloc returned %d!", rc);
		make_bad_inode(inode);
		iput(inode);
		return NULL;
	}

	inode->i_uid = current->fsuid;
	if (parent->i_mode & S_ISGID) {
		inode->i_gid = parent->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	/*
	 * Allocate inode to quota.
	 */
	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return NULL;
	}

	inode->i_mode = mode;
	if (S_ISDIR(mode))
		jfs_inode->mode2 = IDIRECTORY | mode;
	else
		jfs_inode->mode2 = INLINEEA | ISPARSE | mode;
	inode->i_blksize = sb->s_blocksize;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	jfs_inode->otime = inode->i_ctime.tv_sec;
	inode->i_generation = JFS_SBI(sb)->gengen++;

	jfs_inode->cflag = 0;

	/* Zero remaining fields */
	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
	jfs_inode->next_index = 0;
	jfs_inode->acltype = 0;
	jfs_inode->btorder = 0;
	jfs_inode->btindex = 0;
	jfs_inode->bxflag = 0;
	jfs_inode->blid = 0;
	jfs_inode->atlhead = 0;
	jfs_inode->atltail = 0;
	jfs_inode->xtlid = 0;

	jfs_info("ialloc returns inode = 0x%p\n", inode);

	return inode;
}