/*
 * Called when an inode is released. Note that this is different
 * from ext3_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext3_release_file (struct inode * inode, struct file * filp)
{
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1))
	{
		mutex_lock(&EXT3_I(inode)->truncate_mutex);
		ext3_discard_reservation(inode);
		mutex_unlock(&EXT3_I(inode)->truncate_mutex);
	}
	if (is_dx(inode) && filp->private_data)
		ext3_htree_free_dir_info(filp->private_data);

	return 0;
}
/*
 * Restart the transaction associated with *handle. This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
{
	int ret;

	jbd_debug(2, "restarting handle %p\n", handle);
	/*
	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
	 * At this moment, get_block can be called only for blocks inside
	 * i_size since page cache has been already dropped and writes are
	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
	 */
	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
	mutex_lock(&EXT3_I(inode)->truncate_mutex);
	return ret;
}
int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
	int ret, needs_barrier = 0;
	tid_t commit_tid;

	trace_ext3_sync_file_enter(file, datasync);

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		goto out;

	J_ASSERT(ext3_journal_current_handle() == NULL);

	/*
	 * data=writeback,ordered:
	 *  The caller's filemap_fdatawrite()/wait will sync the data.
	 *  Metadata is in the journal, we wait for a proper transaction
	 *  to commit here.
	 *
	 * data=journal:
	 *  filemap_fdatawrite won't do anything (the buffers are clean).
	 *  ext3_force_commit will write the file data into the journal and
	 *  will wait on that.
	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
	 *  (they were dirtied by commit).  But that's OK - the blocks are
	 *  safe in-journal, which is all fsync() needs to ensure.
	 */
	if (ext3_should_journal_data(inode)) {
		ret = ext3_force_commit(inode->i_sb);
		goto out;
	}

	if (datasync)
		commit_tid = atomic_read(&ei->i_datasync_tid);
	else
		commit_tid = atomic_read(&ei->i_sync_tid);

	if (test_opt(inode->i_sb, BARRIER) &&
	    !journal_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = 1;
	log_start_commit(journal, commit_tid);
	ret = log_wait_commit(journal, commit_tid);

	/*
	 * In case we didn't commit a transaction, we have to flush
	 * disk caches manually so that data really is on persistent
	 * storage
	 */
	if (needs_barrier)
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
out:
	trace_ext3_sync_file_exit(inode, ret);
	return ret;
}
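/*
 * For context: the datasync argument above distinguishes fsync(2) from
 * fdatasync(2).  A minimal userspace illustration of the two calls that end
 * up in this handler (standalone example, not kernel code; the file path is
 * arbitrary):
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello ext3\n";
	int fd = open("/tmp/ext3-sync-demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write");
	/*
	 * fdatasync(): only file data (and size changes) must reach disk, so
	 * the handler runs with datasync == 1 and waits on i_datasync_tid.
	 */
	if (fdatasync(fd) < 0)
		perror("fdatasync");
	/* fsync(): all metadata too, so datasync == 0 and it waits on i_sync_tid. */
	if (fsync(fd) < 0)
		perror("fsync");
	close(fd);
	return 0;
}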
/*
 * Inode operation get_posix_acl().
 *
 * inode->i_mutex: don't care
 */
static struct posix_acl *
ext3_get_acl(struct inode *inode, int type)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	int name_index;
	char *value = NULL;
	struct posix_acl *acl;
	int retval;

	if (!test_opt(inode->i_sb, POSIX_ACL))
		return NULL;

	switch(type) {
		case ACL_TYPE_ACCESS:
			acl = ext3_iget_acl(inode, &ei->i_acl);
			if (acl != EXT3_ACL_NOT_CACHED)
				return acl;
			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
			break;

		case ACL_TYPE_DEFAULT:
			acl = ext3_iget_acl(inode, &ei->i_default_acl);
			if (acl != EXT3_ACL_NOT_CACHED)
				return acl;
			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
			break;

		default:
			return ERR_PTR(-EINVAL);
	}
	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
	if (retval > 0) {
		value = kmalloc(retval, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		retval = ext3_xattr_get(inode, name_index, "", value, retval);
	}
	if (retval > 0)
		acl = ext3_acl_from_disk(value, retval);
	else if (retval == -ENODATA || retval == -ENOSYS)
		acl = NULL;
	else
		acl = ERR_PTR(retval);
	kfree(value);

	if (!IS_ERR(acl)) {
		switch(type) {
			case ACL_TYPE_ACCESS:
				ext3_iset_acl(inode, &ei->i_acl, acl);
				break;

			case ACL_TYPE_DEFAULT:
				ext3_iset_acl(inode, &ei->i_default_acl, acl);
				break;
		}
	}
	return acl;
}
static int find_group_other(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT3_I(parent)->i_block_group;
	int ngroups = EXT3_SB(sb)->s_groups_count;
	struct ext3_group_desc *desc;
	struct buffer_head *bh;
	int group, i;

	/*
	 * Try to place the inode in its parent directory
	 */
	group = parent_group;
	desc = ext3_get_group_desc (sb, group, &bh);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
			le16_to_cpu(desc->bg_free_blocks_count))
		return group;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	group = (group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		desc = ext3_get_group_desc (sb, group, &bh);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
				le16_to_cpu(desc->bg_free_blocks_count))
			return group;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++group >= ngroups)
			group = 0;
		desc = ext3_get_group_desc (sb, group, &bh);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return group;
	}

	return -1;
}
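/*
 * Illustration of the probe sequence used above (standalone example, not
 * kernel code): starting from the hashed group, the loop advances by 1, 2,
 * 4, 8, ... groups, i.e. cumulative offsets of 1, 3, 7, 15, ... modulo the
 * group count, before the function falls back to a linear scan.  The group
 * count and starting group below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int ngroups = 32;	/* hypothetical number of block groups */
	int start = 11;		/* hypothetical hashed starting group */
	int group = start;
	int i;

	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		printf("probe group %d (offset %d from start)\n",
		       group, (group - start + ngroups) % ngroups);
	}
	return 0;
}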
/*
 * Called when an inode is released. Note that this is different
 * from ext3_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext3_release_file (struct inode * inode, struct file * filp)
{
	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
		filemap_flush(inode->i_mapping);
		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1))
	{
		mutex_lock(&EXT3_I(inode)->truncate_mutex);
		ext3_discard_reservation(inode);
		mutex_unlock(&EXT3_I(inode)->truncate_mutex);
	}
	if (is_dx(inode) && filp->private_data)
		ext3_htree_free_dir_info(filp->private_data);

	return 0;
}
static inline int mlowerfs_ext3_should_journal_data(struct inode *inode)
{
	if (!S_ISREG(inode->i_mode))
		return 1;
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
		return 1;
	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
		return 1;
	return 0;
}
/* * Inode operation get_posix_acl(). * * inode->i_sem: don't care * BKL: held */ struct posix_acl * ext3_get_acl(struct inode *inode, int type) { const size_t max_size = ext3_acl_size(EXT3_ACL_MAX_ENTRIES); struct ext3_inode_info *ei = EXT3_I(inode); int name_index; char *value; struct posix_acl *acl; int retval; if (!IS_POSIXACL(inode)) return 0; switch(type) { case ACL_TYPE_ACCESS: if (ei->i_acl != EXT3_ACL_NOT_CACHED) return posix_acl_dup(ei->i_acl); name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: if (ei->i_default_acl != EXT3_ACL_NOT_CACHED) return posix_acl_dup(ei->i_default_acl); name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; break; default: return ERR_PTR(-EINVAL); } value = kmalloc(max_size, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); retval = ext3_xattr_get(inode, name_index, "", value, max_size); acl = ERR_PTR(retval); if (retval > 0) acl = ext3_acl_from_disk(value, retval); else if (retval == -ENODATA || retval == -ENOSYS) acl = NULL; kfree(value); if (!IS_ERR(acl)) { switch(type) { case ACL_TYPE_ACCESS: ei->i_acl = posix_acl_dup(acl); break; case ACL_TYPE_DEFAULT: ei->i_default_acl = posix_acl_dup(acl); break; } } return acl; }
int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ext3_inode_info *ei = EXT3_I(inode);
	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
	int ret = 0;
	tid_t commit_tid;

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	J_ASSERT(ext3_journal_current_handle() == NULL);

	/*
	 * data=writeback,ordered:
	 *  The caller's filemap_fdatawrite()/wait will sync the data.
	 *  Metadata is in the journal, we wait for a proper transaction
	 *  to commit here.
	 *
	 * data=journal:
	 *  filemap_fdatawrite won't do anything (the buffers are clean).
	 *  ext3_force_commit will write the file data into the journal and
	 *  will wait on that.
	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
	 *  (they were dirtied by commit).  But that's OK - the blocks are
	 *  safe in-journal, which is all fsync() needs to ensure.
	 */
	if (ext3_should_journal_data(inode)) {
		ret = ext3_force_commit(inode->i_sb);
		goto out;
	}

	if (datasync)
		commit_tid = atomic_read(&ei->i_datasync_tid);
	else
		commit_tid = atomic_read(&ei->i_sync_tid);

	if (log_start_commit(journal, commit_tid)) {
		log_wait_commit(journal, commit_tid);
		goto out;
	}

	/*
	 * In case we didn't commit a transaction, we have to flush
	 * disk caches manually so that data really is on persistent
	 * storage
	 */
	if (test_opt(inode->i_sb, BARRIER))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
				BLKDEV_IFL_WAIT);
out:
	return ret;
}
/* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both * free space and a low directory-to-inode ratio; if that fails, then of * the groups with above-average free space, that group with the fewest * directories already is chosen. * * For other inodes, search forward from the parent directory's block * group to find a free inode. */ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) { struct super_block *sb; struct buffer_head *bitmap_bh = NULL; struct buffer_head *bh2; int group; unsigned long ino = 0; struct inode * inode; struct ext3_group_desc * gdp = NULL; struct ext3_super_block * es; struct ext3_inode_info *ei; struct ext3_sb_info *sbi; int err = 0; struct inode *ret; int i; /* Cannot create files in a deleted directory */ if (!dir || !dir->i_nlink) return ERR_PTR(-EPERM); sb = dir->i_sb; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ei = EXT3_I(inode); sbi = EXT3_SB(sb); es = sbi->s_es; if (S_ISDIR(mode)) { if (test_opt (sb, OLDALLOC)) group = find_group_dir(sb, dir); else group = find_group_orlov(sb, dir); } else group = find_group_other(sb, dir); err = -ENOSPC; if (group == -1) goto out; for (i = 0; i < sbi->s_groups_count; i++) { err = -EIO; gdp = ext3_get_group_desc(sb, group, &bh2); if (!gdp) goto fail; brelse(bitmap_bh); bitmap_bh = read_inode_bitmap(sb, group); if (!bitmap_bh) goto fail; ino = 0; repeat_in_this_group: ino = ext3_find_next_zero_bit((unsigned long *) bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino); if (ino < EXT3_INODES_PER_GROUP(sb)) { BUFFER_TRACE(bitmap_bh, "get_write_access"); err = ext3_journal_get_write_access(handle, bitmap_bh); if (err) goto fail; if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group), ino, bitmap_bh->b_data)) { /* we won it */ BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata"); err = ext3_journal_dirty_metadata(handle, bitmap_bh); if (err) goto fail; goto got; } /* we lost it */ journal_release_buffer(handle, bitmap_bh); if (++ino < EXT3_INODES_PER_GROUP(sb)) goto repeat_in_this_group; } /* * This case is possible in concurrent environment. It is very * rare. We cannot repeat the find_group_xxx() call because * that will simply return the same blockgroup, because the * group descriptor metadata has not yet been updated. * So we just go onto the next blockgroup. 
*/ if (++group == sbi->s_groups_count) group = 0; } err = -ENOSPC; goto out; got: ino += group * EXT3_INODES_PER_GROUP(sb) + 1; if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { ext3_error (sb, "ext3_new_inode", "reserved inode or inode > inodes count - " "block_group = %d, inode=%lu", group, ino); err = -EIO; goto fail; } BUFFER_TRACE(bh2, "get_write_access"); err = ext3_journal_get_write_access(handle, bh2); if (err) goto fail; spin_lock(sb_bgl_lock(sbi, group)); gdp->bg_free_inodes_count = cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1); if (S_ISDIR(mode)) { gdp->bg_used_dirs_count = cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1); } spin_unlock(sb_bgl_lock(sbi, group)); BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata"); err = ext3_journal_dirty_metadata(handle, bh2); if (err) goto fail; percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); sb->s_dirt = 1; inode->i_uid = current->fsuid; if (test_opt (sb, GRPID)) inode->i_gid = dir->i_gid; else if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else inode->i_gid = current->fsgid; inode->i_mode = mode; inode->i_ino = ino; /* This is the optimal IO size (for stat), not the fs block size */ inode->i_blksize = PAGE_SIZE; inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; memset(ei->i_data, 0, sizeof(ei->i_data)); ei->i_dir_start_lookup = 0; ei->i_disksize = 0; ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL; if (S_ISLNK(mode)) ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL); /* dirsync only applies to directories */ if (!S_ISDIR(mode)) ei->i_flags &= ~EXT3_DIRSYNC_FL; #ifdef EXT3_FRAGMENTS ei->i_faddr = 0; ei->i_frag_no = 0; ei->i_frag_size = 0; #endif ei->i_file_acl = 0; ei->i_dir_acl = 0; ei->i_dtime = 0; ei->i_block_alloc_info = NULL; ei->i_block_group = group; ext3_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; insert_inode_hash(inode); spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); ei->i_state = EXT3_STATE_NEW; ei->i_extra_isize = (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ? sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; ret = inode; if(DQUOT_ALLOC_INODE(inode)) { err = -EDQUOT; goto fail_drop; } err = ext3_init_acl(handle, inode, dir); if (err) goto fail_free_drop; err = ext3_init_security(handle,inode, dir); if (err) goto fail_free_drop; err = ext3_mark_inode_dirty(handle, inode); if (err) { ext3_std_error(sb, err); goto fail_free_drop; } ext3_debug("allocating inode %lu\n", inode->i_ino); goto really_out; fail: ext3_std_error(sb, err); out: iput(inode); ret = ERR_PTR(err); really_out: brelse(bitmap_bh); return ret; fail_free_drop: DQUOT_FREE_INODE(inode); fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); brelse(bitmap_bh); return ERR_PTR(err); }
static int find_group_orlov(struct super_block *sb, struct inode *parent) { int parent_group = EXT3_I(parent)->i_block_group; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int ngroups = sbi->s_groups_count; int inodes_per_group = EXT3_INODES_PER_GROUP(sb); int freei, avefreei; int freeb, avefreeb; int blocks_per_dir, ndirs; int max_debt, max_dirs, min_blocks, min_inodes; int group = -1, i; struct ext3_group_desc *desc; struct buffer_head *bh; freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); avefreei = freei / ngroups; freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter); avefreeb = freeb / ngroups; ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); if ((parent == sb->s_root->d_inode) || (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) { int best_ndir = inodes_per_group; int best_group = -1; get_random_bytes(&group, sizeof(group)); parent_group = (unsigned)group % ngroups; for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, &bh); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) continue; if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) continue; if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) continue; best_group = group; best_ndir = le16_to_cpu(desc->bg_used_dirs_count); } if (best_group >= 0) return best_group; goto fallback; } blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs; max_dirs = ndirs / ngroups + inodes_per_group / 16; min_inodes = avefreei - inodes_per_group / 4; min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4; max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST); if (max_debt * INODE_COST > inodes_per_group) max_debt = inodes_per_group / INODE_COST; if (max_debt > 255) max_debt = 255; if (max_debt == 0) max_debt = 1; for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, &bh); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) continue; if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) continue; if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) continue; return group; } fallback: for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, &bh); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) return group; } if (avefreei) { /* * The free-inodes counter is approximate, and for really small * filesystems the above test can fail to find any blockgroups */ avefreei = 0; goto fallback; } return -1; }
long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; struct ext3_inode_info *ei = EXT3_I(inode); unsigned int flags; unsigned short rsv_window_size; ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg); switch (cmd) { case EXT3_IOC_GETFLAGS: ext3_get_inode_flags(ei); flags = ei->i_flags & EXT3_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); case EXT3_IOC_SETFLAGS: { handle_t *handle = NULL; int err; struct ext3_iloc iloc; unsigned int oldflags; unsigned int jflag; if (!inode_owner_or_capable(inode)) return -EACCES; if (get_user(flags, (int __user *) arg)) return -EFAULT; err = mnt_want_write_file(filp); if (err) return err; flags = ext3_mask_flags(inode->i_mode, flags); mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ err = -EPERM; if (IS_NOQUOTA(inode)) goto flags_out; oldflags = ei->i_flags; /* The JOURNAL_DATA flag is modifiable only by root */ jflag = flags & EXT3_JOURNAL_DATA_FL; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. * * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } /* * The JOURNAL_DATA flag can only be changed by * the relevant capability. */ if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) { if (!capable(CAP_SYS_RESOURCE)) goto flags_out; } handle = ext3_journal_start(inode, 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto flags_out; } if (IS_SYNC(inode)) handle->h_sync = 1; err = ext3_reserve_inode_write(handle, inode, &iloc); if (err) goto flags_err; flags = flags & EXT3_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE; ei->i_flags = flags; ext3_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME_SEC; err = ext3_mark_iloc_dirty(handle, inode, &iloc); flags_err: ext3_journal_stop(handle); if (err) goto flags_out; if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) err = ext3_change_inode_journal_flag(inode, jflag); flags_out: mutex_unlock(&inode->i_mutex); mnt_drop_write_file(filp); return err; } case EXT3_IOC_GETVERSION: case EXT3_IOC_GETVERSION_OLD: return put_user(inode->i_generation, (int __user *) arg); case EXT3_IOC_SETVERSION: case EXT3_IOC_SETVERSION_OLD: { handle_t *handle; struct ext3_iloc iloc; __u32 generation; int err; if (!inode_owner_or_capable(inode)) return -EPERM; err = mnt_want_write_file(filp); if (err) return err; if (get_user(generation, (int __user *) arg)) { err = -EFAULT; goto setversion_out; } handle = ext3_journal_start(inode, 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto setversion_out; } err = ext3_reserve_inode_write(handle, inode, &iloc); if (err == 0) { inode->i_ctime = CURRENT_TIME_SEC; inode->i_generation = generation; err = ext3_mark_iloc_dirty(handle, inode, &iloc); } ext3_journal_stop(handle); setversion_out: mnt_drop_write_file(filp); return err; } case EXT3_IOC_GETRSVSZ: if (test_opt(inode->i_sb, RESERVATION) && S_ISREG(inode->i_mode) && ei->i_block_alloc_info) { rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size; return put_user(rsv_window_size, (int __user *)arg); } return -ENOTTY; case EXT3_IOC_SETRSVSZ: { int err; if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode)) return -ENOTTY; err = mnt_want_write_file(filp); if (err) return err; if (!inode_owner_or_capable(inode)) { err = -EACCES; goto setrsvsz_out; } if (get_user(rsv_window_size, (int __user *)arg)) { err = -EFAULT; goto 
setrsvsz_out; } if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS) rsv_window_size = EXT3_MAX_RESERVE_BLOCKS; /* * need to allocate reservation structure for this inode * before set the window size */ mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); if (ei->i_block_alloc_info){ struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } mutex_unlock(&ei->truncate_mutex); setrsvsz_out: mnt_drop_write_file(filp); return err; } case EXT3_IOC_GROUP_EXTEND: { ext3_fsblk_t n_blocks_count; struct super_block *sb = inode->i_sb; int err, err2; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; err = mnt_want_write_file(filp); if (err) return err; if (get_user(n_blocks_count, (__u32 __user *)arg)) { err = -EFAULT; goto group_extend_out; } err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count); journal_lock_updates(EXT3_SB(sb)->s_journal); err2 = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err == 0) err = err2; group_extend_out: mnt_drop_write_file(filp); return err; } case EXT3_IOC_GROUP_ADD: { struct ext3_new_group_data input; struct super_block *sb = inode->i_sb; int err, err2; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; err = mnt_want_write_file(filp); if (err) return err; if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg, sizeof(input))) { err = -EFAULT; goto group_add_out; } err = ext3_group_add(sb, &input); journal_lock_updates(EXT3_SB(sb)->s_journal); err2 = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err == 0) err = err2; group_add_out: mnt_drop_write_file(filp); return err; } case FITRIM: { struct super_block *sb = inode->i_sb; struct fstrim_range range; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; ret = ext3_trim_fs(sb, &range); if (ret < 0) return ret; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } default: return -ENOTTY; } }
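/*
 * The GETFLAGS/SETFLAGS commands handled above are reachable from userspace
 * through the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS numbers (which the
 * EXT3_IOC_* names alias).  A minimal userspace sketch (standalone example,
 * not kernel code; the path is arbitrary, and the int-sized argument matches
 * the put_user()/get_user() calls in the handler above):
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int flags;
	int fd = open("/tmp/ext3-flags-demo", O_CREAT | O_RDONLY, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	printf("inode flags: %#x%s\n", (unsigned int)flags,
	       (flags & FS_APPEND_FL) ? " (append-only)" : "");
	close(fd);
	return 0;
}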
/* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both * free space and a low directory-to-inode ratio; if that fails, then of * the groups with above-average free space, that group with the fewest * directories already is chosen. * * For other inodes, search forward from the parent directory's block * group to find a free inode. */ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, const struct qstr *qstr, umode_t mode) { struct super_block *sb; struct buffer_head *bitmap_bh = NULL; struct buffer_head *bh2; int group; unsigned long ino = 0; struct inode * inode; struct ext3_group_desc * gdp = NULL; struct ext3_super_block * es; struct ext3_inode_info *ei; struct ext3_sb_info *sbi; int err = 0; struct inode *ret; int i; /* Cannot create files in a deleted directory */ if (!dir || !dir->i_nlink) return ERR_PTR(-EPERM); sb = dir->i_sb; trace_ext3_request_inode(dir, mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ei = EXT3_I(inode); sbi = EXT3_SB(sb); es = sbi->s_es; if (S_ISDIR(mode)) group = find_group_orlov(sb, dir); else group = find_group_other(sb, dir); err = -ENOSPC; if (group == -1) goto out; for (i = 0; i < sbi->s_groups_count; i++) { err = -EIO; gdp = ext3_get_group_desc(sb, group, &bh2); if (!gdp) goto fail; brelse(bitmap_bh); bitmap_bh = read_inode_bitmap(sb, group); if (!bitmap_bh) goto fail; ino = 0; repeat_in_this_group: ino = ext3_find_next_zero_bit((unsigned long *) bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino); if (ino < EXT3_INODES_PER_GROUP(sb)) { BUFFER_TRACE(bitmap_bh, "get_write_access"); err = ext3_journal_get_write_access(handle, bitmap_bh); if (err) goto fail; if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group), ino, bitmap_bh->b_data)) { /* we won it */ BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata"); err = ext3_journal_dirty_metadata(handle, bitmap_bh); if (err) goto fail; goto got; } /* we lost it */ journal_release_buffer(handle, bitmap_bh); if (++ino < EXT3_INODES_PER_GROUP(sb)) goto repeat_in_this_group; } /* * This case is possible in concurrent environment. It is very * rare. We cannot repeat the find_group_xxx() call because * that will simply return the same blockgroup, because the * group descriptor metadata has not yet been updated. * So we just go onto the next blockgroup. 
*/ if (++group == sbi->s_groups_count) group = 0; } err = -ENOSPC; goto out; got: ino += group * EXT3_INODES_PER_GROUP(sb) + 1; if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { ext3_error (sb, "ext3_new_inode", "reserved inode or inode > inodes count - " "block_group = %d, inode=%lu", group, ino); err = -EIO; goto fail; } BUFFER_TRACE(bh2, "get_write_access"); err = ext3_journal_get_write_access(handle, bh2); if (err) goto fail; spin_lock(sb_bgl_lock(sbi, group)); le16_add_cpu(&gdp->bg_free_inodes_count, -1); if (S_ISDIR(mode)) { le16_add_cpu(&gdp->bg_used_dirs_count, 1); } spin_unlock(sb_bgl_lock(sbi, group)); BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata"); err = ext3_journal_dirty_metadata(handle, bh2); if (err) goto fail; percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); if (test_opt(sb, GRPID)) { inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; } else inode_init_owner(inode, dir, mode); inode->i_ino = ino; /* This is the optimal IO size (for stat), not the fs block size */ inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; memset(ei->i_data, 0, sizeof(ei->i_data)); ei->i_dir_start_lookup = 0; ei->i_disksize = 0; ei->i_flags = ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED); #ifdef EXT3_FRAGMENTS ei->i_faddr = 0; ei->i_frag_no = 0; ei->i_frag_size = 0; #endif ei->i_file_acl = 0; ei->i_dir_acl = 0; ei->i_dtime = 0; ei->i_block_alloc_info = NULL; ei->i_block_group = group; ext3_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; if (insert_inode_locked(inode) < 0) { /* * Likely a bitmap corruption causing inode to be allocated * twice. */ err = -EIO; goto fail; } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); ei->i_state_flags = 0; ext3_set_inode_state(inode, EXT3_STATE_NEW); /* See comment in ext3_iget for explanation */ if (ino >= EXT3_FIRST_INO(sb) + 1 && EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) { ei->i_extra_isize = sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE; } else { ei->i_extra_isize = 0; } ret = inode; dquot_initialize(inode); err = dquot_alloc_inode(inode); if (err) goto fail_drop; err = ext3_init_acl(handle, inode, dir); if (err) goto fail_free_drop; err = ext3_init_security(handle, inode, dir, qstr); if (err) goto fail_free_drop; err = ext3_mark_inode_dirty(handle, inode); if (err) { ext3_std_error(sb, err); goto fail_free_drop; } ext3_debug("allocating inode %lu\n", inode->i_ino); trace_ext3_allocate_inode(inode, dir, mode); goto really_out; fail: ext3_std_error(sb, err); out: iput(inode); ret = ERR_PTR(err); really_out: brelse(bitmap_bh); return ret; fail_free_drop: dquot_free_inode(inode); fail_drop: dquot_drop(inode); inode->i_flags |= S_NOQUOTA; clear_nlink(inode); unlock_new_inode(inode); iput(inode); brelse(bitmap_bh); return ERR_PTR(err); }
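/*
 * The "got:" path above turns a (block group, bitmap bit) pair into an inode
 * number with ino = group * EXT3_INODES_PER_GROUP + bit + 1 (inode numbers
 * are 1-based).  A standalone illustration of the mapping and its inverse,
 * using a hypothetical inodes-per-group value (not kernel code):
 */
#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_group = 8192;	/* hypothetical */
	unsigned long group = 3, bit = 41;
	unsigned long ino = group * inodes_per_group + bit + 1;

	printf("group %lu, bit %lu -> inode %lu\n", group, bit, ino);
	/* and back again */
	printf("inode %lu -> group %lu, bit %lu\n", ino,
	       (ino - 1) / inodes_per_group, (ino - 1) % inodes_per_group);
	return 0;
}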
int mlowerfs_ext3_write_record(struct file *file, void *buf, int bufsize, loff_t *offs, int force_sync) { struct buffer_head *bh = NULL; unsigned long block; struct inode *inode = file->f_dentry->d_inode; loff_t old_size = i_size_read(inode), offset = *offs; loff_t new_size = i_size_read(inode); handle_t *handle; int err = 0, block_count = 0, blocksize, size, boffs; /* Determine how many transaction credits are needed */ blocksize = 1 << inode->i_blkbits; block_count = (*offs & (blocksize - 1)) + bufsize; block_count = (block_count + blocksize - 1) >> inode->i_blkbits; handle = _mlowerfs_ext3_journal_start(inode, block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2); if (IS_ERR(handle)) { MERROR("can't start transaction for %d blocks (%d bytes)\n", block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2, bufsize); return PTR_ERR(handle); } while (bufsize > 0) { if (bh != NULL) brelse(bh); block = offset >> inode->i_blkbits; boffs = offset & (blocksize - 1); size = min(blocksize - boffs, bufsize); bh = _mlowerfs_ext3_bread(handle, inode, block, 1, &err); if (!bh) { MERROR("can't read/create block: %d\n", err); goto out; } err = _mlowerfs_ext3_journal_get_write_access(handle, bh); if (err) { MERROR("journal_get_write_access() returned error %d\n", err); goto out; } MASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size); memcpy(bh->b_data + boffs, buf, size); err = _mlowerfs_ext3_journal_dirty_metadata(handle, bh); if (err) { MERROR("journal_dirty_metadata() returned error %d\n", err); goto out; } if (offset + size > new_size) new_size = offset + size; offset += size; bufsize -= size; buf += size; } if (force_sync) handle->h_sync = 1; /* recovery likes this */ out: if (bh) brelse(bh); /* correct in-core and on-disk sizes */ if (new_size > i_size_read(inode)) { lock_kernel(); if (new_size > i_size_read(inode)) i_size_write(inode, new_size); if (i_size_read(inode) > EXT3_I(inode)->i_disksize) EXT3_I(inode)->i_disksize = i_size_read(inode); if (i_size_read(inode) > old_size) mark_inode_dirty(inode); unlock_kernel(); } _mlowerfs_ext3_journal_stop(handle); if (err == 0) *offs = offset; return err; }
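/*
 * Illustration of the transaction-credit estimate used above (standalone
 * example, not kernel code): the write may touch every block between the
 * first byte at *offs and the last byte of the buffer, so the partial
 * leading block is folded into the count before rounding up.  The block
 * size, offset, record size and per-block credit factor below are
 * hypothetical; the last stands in for EXT3_DATA_TRANS_BLOCKS().
 */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;		/* hypothetical 4 KiB blocks */
	unsigned int blocksize = 1u << blkbits;
	unsigned long long offs = 6000;		/* hypothetical file offset */
	unsigned int bufsize = 10000;		/* hypothetical record size */
	unsigned int data_trans_blocks = 8;	/* hypothetical credit factor */
	unsigned int block_count, credits;

	block_count = (offs & (blocksize - 1)) + bufsize;
	block_count = (block_count + blocksize - 1) >> blkbits;
	credits = block_count * data_trans_blocks + 2;

	printf("%u blocks touched -> %u journal credits requested\n",
	       block_count, credits);
	return 0;
}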
/* * Called at inode eviction from icache */ void ext3_evict_inode (struct inode *inode) { struct ext3_inode_info *ei = EXT3_I(inode); struct ext3_block_alloc_info *rsv; handle_t *handle; int want_delete = 0; trace_ext3_evict_inode(inode); if (!inode->i_nlink && !is_bad_inode(inode)) { dquot_initialize(inode); want_delete = 1; } /* * When journalling data dirty buffers are tracked only in the journal. * So although mm thinks everything is clean and ready for reaping the * inode might still have some pages to write in the running * transaction or waiting to be checkpointed. Thus calling * journal_invalidatepage() (via truncate_inode_pages()) to discard * these buffers can cause data loss. Also even if we did not discard * these buffers, we would have no way to find them after the inode * is reaped and thus user could see stale data if he tries to read * them before the transaction is checkpointed. So be careful and * force everything to disk here... We use ei->i_datasync_tid to * store the newest transaction containing inode's data. * * Note that directories do not have this problem because they don't * use page cache. * * The s_journal check handles the case when ext3_get_journal() fails * and puts the journal inode. */ if (inode->i_nlink && ext3_should_journal_data(inode) && EXT3_SB(inode->i_sb)->s_journal && (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && inode->i_ino != EXT3_JOURNAL_INO) { tid_t commit_tid = atomic_read(&ei->i_datasync_tid); journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; log_start_commit(journal, commit_tid); log_wait_commit(journal, commit_tid); filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); ext3_discard_reservation(inode); rsv = ei->i_block_alloc_info; ei->i_block_alloc_info = NULL; if (unlikely(rsv)) kfree(rsv); if (!want_delete) goto no_delete; handle = start_transaction(inode); if (IS_ERR(handle)) { /* * If we're going to skip the normal cleanup, we still need to * make sure that the in-core orphan linked list is properly * cleaned up. */ ext3_orphan_del(NULL, inode); goto no_delete; } if (IS_SYNC(inode)) handle->h_sync = 1; inode->i_size = 0; if (inode->i_blocks) ext3_truncate(inode); /* * Kill off the orphan record created when the inode lost the last * link. Note that ext3_orphan_del() has to be able to cope with the * deletion of a non-existent orphan - ext3_truncate() could * have removed the record. */ ext3_orphan_del(handle, inode); ei->i_dtime = get_seconds(); /* * One subtle ordering requirement: if anything has gone wrong * (transaction abort, IO errors, whatever), then we can still * do these next steps (the fs will already have been marked as * having errors), but we can't free the inode if the mark_dirty * fails. */ if (ext3_mark_inode_dirty(handle, inode)) { /* If that failed, just dquot_drop() and be done with that */ dquot_drop(inode); clear_inode(inode); } else { ext3_xattr_delete_inode(handle, inode); dquot_free_inode(inode); dquot_drop(inode); clear_inode(inode); ext3_free_inode(handle, inode); } ext3_journal_stop(handle); return; no_delete: clear_inode(inode); dquot_drop(inode); }
/* * inode->i_sem: down, or inode is just being initialized * BKL: held */ static int ext3_do_set_acl(handle_t *handle, struct inode *inode, int type, struct posix_acl *acl) { struct ext3_inode_info *ei = EXT3_I(inode); int name_index; void *value = NULL; size_t size; int error; if (S_ISLNK(inode->i_mode)) return -ENODATA; switch(type) { case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { mode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error < 0) return error; else { inode->i_mode = mode; ext3_mark_inode_dirty(handle, inode); if (error == 0) acl = NULL; } } break; case ACL_TYPE_DEFAULT: name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { if (acl->a_count > EXT3_ACL_MAX_ENTRIES) return -EINVAL; value = ext3_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } error = ext3_xattr_set_handle(handle, inode, name_index, "", value, size, 0); if (value) kfree(value); if (!error) { switch(type) { case ACL_TYPE_ACCESS: if (ei->i_acl != EXT3_ACL_NOT_CACHED) posix_acl_release(ei->i_acl); ei->i_acl = posix_acl_dup(acl); break; case ACL_TYPE_DEFAULT: if (ei->i_default_acl != EXT3_ACL_NOT_CACHED) posix_acl_release(ei->i_default_acl); ei->i_default_acl = posix_acl_dup(acl); break; } } return error; }
int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { struct ext3_inode_info *ei = EXT3_I(inode); unsigned int flags; unsigned short rsv_window_size; ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg); switch (cmd) { case EXT3_IOC_GETFLAGS: flags = ei->i_flags & EXT3_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); case EXT3_IOC_SETFLAGS: { handle_t *handle = NULL; int err; struct ext3_iloc iloc; unsigned int oldflags; unsigned int jflag; if (IS_RDONLY(inode)) return -EROFS; if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) return -EACCES; if (get_user(flags, (int __user *) arg)) return -EFAULT; if (!S_ISDIR(inode->i_mode)) flags &= ~EXT3_DIRSYNC_FL; mutex_lock(&inode->i_mutex); oldflags = ei->i_flags; /* The JOURNAL_DATA flag is modifiable only by root */ jflag = flags & EXT3_JOURNAL_DATA_FL; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. * * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); return -EPERM; } } /* * The JOURNAL_DATA flag can only be changed by * the relevant capability. */ if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) { if (!capable(CAP_SYS_RESOURCE)) { mutex_unlock(&inode->i_mutex); return -EPERM; } } handle = ext3_journal_start(inode, 1); if (IS_ERR(handle)) { mutex_unlock(&inode->i_mutex); return PTR_ERR(handle); } if (IS_SYNC(inode)) handle->h_sync = 1; err = ext3_reserve_inode_write(handle, inode, &iloc); if (err) goto flags_err; flags = flags & EXT3_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE; ei->i_flags = flags; ext3_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME_SEC; err = ext3_mark_iloc_dirty(handle, inode, &iloc); flags_err: ext3_journal_stop(handle); if (err) { mutex_unlock(&inode->i_mutex); return err; } if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) err = ext3_change_inode_journal_flag(inode, jflag); mutex_unlock(&inode->i_mutex); return err; } case EXT3_IOC_GETVERSION: case EXT3_IOC_GETVERSION_OLD: return put_user(inode->i_generation, (int __user *) arg); case EXT3_IOC_SETVERSION: case EXT3_IOC_SETVERSION_OLD: { handle_t *handle; struct ext3_iloc iloc; __u32 generation; int err; if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) return -EPERM; if (IS_RDONLY(inode)) return -EROFS; if (get_user(generation, (int __user *) arg)) return -EFAULT; handle = ext3_journal_start(inode, 1); if (IS_ERR(handle)) return PTR_ERR(handle); err = ext3_reserve_inode_write(handle, inode, &iloc); if (err == 0) { inode->i_ctime = CURRENT_TIME_SEC; inode->i_generation = generation; err = ext3_mark_iloc_dirty(handle, inode, &iloc); } ext3_journal_stop(handle); return err; } #ifdef CONFIG_JBD_DEBUG case EXT3_IOC_WAIT_FOR_READONLY: /* * This is racy - by the time we're woken up and running, * the superblock could be released. And the module could * have been unloaded. So sue me. * * Returns 1 if it slept, else zero. 
*/ { struct super_block *sb = inode->i_sb; DECLARE_WAITQUEUE(wait, current); int ret = 0; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait); if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) { schedule(); ret = 1; } remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait); return ret; } #endif case EXT3_IOC_GETRSVSZ: if (test_opt(inode->i_sb, RESERVATION) && S_ISREG(inode->i_mode) && ei->i_block_alloc_info) { rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size; return put_user(rsv_window_size, (int __user *)arg); } return -ENOTTY; case EXT3_IOC_SETRSVSZ: { if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode)) return -ENOTTY; if (IS_RDONLY(inode)) return -EROFS; if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) return -EACCES; if (get_user(rsv_window_size, (int __user *)arg)) return -EFAULT; if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS) rsv_window_size = EXT3_MAX_RESERVE_BLOCKS; /* * need to allocate reservation structure for this inode * before set the window size */ mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); if (ei->i_block_alloc_info){ struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } mutex_unlock(&ei->truncate_mutex); return 0; } case EXT3_IOC_GROUP_EXTEND: { ext3_fsblk_t n_blocks_count; struct super_block *sb = inode->i_sb; int err; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; if (IS_RDONLY(inode)) return -EROFS; if (get_user(n_blocks_count, (__u32 __user *)arg)) return -EFAULT; err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count); journal_lock_updates(EXT3_SB(sb)->s_journal); journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); return err; } case EXT3_IOC_GROUP_ADD: { struct ext3_new_group_data input; struct super_block *sb = inode->i_sb; int err; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; if (IS_RDONLY(inode)) return -EROFS; if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg, sizeof(input))) return -EFAULT; err = ext3_group_add(sb, &input); journal_lock_updates(EXT3_SB(sb)->s_journal); journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); return err; } default: return -ENOTTY; } }
static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
	nd_set_link(nd, (char*)ei->i_data);
	return NULL;
}
/* * Set the access or default ACL of an inode. * * inode->i_mutex: down unless called from ext3_new_inode */ static int ext3_set_acl(handle_t *handle, struct inode *inode, int type, struct posix_acl *acl) { struct ext3_inode_info *ei = EXT3_I(inode); int name_index; void *value = NULL; size_t size = 0; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch(type) { case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { mode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error < 0) return error; else { inode->i_mode = mode; inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); if (error == 0) acl = NULL; } } break; case ACL_TYPE_DEFAULT: name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = ext3_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } error = ext3_xattr_set_handle(handle, inode, name_index, "", value, size, 0); kfree(value); if (!error) { switch(type) { case ACL_TYPE_ACCESS: ext3_iset_acl(inode, &ei->i_acl, acl); break; case ACL_TYPE_DEFAULT: ext3_iset_acl(inode, &ei->i_default_acl, acl); break; } } return error; }
static int find_group_orlov(struct super_block *sb, struct inode *parent) { int parent_group = EXT3_I(parent)->i_block_group; struct ext3_sb_info *sbi = EXT3_SB(sb); int ngroups = sbi->s_groups_count; int inodes_per_group = EXT3_INODES_PER_GROUP(sb); unsigned int freei, avefreei; ext3_fsblk_t freeb, avefreeb; unsigned int ndirs; int max_dirs, min_inodes; ext3_grpblk_t min_blocks; int group = -1, i; struct ext3_group_desc *desc; freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); avefreei = freei / ngroups; freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter); avefreeb = freeb / ngroups; ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); if ((parent == sb->s_root->d_inode) || (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) { int best_ndir = inodes_per_group; int best_group = -1; group = prandom_u32(); parent_group = (unsigned)group % ngroups; for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, NULL); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) continue; if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) continue; if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) continue; best_group = group; best_ndir = le16_to_cpu(desc->bg_used_dirs_count); } if (best_group >= 0) return best_group; goto fallback; } max_dirs = ndirs / ngroups + inodes_per_group / 16; min_inodes = avefreei - inodes_per_group / 4; min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4; for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, NULL); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) continue; if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) continue; if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) continue; return group; } fallback: for (i = 0; i < ngroups; i++) { group = (parent_group + i) % ngroups; desc = ext3_get_group_desc (sb, group, NULL); if (!desc || !desc->bg_free_inodes_count) continue; if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) return group; } if (avefreei) { /* * The free-inodes counter is approximate, and for really small * filesystems the above test can fail to find any blockgroups */ avefreei = 0; goto fallback; } return -1; }
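/*
 * Illustration of the thresholds computed above (standalone example with
 * hypothetical filesystem counters, not kernel code): a group is acceptable
 * for a new directory only if it is not already over max_dirs, and still has
 * at least min_inodes free inodes and min_blocks free blocks.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ngroups = 64;		/* hypothetical group count */
	unsigned int inodes_per_group = 8192;
	unsigned int blocks_per_group = 32768;
	unsigned int freei = 300000, ndirs = 5000;
	unsigned long long freeb = 1200000;

	unsigned int avefreei = freei / ngroups;
	unsigned long long avefreeb = freeb / ngroups;
	int max_dirs = ndirs / ngroups + inodes_per_group / 16;
	int min_inodes = avefreei - inodes_per_group / 4;
	long long min_blocks = avefreeb - blocks_per_group / 4;

	printf("avefreei=%u avefreeb=%llu\n", avefreei, avefreeb);
	printf("max_dirs=%d min_inodes=%d min_blocks=%lld\n",
	       max_dirs, min_inodes, min_blocks);
	return 0;
}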