/**
 * inode_change_ok - check if attribute changes to an inode are allowed
 * @inode: inode to check
 * @attr: attributes to change
 *
 * Check if we are allowed to change the attributes contained in @attr
 * in the given inode. This includes the normal unix access permission
 * checks, as well as checks for rlimits and others.
 *
 * Should be called as the first thing in ->setattr implementations,
 * possibly after taking additional locks.
 *
 * Returns 0 if the change is allowed, a negative errno otherwise.  May
 * clear S_ISGID from @attr->ia_mode as a side effect (see below).
 */
int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	/*
	 * First check size constraints.  These can't be overridden using
	 * ATTR_FORCE, so they are deliberately checked before the
	 * ATTR_FORCE short-circuit below.
	 */
	if (ia_valid & ATTR_SIZE) {
		int error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;
	}

	/* If force is set, skip all remaining permission checks. */
	if (ia_valid & ATTR_FORCE)
		return 0;

	/*
	 * Make sure a caller can chown: either the fsuid owns the inode and
	 * the uid is unchanged, or the caller has CAP_CHOWN.
	 */
	if ((ia_valid & ATTR_UID) &&
	    (current_fsuid() != inode->i_uid ||
	     attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
		return -EPERM;

	/*
	 * Make sure caller can chgrp: owner may change the group to one of
	 * their own groups (or leave it unchanged); CAP_CHOWN overrides.
	 */
	if ((ia_valid & ATTR_GID) &&
	    (current_fsuid() != inode->i_uid ||
	     (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) &&
	    !capable(CAP_CHOWN))
		return -EPERM;

	/* Make sure a caller can chmod. */
	if (ia_valid & ATTR_MODE) {
		if (!is_owner_or_cap(inode))
			return -EPERM;
		/*
		 * Also check the setgid bit: it is silently dropped unless
		 * the caller is in the (new) owning group or has CAP_FSETID.
		 * Note the group checked is the one being set by this very
		 * request when ATTR_GID is present.
		 */
		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
				inode->i_gid) && !capable(CAP_FSETID))
			attr->ia_mode &= ~S_ISGID;
	}

	/* Check for setting the inode time to an explicit value. */
	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
		if (!is_owner_or_cap(inode))
			return -EPERM;
	}
	return 0;
}
/*
 * xattr-layer entry point for setting a POSIX ACL on an ocfs2 inode.
 * @value/@size carry the raw xattr representation; a NULL @value removes
 * the ACL.  Returns 0 on success or a negative errno.
 */
static int ocfs2_xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct posix_acl *acl = NULL;
	int ret;

	/* ACLs must be enabled on this mount. */
	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
		return -EOPNOTSUPP;

	/* Only the owner (or a privileged caller) may change ACLs. */
	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value != NULL) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl != NULL) {
			ret = posix_acl_valid(acl);
			if (ret)
				goto cleanup;
		}
	}

	ret = ocfs2_set_acl(NULL, inode, NULL, type, acl, NULL, NULL);

cleanup:
	posix_acl_release(acl);
	return ret;
}
/*
 * xattr handler ->set callback for ext2 POSIX ACLs (dentry-based API).
 * A NULL @value removes the ACL.  Returns 0 or a negative errno.
 */
static int ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	struct posix_acl *acl = NULL;
	int error;

	/* The handler prefix is stripped; any remaining suffix is invalid. */
	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return -EOPNOTSUPP;
	if (!is_owner_or_cap(dentry->d_inode))
		return -EPERM;

	if (value != NULL) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl != NULL) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	}

	error = ext2_set_acl(dentry->d_inode, type, acl);

release_and_out:
	posix_acl_release(acl);
	return error;
}
/*
 * Set a POSIX ACL on a jffs2 inode from its raw xattr representation.
 * NULL @value removes the ACL.  Returns 0 or a negative errno.
 */
static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value, size_t size)
{
	struct posix_acl *acl = NULL;
	int rc;

	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value != NULL) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl != NULL) {
			rc = posix_acl_valid(acl);
			if (rc)
				goto out;
		}
	}

	rc = jffs2_set_acl(inode, type, acl);
out:
	posix_acl_release(acl);
	return rc;
}
/*
 * xattr handler ->set callback for btrfs POSIX ACLs.
 * A NULL @value removes the ACL.  Returns 0 or a negative errno.
 *
 * Fix: validate a user-supplied ACL with posix_acl_valid() before handing
 * it to btrfs_set_acl() — every other ACL-set handler does this, and
 * storing an unvalidated ACL lets a malformed structure reach disk.
 * Also dropped the dead stores to @value/@size, which were never read
 * after posix_acl_from_xattr().
 */
static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	int ret = 0;
	struct posix_acl *acl = NULL;

	/* Only the owner (or a privileged caller) may change ACLs. */
	if (!is_owner_or_cap(dentry->d_inode))
		return -EPERM;

	if (!IS_POSIXACL(dentry->d_inode))
		return -EOPNOTSUPP;

	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl) {
			/* Reject malformed ACLs before persisting them. */
			ret = posix_acl_valid(acl);
			if (ret)
				goto out;
		}
	}

	ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
out:
	posix_acl_release(acl);
	return ret;
}
/*
 * xattr-layer entry point for setting an ext2 POSIX ACL (inode-based API).
 * NULL @value removes the ACL.  Returns 0 or a negative errno.
 */
static int ext2_xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
{
	struct posix_acl *acl = NULL;
	int error;

	if (!test_opt(inode->i_sb, POSIX_ACL))
		return -EOPNOTSUPP;
	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value != NULL) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl != NULL) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	}

	error = ext2_set_acl(inode, type, acl);

release_and_out:
	posix_acl_release(acl);
	return error;
}
/* POSIX UID/GID verification for setting inode attributes. */ int inode_change_ok(const struct inode *inode, struct iattr *attr) { int retval = -EPERM; unsigned int ia_valid = attr->ia_valid; /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) goto fine; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (current_fsuid() != inode->i_uid || attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN)) goto error; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (current_fsuid() != inode->i_uid || (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) && !capable(CAP_CHOWN)) goto error; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!is_owner_or_cap(inode)) goto error; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable(CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!is_owner_or_cap(inode)) goto error; } fine: retval = 0; error: return retval; }
/*
 * Generic ACL-set xattr handler for filesystems that keep ACLs only in
 * the inode cache (e.g. in-memory filesystems).  A NULL @value removes
 * the ACL.  Returns 0 or a negative errno.
 */
static int generic_acl_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;
	struct posix_acl *acl = NULL;
	int error;

	/* Handler prefix already stripped; trailing suffix is invalid. */
	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	if (!is_owner_or_cap(inode))
		return -EPERM;
	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
	}
	if (acl) {
		mode_t mode;

		error = posix_acl_valid(acl);
		if (error)
			goto failed;
		switch (type) {
		case ACL_TYPE_ACCESS:
			/*
			 * Fold the ACL into i_mode.  posix_acl_equiv_mode()
			 * returns 0 when the ACL is fully representable by
			 * the mode bits alone — in that case we drop the ACL
			 * and cache NULL instead.
			 */
			mode = inode->i_mode;
			error = posix_acl_equiv_mode(acl, &mode);
			if (error < 0)
				goto failed;
			inode->i_mode = mode;
			if (error == 0) {
				posix_acl_release(acl);
				acl = NULL;
			}
			break;
		case ACL_TYPE_DEFAULT:
			/* Default ACLs only make sense on directories. */
			if (!S_ISDIR(inode->i_mode)) {
				error = -EINVAL;
				goto failed;
			}
			break;
		}
	}
	/* set_cached_acl() takes its own reference (or clears on NULL). */
	set_cached_acl(inode, type, acl);
	error = 0;
failed:
	posix_acl_release(acl);
	return error;
}
/*
 * reiserfs xattr handler ->set callback for POSIX ACLs.  Wraps the actual
 * ACL update in a journal transaction under the reiserfs write lock.
 * NULL @value removes the ACL.  Returns 0 or a negative errno.
 */
static int posix_acl_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;
	struct posix_acl *acl;
	int error, error2;
	struct reiserfs_transaction_handle th;
	size_t jcreate_blocks;
	if (!reiserfs_posixacl(inode->i_sb))
		return -EOPNOTSUPP;
	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl)) {
			return PTR_ERR(acl);
		} else if (acl) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	} else
		acl = NULL;

	/* Pessimism: We can't assume that anything from the xattr root up
	 * has been created. Budget journal blocks for creating the whole
	 * xattr directory chain plus the attribute data itself. */
	jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
			 reiserfs_xattr_nblocks(inode, size) * 2;

	reiserfs_write_lock(inode->i_sb);
	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
	if (error == 0) {
		error = reiserfs_set_acl(&th, inode, type, acl);
		/* journal_end() must run even if reiserfs_set_acl() failed;
		 * its error takes precedence only when set_acl succeeded. */
		error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
		if (error2)
			error = error2;
	}
	reiserfs_write_unlock(inode->i_sb);

release_and_out:
	posix_acl_release(acl);
	return error;
}
/*
 * Check permissions for extended attribute access.  This is a bit complicated
 * because different namespaces have very different rules.
 *
 * @mask is MAY_READ/MAY_WRITE; returns 0 when access is allowed, otherwise
 * a negative errno.  Namespace checks are ordered: security./system. bypass
 * VFS policy, trusted. needs CAP_SYS_ADMIN, user. adds file-type and
 * sticky-directory restrictions; everything falls through to permission().
 */
static int xattr_permission(struct inode *inode, const char *name, int mask)
{
	/*
	 * We can never set or remove an extended attribute on a read-only
	 * filesystem or on an immutable / append-only inode.
	 */
	if (mask & MAY_WRITE) {
		if (IS_RDONLY(inode))
			return -EROFS;
		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
			return -EPERM;
	}

	/*
	 * No restriction for security.* and system.* from the VFS.  Decision
	 * on these is left to the underlying filesystem / security module.
	 */
	if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
	    !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return 0;

	/*
	 * The trusted.* namespace can only be accessed by a privileged user.
	 */
	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

	/* In user.* namespace, only regular files and directories can have
	 * extended attributes.  For sticky directories, only the owner and
	 * privileged user can write attributes.
	 */
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
		if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
			return -EPERM;
		if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
		    (mask & MAY_WRITE) && !is_owner_or_cap(inode))
			return -EPERM;
	}

	/* Fall back to the normal inode permission check. */
	return permission(inode, mask, NULL);
}
/*
 * xattr handler ->set callback for ext4 POSIX ACLs.  The update runs in a
 * journal transaction and is retried on ENOSPC after flushing the journal.
 * NULL @value removes the ACL.  Returns 0 or a negative errno.
 *
 * Fix: the original returned PTR_ERR(handle) directly when
 * ext4_journal_start() failed, leaking the posix_acl reference obtained
 * from posix_acl_from_xattr().  Route that failure through
 * release_and_out so the ACL is always released.
 */
static int ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;
	handle_t *handle;
	struct posix_acl *acl;
	int error, retries = 0;

	/* Handler prefix already stripped; trailing suffix is invalid. */
	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(inode->i_sb, POSIX_ACL))
		return -EOPNOTSUPP;
	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		else if (acl) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	} else
		acl = NULL;

retry:
	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		/* Don't leak the ACL reference on journal-start failure. */
		error = PTR_ERR(handle);
		goto release_and_out;
	}
	error = ext4_set_acl(handle, inode, type, acl);
	ext4_journal_stop(handle);
	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

release_and_out:
	posix_acl_release(acl);
	return error;
}
/*
 * xattr handler ->set callback for f2fs POSIX ACLs.  NULL @value removes
 * the ACL.  Returns 0 or a negative errno.
 */
static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct posix_acl *acl = NULL;
	int error;

	if (!test_opt(sbi, POSIX_ACL))
		return -EOPNOTSUPP;
	if (strcmp(name, "") != 0)
		return -EINVAL;

	/*
	 * 2.6.35 uses is_owner_or_cap() instead of inode_owner_or_capable()
	 * -- marc1706
	 */
	if (!is_owner_or_cap(inode))
		return -EPERM;

	if (value != NULL) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (acl != NULL) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	}

	error = f2fs_set_acl(inode, type, acl);

release_and_out:
	posix_acl_release(acl);
	return error;
}
/*
 * ioctl handler: HDD_IOC_GETFLAGS returns the user-visible inode flags;
 * HDD_IOC_SETFLAGS updates the user-modifiable ones (immutable/append
 * changes additionally require CAP_LINUX_IMMUTABLE).
 */
long hdd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct hdd_inode_info *hi = HDD_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case HDD_IOC_GETFLAGS:
		hdd_get_inode_flags(hi);
		flags = hi->i_flags & HDD_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case HDD_IOC_SETFLAGS: {
		unsigned int oldflags;

		/* Pin the mount writable; on success ret == 0 and is the
		 * value returned when the whole update succeeds. */
		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;
		if (!is_owner_or_cap(inode)) {
			ret = -EACCES;
			goto setflags_out;
		}
		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto setflags_out;
		}
		flags = hdd_mask_flags(inode->i_mode, flags);
		/* i_mutex serializes against concurrent flag changes. */
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it. */
		if (IS_NOQUOTA(inode)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto setflags_out;
		}
		oldflags = hi->i_flags;
		/* Toggling APPEND/IMMUTABLE requires CAP_LINUX_IMMUTABLE. */
		if ((flags ^ oldflags) & (HDD_APPEND_FL | HDD_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto setflags_out;
			}
		}
		/* Keep kernel-internal flag bits, replace user-modifiable ones. */
		flags = flags & HDD_FL_USER_MODIFIABLE;
		flags |= oldflags & ~HDD_FL_USER_MODIFIABLE;
		hi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);
		hdd_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
setflags_out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}
/*
 * reiserfs_ioctl - handler for ioctl for inode
 * supported commands:
 *  1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
 *     and prevent packing file (argument arg has to be non-zero)
 *  2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
 *  3) That's all for a while ...
 */
int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int flags;

	switch (cmd) {
	case REISERFS_IOC_UNPACK:
		if (S_ISREG(inode->i_mode)) {
			if (arg)
				return reiserfs_unpack(inode, filp);
			else
				return 0;
		} else
			return -ENOTTY;
		/* following two cases are taken from fs/ext2/ioctl.c by Remy
		   Card ([email protected]) */
	case REISERFS_IOC_GETFLAGS:
		if (!reiserfs_attrs(inode->i_sb))
			return -ENOTTY;

		flags = REISERFS_I(inode)->i_attrs;
		i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
		return put_user(flags, (int __user *)arg);
	case REISERFS_IOC_SETFLAGS:{
			if (!reiserfs_attrs(inode->i_sb))
				return -ENOTTY;

			if (IS_RDONLY(inode))
				return -EROFS;

			if (!is_owner_or_cap(inode))
				return -EPERM;

			if (get_user(flags, (int __user *)arg))
				return -EFAULT;

			/* Toggling IMMUTABLE/APPEND needs CAP_LINUX_IMMUTABLE. */
			if (((flags ^ REISERFS_I(inode)->
			      i_attrs) & (REISERFS_IMMUTABLE_FL |
					  REISERFS_APPEND_FL))
			    && !capable(CAP_LINUX_IMMUTABLE))
				return -EPERM;

			/* NOTAIL on a regular file requires unpacking the
			 * tail first; bail out if that fails. */
			if ((flags & REISERFS_NOTAIL_FL) &&
			    S_ISREG(inode->i_mode)) {
				int result;

				result = reiserfs_unpack(inode, filp);
				if (result)
					return result;
			}
			sd_attrs_to_i_attrs(flags, inode);
			REISERFS_I(inode)->i_attrs = flags;
			inode->i_ctime = CURRENT_TIME_SEC;
			mark_inode_dirty(inode);
			return 0;
		}
	case REISERFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);
	case REISERFS_IOC_SETVERSION:
		if (!is_owner_or_cap(inode))
			return -EPERM;
		if (IS_RDONLY(inode))
			return -EROFS;
		if (get_user(inode->i_generation, (int __user *)arg))
			return -EFAULT;
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
		return 0;
	default:
		return -ENOTTY;
	}
}
/*
 * ext3 ioctl dispatcher: inode flags, version, block reservation window,
 * and online-resize commands.  SETFLAGS runs under i_mutex and a journal
 * handle; error paths must unlock/stop in the reverse order of acquisition.
 */
int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	unsigned int flags;
	unsigned short rsv_window_size;

	ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT3_IOC_GETFLAGS:
		ext3_get_inode_flags(ei);
		flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT3_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err;
		struct ext3_iloc iloc;
		unsigned int oldflags;
		unsigned int jflag;

		if (IS_RDONLY(inode))
			return -EROFS;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		/* DIRSYNC is meaningful only on directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~EXT3_DIRSYNC_FL;

		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -EPERM;
		}
		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT3_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				return -EPERM;
			}
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE)) {
				mutex_unlock(&inode->i_mutex);
				return -EPERM;
			}
		}

		handle = ext3_journal_start(inode, 1);
		if (IS_ERR(handle)) {
			mutex_unlock(&inode->i_mutex);
			return PTR_ERR(handle);
		}
		if (IS_SYNC(inode))
			handle->h_sync = 1;
		err = ext3_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		/* Merge the user-modifiable bits with the preserved ones. */
		flags = flags & EXT3_FL_USER_MODIFIABLE;
		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
		ei->i_flags = flags;

		ext3_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME_SEC;

		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext3_journal_stop(handle);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			return err;
		}

		/* Journal-mode change is applied outside the transaction. */
		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
			err = ext3_change_inode_journal_flag(inode, jflag);
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	case EXT3_IOC_GETVERSION:
	case EXT3_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT3_IOC_SETVERSION:
	case EXT3_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext3_iloc iloc;
		__u32 generation;
		int err;

		if (!is_owner_or_cap(inode))
			return -EPERM;
		if (IS_RDONLY(inode))
			return -EROFS;
		if (get_user(generation, (int __user *) arg))
			return -EFAULT;

		handle = ext3_journal_start(inode, 1);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		err = ext3_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = CURRENT_TIME_SEC;
			inode->i_generation = generation;
			err = ext3_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext3_journal_stop(handle);
		return err;
	}
#ifdef CONFIG_JBD_DEBUG
	case EXT3_IOC_WAIT_FOR_READONLY:
		/*
		 * This is racy - by the time we're woken up and running,
		 * the superblock could be released. And the module could
		 * have been unloaded. So sue me.
		 *
		 * Returns 1 if it slept, else zero.
		 */
		{
			struct super_block *sb = inode->i_sb;
			DECLARE_WAITQUEUE(wait, current);
			int ret = 0;

			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
			if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) {
				schedule();
				ret = 1;
			}
			remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
			return ret;
		}
#endif
	case EXT3_IOC_GETRSVSZ:
		if (test_opt(inode->i_sb, RESERVATION)
			&& S_ISREG(inode->i_mode)
			&& ei->i_block_alloc_info) {
			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
			return put_user(rsv_window_size, (int __user *)arg);
		}
		return -ENOTTY;
	case EXT3_IOC_SETRSVSZ: {

		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
			return -ENOTTY;

		if (IS_RDONLY(inode))
			return -EROFS;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(rsv_window_size, (int __user *)arg))
			return -EFAULT;

		/* Clamp to the maximum supported reservation window. */
		if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
			rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;

		/*
		 * need to allocate reservation structure for this inode
		 * before set the window size
		 */
		mutex_lock(&ei->truncate_mutex);
		if (!ei->i_block_alloc_info)
			ext3_init_block_alloc_info(inode);

		if (ei->i_block_alloc_info){
			struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
			rsv->rsv_goal_size = rsv_window_size;
		}
		mutex_unlock(&ei->truncate_mutex);
		return 0;
	}
	case EXT3_IOC_GROUP_EXTEND: {
		ext3_fsblk_t n_blocks_count;
		struct super_block *sb = inode->i_sb;
		int err;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (IS_RDONLY(inode))
			return -EROFS;

		if (get_user(n_blocks_count, (__u32 __user *)arg))
			return -EFAULT;

		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
		/* Flush the journal so the resize is fully on disk. */
		journal_lock_updates(EXT3_SB(sb)->s_journal);
		journal_flush(EXT3_SB(sb)->s_journal);
		journal_unlock_updates(EXT3_SB(sb)->s_journal);

		return err;
	}
	case EXT3_IOC_GROUP_ADD: {
		struct ext3_new_group_data input;
		struct super_block *sb = inode->i_sb;
		int err;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (IS_RDONLY(inode))
			return -EROFS;

		if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
				sizeof(input)))
			return -EFAULT;

		err = ext3_group_add(sb, &input);
		/* Flush the journal so the resize is fully on disk. */
		journal_lock_updates(EXT3_SB(sb)->s_journal);
		journal_flush(EXT3_SB(sb)->s_journal);
		journal_unlock_updates(EXT3_SB(sb)->s_journal);

		return err;
	}
	default:
		return -ENOTTY;
	}
}
/* If times==NULL, set access and modification to current time,
 * must be owner or have write permission.
 * Else, update from *times, must be owner or super user.
 *
 * @dfd may be AT_FDCWD or an open fd; a NULL @filename with a real fd
 * means "operate on that fd" (futimens-style).  The permission checks
 * themselves are delegated to inode_change_ok() via notify_change().
 */
long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags)
{
	int error;
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *inode;
	struct iattr newattrs;
	struct file *f = NULL;

	error = -EINVAL;
	/* Nanosecond fields must be valid or one of the UTIME_* specials. */
	if (times && (!nsec_valid(times[0].tv_nsec) ||
		      !nsec_valid(times[1].tv_nsec))) {
		goto out;
	}
	if (flags & ~AT_SYMLINK_NOFOLLOW)
		goto out;

	if (filename == NULL && dfd != AT_FDCWD) {
		error = -EINVAL;
		/* NOFOLLOW makes no sense when operating on an open fd. */
		if (flags & AT_SYMLINK_NOFOLLOW)
			goto out;

		error = -EBADF;
		f = fget(dfd);
		if (!f)
			goto out;
		dentry = f->f_path.dentry;
	} else {
		error = __user_walk_fd(dfd, filename, (flags & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW, &nd);
		if (error)
			goto out;

		dentry = nd.dentry;
	}

	inode = dentry->d_inode;

	error = -EROFS;
	if (IS_RDONLY(inode))
		goto dput_and_out;

	/* Don't worry, the checks are done in inode_change_ok() */
	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		error = -EPERM;
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
			goto dput_and_out;

		/* UTIME_OMIT skips the field; UTIME_NOW keeps "current time"
		 * semantics (no *_SET bit); otherwise use the explicit value. */
		if (times[0].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_ATIME;
		else if (times[0].tv_nsec != UTIME_NOW) {
			newattrs.ia_atime.tv_sec = times[0].tv_sec;
			newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
			newattrs.ia_valid |= ATTR_ATIME_SET;
		}

		if (times[1].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_MTIME;
		else if (times[1].tv_nsec != UTIME_NOW) {
			newattrs.ia_mtime.tv_sec = times[1].tv_sec;
			newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
			newattrs.ia_valid |= ATTR_MTIME_SET;
		}
	} else {
		error = -EACCES;
		if (IS_IMMUTABLE(inode))
			goto dput_and_out;

		/* Non-owner needs write permission (fd mode or path check). */
		if (!is_owner_or_cap(inode)) {
			if (f) {
				if (!(f->f_mode & FMODE_WRITE))
					goto dput_and_out;
			} else {
				error = vfs_permission(&nd, MAY_WRITE);
				if (error)
					goto dput_and_out;
			}
		}
	}
	mutex_lock(&inode->i_mutex);
	error = notify_change(dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);
dput_and_out:
	if (f)
		fput(f);
	else
		path_release(&nd);
out:
	return error;
}
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; struct super_block *sb = inode->i_sb; struct ext4_inode_info *ei = EXT4_I(inode); unsigned int flags; ext4_debug("cmd = %u, arg = %lu\n", cmd, arg); switch (cmd) { case EXT4_IOC_GETFLAGS: ext4_get_inode_flags(ei); flags = ei->i_flags & EXT4_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); case EXT4_IOC_SETFLAGS: { handle_t *handle = NULL; int err, migrate = 0; struct ext4_iloc iloc; unsigned int oldflags; unsigned int jflag; if (!is_owner_or_cap(inode)) return -EACCES; if (get_user(flags, (int __user *) arg)) return -EFAULT; err = mnt_want_write(filp->f_path.mnt); if (err) return err; flags = ext4_mask_flags(inode->i_mode, flags); err = -EPERM; mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) goto flags_out; oldflags = ei->i_flags; /* The JOURNAL_DATA flag is modifiable only by root */ jflag = flags & EXT4_JOURNAL_DATA_FL; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. * * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } /* * The JOURNAL_DATA flag can only be changed by * the relevant capability. 
*/ if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { if (!capable(CAP_SYS_RESOURCE)) goto flags_out; } if (oldflags & EXT4_EXTENTS_FL) { /* We don't support clearning extent flags */ if (!(flags & EXT4_EXTENTS_FL)) { err = -EOPNOTSUPP; goto flags_out; } } else if (flags & EXT4_EXTENTS_FL) { /* migrate the file */ migrate = 1; flags &= ~EXT4_EXTENTS_FL; } if (flags & EXT4_EOFBLOCKS_FL) { /* we don't support adding EOFBLOCKS flag */ if (!(oldflags & EXT4_EOFBLOCKS_FL)) { err = -EOPNOTSUPP; goto flags_out; } } else if (oldflags & EXT4_EOFBLOCKS_FL) ext4_truncate(inode); handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto flags_out; } if (IS_SYNC(inode)) ext4_handle_sync(handle); err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto flags_err; flags = flags & EXT4_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE; ei->i_flags = flags; ext4_set_inode_flags(inode); inode->i_ctime = ext4_current_time(inode); err = ext4_mark_iloc_dirty(handle, inode, &iloc); flags_err: ext4_journal_stop(handle); if (err) goto flags_out; if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) err = ext4_change_inode_journal_flag(inode, jflag); if (err) goto flags_out; if (migrate) err = ext4_ext_migrate(inode); flags_out: mutex_unlock(&inode->i_mutex); mnt_drop_write(filp->f_path.mnt); return err; } case EXT4_IOC_GETVERSION: case EXT4_IOC_GETVERSION_OLD: return put_user(inode->i_generation, (int __user *) arg); case EXT4_IOC_SETVERSION: case EXT4_IOC_SETVERSION_OLD: { handle_t *handle; struct ext4_iloc iloc; __u32 generation; int err; if (!is_owner_or_cap(inode)) return -EPERM; err = mnt_want_write(filp->f_path.mnt); if (err) return err; if (get_user(generation, (int __user *) arg)) { err = -EFAULT; goto setversion_out; } handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto setversion_out; } err = ext4_reserve_inode_write(handle, inode, &iloc); if (err == 0) { inode->i_ctime = 
ext4_current_time(inode); inode->i_generation = generation; err = ext4_mark_iloc_dirty(handle, inode, &iloc); } ext4_journal_stop(handle); setversion_out: mnt_drop_write(filp->f_path.mnt); return err; } #ifdef CONFIG_JBD2_DEBUG case EXT4_IOC_WAIT_FOR_READONLY: /* * This is racy - by the time we're woken up and running, * the superblock could be released. And the module could * have been unloaded. So sue me. * * Returns 1 if it slept, else zero. */ { DECLARE_WAITQUEUE(wait, current); int ret = 0; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait); if (timer_pending(&EXT4_SB(sb)->turn_ro_timer)) { schedule(); ret = 1; } remove_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait); return ret; } #endif case EXT4_IOC_GROUP_EXTEND: { ext4_fsblk_t n_blocks_count; int err, err2=0; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; if (get_user(n_blocks_count, (__u32 __user *)arg)) return -EFAULT; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { ext4_msg(sb, KERN_ERR, "Online resizing not supported with bigalloc"); return -EOPNOTSUPP; } err = mnt_want_write(filp->f_path.mnt); if (err) return err; err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count); if (EXT4_SB(sb)->s_journal) { jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); } if (err == 0) err = err2; mnt_drop_write(filp->f_path.mnt); return err; } case EXT4_IOC_MOVE_EXT: { struct move_extent me; struct file *donor_filp; int err; if (!(filp->f_mode & FMODE_READ) || !(filp->f_mode & FMODE_WRITE)) return -EBADF; if (copy_from_user(&me, (struct move_extent __user *)arg, sizeof(me))) return -EFAULT; donor_filp = fget(me.donor_fd); if (!donor_filp) return -EBADF; if (!(donor_filp->f_mode & FMODE_WRITE)) { err = -EBADF; goto mext_out; } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { ext4_msg(sb, KERN_ERR, "Online defrag not supported with 
bigalloc"); return -EOPNOTSUPP; } err = mnt_want_write(filp->f_path.mnt); if (err) goto mext_out; me.moved_len = 0; err = ext4_move_extents(filp, donor_filp, me.orig_start, me.donor_start, me.len, &me.moved_len); mnt_drop_write(filp->f_path.mnt); if (me.moved_len > 0) file_remove_suid(donor_filp); if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) err = -EFAULT; mext_out: fput(donor_filp); return err; } case EXT4_IOC_GROUP_ADD: { struct ext4_new_group_data input; int err, err2=0; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg, sizeof(input))) return -EFAULT; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { ext4_msg(sb, KERN_ERR, "Online resizing not supported with bigalloc"); return -EOPNOTSUPP; } err = mnt_want_write(filp->f_path.mnt); if (err) return err; err = ext4_group_add(sb, &input); if (EXT4_SB(sb)->s_journal) { jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); } if (err == 0) err = err2; mnt_drop_write(filp->f_path.mnt); return err; } case EXT4_IOC_MIGRATE: { int err; if (!is_owner_or_cap(inode)) return -EACCES; err = mnt_want_write(filp->f_path.mnt); if (err) return err; /* * inode_mutex prevent write and truncate on the file. * Read still goes through. We take i_data_sem in * ext4_ext_swap_inode_data before we switch the * inode format to prevent read. 
*/ mutex_lock(&(inode->i_mutex)); err = ext4_ext_migrate(inode); mutex_unlock(&(inode->i_mutex)); mnt_drop_write(filp->f_path.mnt); return err; } case EXT4_IOC_ALLOC_DA_BLKS: { int err; if (!is_owner_or_cap(inode)) return -EACCES; err = mnt_want_write(filp->f_path.mnt); if (err) return err; err = ext4_alloc_da_blocks(inode); mnt_drop_write(filp->f_path.mnt); return err; } case FITRIM: { struct request_queue *q = bdev_get_queue(sb->s_bdev); struct fstrim_range range; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!blk_queue_discard(q)) return -EOPNOTSUPP; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { ext4_msg(sb, KERN_ERR, "FITRIM not supported with bigalloc"); return -EOPNOTSUPP; } if (copy_from_user(&range, (struct fstrim_range *)arg, sizeof(range))) return -EFAULT; range.minlen = max((unsigned int)range.minlen, q->limits.discard_granularity); ret = ext4_trim_fs(sb, &range); if (ret < 0) return ret; if (copy_to_user((struct fstrim_range *)arg, &range, sizeof(range))) return -EFAULT; return 0; } default: return -ENOTTY; } }
/*
 * JFS ioctl dispatcher.  Flags are stored internally in jfs_inode->mode2
 * and converted to/from the ext2-style user flag layout via jfs_map_ext2().
 */
long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct jfs_inode_info *jfs_inode = JFS_IP(inode);
	unsigned int flags;

	switch (cmd) {
	case JFS_IOC_GETFLAGS:
		jfs_get_inode_flags(jfs_inode);
		flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
		flags = jfs_map_ext2(flags, 0);
		return put_user(flags, (int __user *) arg);
	case JFS_IOC_SETFLAGS: {
		unsigned int oldflags;
		int err;

		/* On success err == 0 and is the final return value. */
		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		if (!is_owner_or_cap(inode)) {
			err = -EACCES;
			goto setflags_out;
		}
		if (get_user(flags, (int __user *) arg)) {
			err = -EFAULT;
			goto setflags_out;
		}

		flags = jfs_map_ext2(flags, 1);
		/* DIRSYNC is meaningful only on directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~JFS_DIRSYNC_FL;

		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode)) {
			err = -EPERM;
			goto setflags_out;
		}

		/* Lock against other parallel changes of flags */
		mutex_lock(&inode->i_mutex);

		jfs_get_inode_flags(jfs_inode);
		oldflags = jfs_inode->mode2;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.  Note an already-immutable inode
		 * also requires the capability for any flag change.
		 */
		if ((oldflags & JFS_IMMUTABLE_FL) ||
			((flags ^ oldflags) &
			(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				err = -EPERM;
				goto setflags_out;
			}
		}

		/* Merge user-modifiable bits with preserved internal bits. */
		flags = flags & JFS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
		jfs_inode->mode2 = flags;

		jfs_set_inode_flags(inode);
		mutex_unlock(&inode->i_mutex);
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
setflags_out:
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}
	default:
		return -ENOTTY;
	}
}
/*
 * reiserfs_ioctl - handler for ioctl for inode
 * supported commands:
 * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
 *                          and prevent packing file (argument arg has to be
 *			     non-zero)
 * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
 * 3) That's all for a while ...
 *
 * Note: the reiserfs write lock is held across the whole switch; every
 * path must fall through to reiserfs_write_unlock() at the bottom.
 */
long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	unsigned int flags;
	int err = 0;

	reiserfs_write_lock(inode->i_sb);

	switch (cmd) {
	case REISERFS_IOC_UNPACK:
		if (S_ISREG(inode->i_mode)) {
			if (arg)
				err = reiserfs_unpack(inode, filp);
		} else
			err = -ENOTTY;
		break;
		/*
		 * following two cases are taken from fs/ext2/ioctl.c by Remy
		 * Card ([email protected])
		 */
	case REISERFS_IOC_GETFLAGS:
		if (!reiserfs_attrs(inode->i_sb)) {
			err = -ENOTTY;
			break;
		}

		flags = REISERFS_I(inode)->i_attrs;
		i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
		err = put_user(flags, (int __user *)arg);
		break;
	case REISERFS_IOC_SETFLAGS:{
			if (!reiserfs_attrs(inode->i_sb)) {
				err = -ENOTTY;
				break;
			}

			err = mnt_want_write(filp->f_path.mnt);
			if (err)
				break;

			if (!is_owner_or_cap(inode)) {
				err = -EPERM;
				goto setflags_out;
			}
			if (get_user(flags, (int __user *)arg)) {
				err = -EFAULT;
				goto setflags_out;
			}
			/*
			 * Is it quota file? Do not allow user to mess with it
			 */
			if (IS_NOQUOTA(inode)) {
				err = -EPERM;
				goto setflags_out;
			}
			/* Toggling IMMUTABLE/APPEND needs CAP_LINUX_IMMUTABLE. */
			if (((flags ^ REISERFS_I(inode)->
			      i_attrs) & (REISERFS_IMMUTABLE_FL |
					  REISERFS_APPEND_FL))
			    && !capable(CAP_LINUX_IMMUTABLE)) {
				err = -EPERM;
				goto setflags_out;
			}
			/* NOTAIL on a regular file requires unpacking first. */
			if ((flags & REISERFS_NOTAIL_FL) &&
			    S_ISREG(inode->i_mode)) {
				int result;

				result = reiserfs_unpack(inode, filp);
				if (result) {
					err = result;
					goto setflags_out;
				}
			}
			sd_attrs_to_i_attrs(flags, inode);
			REISERFS_I(inode)->i_attrs = flags;
			inode->i_ctime = CURRENT_TIME_SEC;
			mark_inode_dirty(inode);
setflags_out:
			mnt_drop_write(filp->f_path.mnt);
			break;
		}
	case REISERFS_IOC_GETVERSION:
		err = put_user(inode->i_generation, (int __user *)arg);
		break;
	case REISERFS_IOC_SETVERSION:
		if (!is_owner_or_cap(inode)) {
			err = -EPERM;
			break;
		}
		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			break;
		if (get_user(inode->i_generation, (int __user *)arg)) {
			err = -EFAULT;
			goto setversion_out;
		}
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
setversion_out:
		mnt_drop_write(filp->f_path.mnt);
		break;
	default:
		err = -ENOTTY;
	}

	reiserfs_write_unlock(inode->i_sb);

	return err;
}
/*
 * setflags - apply FS_IOC_SETFLAGS flags to a UBIFS inode.
 * @inode: inode to change
 * @flags: new FS_*_FL flags (already sanitized by the caller)
 *
 * Budget for a dirtied inode up front; the budget is released either by the
 * write-back machinery (when the inode was already dirty) or explicitly on
 * the error path. Returns zero on success or a negative error code.
 */
static int setflags(struct inode *inode, int flags)
{
	int oldflags, err, release;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
					.dirtied_ino_d = ui->data_len };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
	 * the relevant capability.
	 */
	mutex_lock(&ui->ui_mutex);
	oldflags = ubifs2ioctl(ui->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			err = -EPERM;
			goto out_unlock;
		}
	}

	ui->flags = ioctl2ubifs(flags);
	ubifs_set_inode_flags(inode);
	inode->i_ctime = ubifs_current_time(inode);
	/*
	 * Remember whether the inode was dirty before marking it dirty: if it
	 * already was, mark_inode_dirty_sync() consumes no new budget and we
	 * must release the one we took above.
	 */
	release = ui->dirty;
	mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = write_inode_now(inode, 1);
	return err;

out_unlock:
	ubifs_err("can't modify inode %lu attributes", inode->i_ino);
	mutex_unlock(&ui->ui_mutex);
	ubifs_release_budget(c, &req);
	return err;
}

/*
 * ubifs_ioctl - UBIFS ioctl entry point (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS).
 * Permission and read-only checks happen here; the actual flag update is
 * delegated to setflags().
 */
long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int flags;
	struct inode *inode = file->f_path.dentry->d_inode;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = ubifs2ioctl(ubifs_inode(inode)->flags);

		return put_user(flags, (int __user *) arg);

	case FS_IOC_SETFLAGS: {
		if (IS_RDONLY(inode))
			return -EROFS;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		/* DIRSYNC is meaningful only for directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~FS_DIRSYNC_FL;
		return setflags(inode, flags);
	}

	default:
		return -ENOTTY;
	}
}
int hfsplus_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int flags; switch (cmd) { case HFSPLUS_IOC_EXT2_GETFLAGS: flags = 0; if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE) flags |= FS_IMMUTABLE_FL; /* EXT2_IMMUTABLE_FL */ if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND) flags |= FS_APPEND_FL; /* EXT2_APPEND_FL */ if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP) flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */ return put_user(flags, (int __user *)arg); case HFSPLUS_IOC_EXT2_SETFLAGS: { if (IS_RDONLY(inode)) return -EROFS; if (!is_owner_or_cap(inode)) return -EACCES; if (get_user(flags, (int __user *)arg)) return -EFAULT; if (flags & (FS_IMMUTABLE_FL|FS_APPEND_FL) || HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) { if (!capable(CAP_LINUX_IMMUTABLE)) return -EPERM; } /* don't silently ignore unsupported ext2 flags */ if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) return -EOPNOTSUPP; if (flags & FS_IMMUTABLE_FL) { /* EXT2_IMMUTABLE_FL */ inode->i_flags |= S_IMMUTABLE; HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE; } else { inode->i_flags &= ~S_IMMUTABLE; HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE; } if (flags & FS_APPEND_FL) { /* EXT2_APPEND_FL */ inode->i_flags |= S_APPEND; HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND; } else { inode->i_flags &= ~S_APPEND; HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND; } if (flags & FS_NODUMP_FL) /* EXT2_NODUMP_FL */ HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP; else HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); return 0; } default: return -ENOTTY; } }
/*
 * btrfs_ioctl_setflags - FS_IOC_SETFLAGS handler for btrfs.
 *
 * Validates the requested FS_*_FL flags, enforces CAP_LINUX_IMMUTABLE for
 * IMMUTABLE/APPEND transitions, translates the flags into BTRFS_INODE_*
 * bits and commits them through a joined transaction.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Reject flags btrfs does not support rather than ignoring them. */
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL))
		return -EOPNOTSUPP;

	if (!is_owner_or_cap(inode))
		return -EACCES;

	mutex_lock(&inode->i_mutex);

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	/* IMMUTABLE/APPEND transitions require CAP_LINUX_IMMUTABLE. */
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out_unlock;

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	/* ret is 0 here after the BUG_ON, so the success path returns 0. */
	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	btrfs_update_iflags(inode);
	inode->i_ctime = CURRENT_TIME;
	btrfs_end_transaction(trans, root);

	mnt_drop_write(file->f_path.mnt);
 out_unlock:
	mutex_unlock(&inode->i_mutex);
	/*
	 * BUG FIX: this used to be "return 0;", which silently discarded
	 * -EPERM from the capability check and any mnt_want_write() failure.
	 */
	return ret;
}
/*
 * hfsplus_ioctl_setflags - FS_IOC_SETFLAGS handler for HFS+.
 * @file: file the ioctl was issued on
 * @user_flags: userspace pointer to the requested FS_*_FL flags
 *
 * Supports IMMUTABLE, APPEND and NODUMP only; any other flag is rejected
 * with -EOPNOTSUPP. Changing or already having IMMUTABLE/APPEND requires
 * CAP_LINUX_IMMUTABLE. Returns 0 on success or a negative errno.
 */
static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	unsigned int flags;
	int err = 0;

	err = mnt_want_write(file->f_path.mnt);
	if (err)
		goto out;

	if (!is_owner_or_cap(inode)) {
		err = -EACCES;
		goto out_drop_write;
	}

	if (get_user(flags, user_flags)) {
		err = -EFAULT;
		goto out_drop_write;
	}

	mutex_lock(&inode->i_mutex);

	if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) ||
	    inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			err = -EPERM;
			goto out_unlock_inode;
		}
	}

	/* don't silently ignore unsupported ext2 flags */
	if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
		err = -EOPNOTSUPP;
		goto out_unlock_inode;
	}

	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (flags & FS_NODUMP_FL)
		hip->userflags |= HFSPLUS_FLG_NODUMP;
	else
		hip->userflags &= ~HFSPLUS_FLG_NODUMP;

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);

out_unlock_inode:
	/*
	 * BUG FIX: this label previously called mutex_lock() a second time,
	 * which would deadlock (or trip lock debugging) on every exit through
	 * this path, including the success path. It must release i_mutex.
	 */
	mutex_unlock(&inode->i_mutex);
out_drop_write:
	mnt_drop_write(file->f_path.mnt);
out:
	return err;
}
/*
 * ocfs2_set_inode_attr - update the OCFS2_*_FL attribute bits of an inode.
 * @inode: inode to change
 * @flags: new attribute bits
 * @mask:  which bits of @flags are valid; bits outside @mask keep their
 *         current value
 *
 * Takes i_mutex, then the cluster-wide inode lock, then runs a journal
 * transaction to persist the change. Returns 0 on success or a negative
 * error code.
 */
static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
				unsigned mask)
{
	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	struct buffer_head *bh = NULL;
	unsigned oldflags;
	int status;

	mutex_lock(&inode->i_mutex);

	/* Cluster lock must be held before touching the inode's attrs. */
	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = -EACCES;
	if (!is_owner_or_cap(inode))
		goto bail_unlock;

	/* DIRSYNC is meaningful only for directories. */
	if (!S_ISDIR(inode->i_mode))
		flags &= ~OCFS2_DIRSYNC_FL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	/* Merge: masked bits come from @flags, the rest stay as-is. */
	oldflags = ocfs2_inode->ip_attr;
	flags = flags & mask;
	flags |= oldflags & ~mask;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
	 * the relevant capability.
	 */
	status = -EPERM;
	if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
		(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
		if (!capable(CAP_LINUX_IMMUTABLE))
			goto bail_unlock;
	}

	ocfs2_inode->ip_attr = flags;
	ocfs2_set_inode_flags(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail:
	mutex_unlock(&inode->i_mutex);

	brelse(bh);

	mlog_exit(status);
	return status;
}
/*
 * ext4_ioctl - ext4 ioctl dispatcher.
 *
 * Handles flag get/set, generation get/set, online resize
 * (GROUP_EXTEND/GROUP_ADD) and extent-format migration. Each write-type
 * command takes a write reference on the mount (mnt_want_write) and,
 * where needed, i_mutex and a journal handle; every exit path must undo
 * exactly what it acquired.
 */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT4_IOC_GETFLAGS:
		ext4_get_inode_flags(ei);
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err, migrate = 0;
		struct ext4_iloc iloc;
		unsigned int oldflags;
		unsigned int jflag;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		/* DIRSYNC is meaningful only for directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~EXT4_DIRSYNC_FL;

		err = -EPERM;
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode))
			goto flags_out;

		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT4_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto flags_out;
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE))
				goto flags_out;
		}
		if (oldflags & EXT4_EXTENTS_FL) {
			/* We don't support clearning extent flags */
			if (!(flags & EXT4_EXTENTS_FL)) {
				err = -EOPNOTSUPP;
				goto flags_out;
			}
		} else if (flags & EXT4_EXTENTS_FL) {
			/*
			 * migrate the file: done after the flag update below,
			 * outside the journal handle.
			 */
			migrate = 1;
			flags &= ~EXT4_EXTENTS_FL;
		}

		handle = ext4_journal_start(inode, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto flags_out;
		}
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		/* Only user-modifiable bits may change; preserve the rest. */
		flags = flags & EXT4_FL_USER_MODIFIABLE;
		flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
		ei->i_flags = flags;

		ext4_set_inode_flags(inode);
		inode->i_ctime = ext4_current_time(inode);

		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext4_journal_stop(handle);
		if (err)
			goto flags_out;

		/* Journal-mode change must happen after the handle is closed. */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
			err = ext4_change_inode_journal_flag(inode, jflag);
		if (err)
			goto flags_out;
		if (migrate)
			err = ext4_ext_migrate(inode);
flags_out:
		mutex_unlock(&inode->i_mutex);
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!is_owner_or_cap(inode))
			return -EPERM;
		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}
		handle = ext4_journal_start(inode, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto setversion_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = ext4_current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);
setversion_out:
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}
#ifdef CONFIG_JBD2_DEBUG
	case EXT4_IOC_WAIT_FOR_READONLY:
		/*
		 * This is racy - by the time we're woken up and running,
		 * the superblock could be released. And the module could
		 * have been unloaded. So sue me.
		 *
		 * Returns 1 if it slept, else zero.
		 */
		{
			struct super_block *sb = inode->i_sb;
			DECLARE_WAITQUEUE(wait, current);
			int ret = 0;

			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
			if (timer_pending(&EXT4_SB(sb)->turn_ro_timer)) {
				schedule();
				ret = 1;
			}
			remove_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
			return ret;
		}
#endif
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		struct super_block *sb = inode->i_sb;
		int err, err2;

		/* Online resize is a privileged operation. */
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(n_blocks_count, (__u32 __user *)arg))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		/* Flush the journal so the resize is durable on disk. */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err == 0)
			err = err2;
		mnt_drop_write(filp->f_path.mnt);

		return err;
	}
	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;
		struct super_block *sb = inode->i_sb;
		int err, err2;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input)))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		err = ext4_group_add(sb, &input);
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err == 0)
			err = err2;
		mnt_drop_write(filp->f_path.mnt);

		return err;
	}
	case EXT4_IOC_MIGRATE: {
		int err;
		if (!is_owner_or_cap(inode))
			return -EACCES;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		mutex_lock(&(inode->i_mutex));
		err = ext4_ext_migrate(inode);
		mutex_unlock(&(inode->i_mutex));
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}

	default:
		return -ENOTTY;
	}
}
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; struct ext2_inode_info *ei = EXT2_I(inode); unsigned int flags; unsigned short rsv_window_size; int ret; ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg); switch (cmd) { case EXT2_FAKE_B_ALLOC: /* Fake allocation for ext2 filesystem. * */ { struct ext2_fake_b_alloc_arg config; struct buffer_head bh_result; sector_t iblock, off; int ret = 0; ret = copy_from_user(&config, (struct ext2_fake_b_alloc_arg __user *)arg, sizeof(struct ext2_fake_b_alloc_arg)); if (ret != 0) { printk (KERN_DEBUG "can't copy from user"); return -EIO; } else ret = 0; /* Allocate blocks. */ off = config.efba_off; iblock = config.efba_off >> inode->i_blkbits; while ((iblock << inode->i_blkbits) < (config.efba_off + config.efba_size)) { memset(&bh_result, 0, sizeof(struct ext2_fake_b_alloc_arg)); ret = ext2_get_block(inode, iblock, &bh_result, 1); if (ret < 0) { printk (KERN_DEBUG "get_block_error %d, escaping", ret); break; } iblock++; } /* Set metadata */ write_lock(&EXT2_I(inode)->i_meta_lock); if (ret == 0) { printk (KERN_DEBUG "ok, set size"); inode->i_size = max_t(loff_t, inode->i_size, config.efba_off + config.efba_size); } else if(iblock != config.efba_off >> inode->i_blkbits) { /* Partially allocated, size must be fixed. * * But `i_blocks` should containt actual information. 
*/ inode->i_size = inode->i_blocks << inode->i_blkbits; } inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; inode->i_version++; write_unlock(&EXT2_I(inode)->i_meta_lock); printk(KERN_DEBUG, "returning %d", ret); return ret; } case EXT2_IOC_GETFLAGS: ext2_get_inode_flags(ei); flags = ei->i_flags & EXT2_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); case EXT2_IOC_SETFLAGS: { unsigned int oldflags; ret = mnt_want_write(filp->f_path.mnt); if (ret) return ret; if (!is_owner_or_cap(inode)) { ret = -EACCES; goto setflags_out; } if (get_user(flags, (int __user *) arg)) { ret = -EFAULT; goto setflags_out; } flags = ext2_mask_flags(inode->i_mode, flags); mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { mutex_unlock(&inode->i_mutex); ret = -EPERM; goto setflags_out; } oldflags = ei->i_flags; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. * * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); ret = -EPERM; goto setflags_out; } } flags = flags & EXT2_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; ei->i_flags = flags; mutex_unlock(&inode->i_mutex); ext2_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); setflags_out: mnt_drop_write(filp->f_path.mnt); return ret; } case EXT2_IOC_GETVERSION: return put_user(inode->i_generation, (int __user *) arg); case EXT2_IOC_SETVERSION: if (!is_owner_or_cap(inode)) return -EPERM; ret = mnt_want_write(filp->f_path.mnt); if (ret) return ret; if (get_user(inode->i_generation, (int __user *) arg)) { ret = -EFAULT; } else { inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); } mnt_drop_write(filp->f_path.mnt); return ret; case EXT2_IOC_GETRSVSZ: if (test_opt(inode->i_sb, RESERVATION) && S_ISREG(inode->i_mode) && ei->i_block_alloc_info) 
{ rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size; return put_user(rsv_window_size, (int __user *)arg); } return -ENOTTY; case EXT2_IOC_SETRSVSZ: { if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode)) return -ENOTTY; if (!is_owner_or_cap(inode)) return -EACCES; if (get_user(rsv_window_size, (int __user *)arg)) return -EFAULT; ret = mnt_want_write(filp->f_path.mnt); if (ret) return ret; if (rsv_window_size > EXT2_MAX_RESERVE_BLOCKS) rsv_window_size = EXT2_MAX_RESERVE_BLOCKS; /* * need to allocate reservation structure for this inode * before set the window size */ /* * XXX What lock should protect the rsv_goal_size? * Accessed in ext2_get_block only. ext3 uses i_truncate. */ mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext2_init_block_alloc_info(inode); if (ei->i_block_alloc_info){ struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } mutex_unlock(&ei->truncate_mutex); mnt_drop_write(filp->f_path.mnt); return 0; } default: return -ENOTTY; } }
/*
 * utimes_common - shared back end for the utime/utimes/utimensat syscalls.
 * @path: path of the file whose timestamps are being changed
 * @times: two timespecs (atime, mtime), or NULL for "set both to now";
 *         individual entries may be UTIME_NOW or UTIME_OMIT
 *
 * Builds an iattr describing the requested time update and hands it to
 * notify_change() under i_mutex. Returns 0 on success or a negative errno.
 */
static int utimes_common(struct path *path, struct timespec *times)
{
	int error;
	struct iattr newattrs;
	struct inode *inode = path->dentry->d_inode;

	error = mnt_want_write(path->mnt);
	if (error)
		goto out;

	/* Both UTIME_NOW is equivalent to a NULL times pointer. */
	if (times && times[0].tv_nsec == UTIME_NOW &&
		     times[1].tv_nsec == UTIME_NOW)
		times = NULL;

	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		if (times[0].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_ATIME;
		else if (times[0].tv_nsec != UTIME_NOW) {
			newattrs.ia_atime.tv_sec = times[0].tv_sec;
			newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
			newattrs.ia_valid |= ATTR_ATIME_SET;
		}

		if (times[1].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_MTIME;
		else if (times[1].tv_nsec != UTIME_NOW) {
			newattrs.ia_mtime.tv_sec = times[1].tv_sec;
			newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
			newattrs.ia_valid |= ATTR_MTIME_SET;
		}
		/*
		 * Tell inode_change_ok(), that this is an explicit time
		 * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
		 * were used.
		 */
		newattrs.ia_valid |= ATTR_TIMES_SET;
	} else {
		/*
		 * If times is NULL (or both times are UTIME_NOW),
		 * then we need to check permissions, because
		 * inode_change_ok() won't do it.
		 */
		error = -EACCES;
                if (IS_IMMUTABLE(inode))
			goto mnt_drop_write_and_out;

		/* Non-owners may still update to "now" if they can write. */
		if (!is_owner_or_cap(inode)) {
			error = inode_permission(inode, MAY_WRITE);
			if (error)
				goto mnt_drop_write_and_out;
		}
	}

	/*
	 * NOTE(review): gr_acl_handle_utime() is an out-of-tree (grsecurity)
	 * hook — presumably an extra ACL gate; confirm its contract against
	 * the patched tree this file belongs to.
	 */
	if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
		error = -EACCES;
		goto mnt_drop_write_and_out;
	}

	mutex_lock(&inode->i_mutex);
	error = notify_change(path->dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);

mnt_drop_write_and_out:
	mnt_drop_write(path->mnt);
out:
	return error;
}
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer for the inode being changed
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Merges @reqflags into the current disk flags under an exclusive glock,
 * then journals the updated dinode. Returns 0 on success or a negative
 * errno.
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	/* Exclusive glock: cluster-wide serialization for this inode. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!is_owner_or_cap(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	/* Only bits selected by @mask are taken from @reqflags. */
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	/*
	 * Toggling JDATA changes how data is written; flush the log and any
	 * dirty pages first so no data is in flight in the old mode.
	 */
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
/*
 * ext2_ioctl - ext2 ioctl dispatcher.
 *
 * Handles flag get/set, generation get/set and the block-reservation
 * window size get/set. Write-type commands pair mnt_want_write() /
 * mnt_drop_write() around the update.
 */
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ext2_inode_info *ei = EXT2_I(inode);
	unsigned int flags;
	unsigned short rsv_window_size;
	int ret;

	ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT2_IOC_GETFLAGS:
		ext2_get_inode_flags(ei);
		flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT2_IOC_SETFLAGS: {
		unsigned int oldflags;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		if (!is_owner_or_cap(inode)) {
			ret = -EACCES;
			goto setflags_out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto setflags_out;
		}

		/* DIRSYNC is meaningful only for directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~EXT2_DIRSYNC_FL;

		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto setflags_out;
		}
		oldflags = ei->i_flags;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto setflags_out;
			}
		}

		/* Only user-modifiable bits may change; preserve the rest. */
		flags = flags & EXT2_FL_USER_MODIFIABLE;
		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
		ei->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		ext2_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
setflags_out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	case EXT2_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT2_IOC_SETVERSION:
		if (!is_owner_or_cap(inode))
			return -EPERM;
		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;
		/*
		 * NOTE(review): get_user() writes straight into
		 * inode->i_generation, unserialized — presumably benign here;
		 * confirm against the tree's locking rules.
		 */
		if (get_user(inode->i_generation, (int __user *) arg)) {
			ret = -EFAULT;
		} else {
			inode->i_ctime = CURRENT_TIME_SEC;
			mark_inode_dirty(inode);
		}
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	case EXT2_IOC_GETRSVSZ:
		if (test_opt(inode->i_sb, RESERVATION)
			&& S_ISREG(inode->i_mode)
			&& ei->i_block_alloc_info) {
			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
			return put_user(rsv_window_size, (int __user *)arg);
		}
		return -ENOTTY;
	case EXT2_IOC_SETRSVSZ: {
		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
			return -ENOTTY;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(rsv_window_size, (int __user *)arg))
			return -EFAULT;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		/* Clamp the requested window to the supported maximum. */
		if (rsv_window_size > EXT2_MAX_RESERVE_BLOCKS)
			rsv_window_size = EXT2_MAX_RESERVE_BLOCKS;

		/*
		 * need to allocate reservation structure for this inode
		 * before set the window size
		 */
		/*
		 * XXX What lock should protect the rsv_goal_size?
		 * Accessed in ext2_get_block only. ext3 uses i_truncate.
		 */
		mutex_lock(&ei->truncate_mutex);
		if (!ei->i_block_alloc_info)
			ext2_init_block_alloc_info(inode);

		if (ei->i_block_alloc_info){
			struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
			rsv->rsv_goal_size = rsv_window_size;
		}
		mutex_unlock(&ei->truncate_mutex);
		mnt_drop_write(filp->f_path.mnt);
		return 0;
	}
	default:
		return -ENOTTY;
	}
}