/*
 * gr_tpe_allow - Trusted Path Execution check for an exec of @file.
 *
 * Returns 0 (deny, after logging) when the current non-root user is
 * subject to TPE and the file's PARENT DIRECTORY is either not owned by
 * root or is group-/world-writable; returns 1 to allow the exec.
 *
 * Note that @inode below is the inode of the parent directory of the
 * file being executed, not of the file itself.
 *
 * Fix: the original condition read
 *   (inode->i_uid || (!inode->i_uid && (mode & S_IWGRP) || (mode & S_IWOTH)))
 * where the "!inode->i_uid &&" term is redundant inside the ||; it has
 * been folded away and the two writable-bit tests merged into one mask.
 * The resulting boolean is identical by De Morgan/absorption.
 */
int
gr_tpe_allow(const struct file *file)
{
#ifdef CONFIG_GRKERNSEC
	struct inode *inode = file->f_path.dentry->d_parent->d_inode;
	const struct cred *cred = current_cred();

	if (cred->uid && ((grsec_enable_tpe &&
#ifdef CONFIG_GRKERNSEC_TPE_INVERT
			   /* inverted mode: members of the TPE group are trusted */
			   !in_group_p(grsec_tpe_gid)
#else
			   /* normal mode: members of the TPE group are restricted */
			   in_group_p(grsec_tpe_gid)
#endif
			  ) || gr_acl_tpe_check()) &&
	    /* parent dir not root-owned, or group/world-writable */
	    (inode->i_uid || (inode->i_mode & (S_IWGRP | S_IWOTH)))) {
		gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG,
				  file->f_path.dentry, file->f_path.mnt);
		return 0;
	}
#ifdef CONFIG_GRKERNSEC_TPE_ALL
	/*
	 * "TPE for all non-root users": also deny when the parent dir is
	 * owned by another non-root user, or is group/world-writable.
	 */
	if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
	    ((inode->i_uid && (inode->i_uid != cred->uid)) ||
	     (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
		gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG,
				  file->f_path.dentry, file->f_path.mnt);
		return 0;
	}
#endif
#endif
	return 1;
}
/*
 * Return 0 if current is granted want access to the inode
 * by the acl. Returns -E... otherwise.
 *
 * Walks the ACL entries in order.  USER_OBJ/USER matches jump out of the
 * loop immediately; GROUP_OBJ/GROUP matches only short-circuit when they
 * actually grant the requested bits (per POSIX 1003.1e, a non-granting
 * matching group entry keeps scanning but forces -EACCES at ACL_OTHER).
 * USER/GROUP grants are additionally filtered through the ACL_MASK entry.
 */
int
posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
{
	const struct posix_acl_entry *pa, *pe, *mask_obj;
	int found = 0;

	/* Only the rwx-style bits (plus MAY_NOT_BLOCK) take part here. */
	want &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK;

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch(pa->e_tag) {
			case ACL_USER_OBJ:
				/* (May have been checked already) */
				if (inode->i_uid == current_fsuid())
					goto check_perm;	/* owner: no mask applied */
				break;
			case ACL_USER:
				if (pa->e_id == current_fsuid())
					goto mask;	/* named user: mask applies */
				break;
			case ACL_GROUP_OBJ:
				if (in_group_p(inode->i_gid)) {
					found = 1;
					if ((pa->e_perm & want) == want)
						goto mask;
				}
				break;
			case ACL_GROUP:
				if (in_group_p(pa->e_id)) {
					found = 1;
					if ((pa->e_perm & want) == want)
						goto mask;
				}
				break;
			case ACL_MASK:
				/* handled in the "mask" scan below */
				break;
			case ACL_OTHER:
				/* a matching group entry that didn't grant
				 * the bits denies access here */
				if (found)
					return -EACCES;
				else
					goto check_perm;
			default:
				return -EIO;
		}
	}
	/* malformed ACL: no ACL_OTHER entry terminated the walk */
	return -EIO;

mask:
	/* apply the ACL_MASK entry (if any) to a USER/GROUP grant */
	for (mask_obj = pa+1; mask_obj != pe; mask_obj++) {
		if (mask_obj->e_tag == ACL_MASK) {
			if ((pa->e_perm & mask_obj->e_perm & want) == want)
				return 0;
			return -EACCES;
		}
	}

check_perm:
	if ((pa->e_perm & want) == want)
		return 0;
	return -EACCES;
}
/** * inode_change_ok - check if attribute changes to an inode are allowed * @inode: inode to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr * in the given inode. This includes the normal unix access permission * checks, as well as checks for rlimits and others. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable_wrt_inode_uidgid(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; }
/*
 * inode_change_ok - check if attribute changes to an inode are allowed.
 * Pre-user-namespace variant: raw uid/gid comparisons and plain capable().
 * Returns 0 if the change is permitted, -EPERM (or a size-check error)
 * otherwise.
 */
int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	/* Size constraints first; these cannot be overridden by ATTR_FORCE. */
	if (ia_valid & ATTR_SIZE) {
		int error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;
	}

	/* If force is set, skip all remaining permission checks. */
	if (ia_valid & ATTR_FORCE)
		return 0;

	/* Make sure a caller can chown: only a no-op chown by the owner,
	 * or CAP_CHOWN. */
	if ((ia_valid & ATTR_UID) &&
	    (current_fsuid() != inode->i_uid ||
	     attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
		return -EPERM;

	/* Make sure caller can chgrp: the owner may switch the file to a
	 * group it belongs to. */
	if ((ia_valid & ATTR_GID) &&
	    (current_fsuid() != inode->i_uid ||
	     (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) &&
	    !capable(CAP_CHOWN))
		return -EPERM;

	/* Make sure a caller can chmod. */
	if (ia_valid & ATTR_MODE) {
		if (!inode_owner_or_capable(inode))
			return -EPERM;
		/* Also check the setgid bit: strip it unless the caller is
		 * in the (possibly new) group or holds CAP_FSETID. */
		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
				inode->i_gid) && !capable(CAP_FSETID))
			attr->ia_mode &= ~S_ISGID;
	}

	/* Check for setting the inode time: owner or privileged only. */
	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
		if (!inode_owner_or_capable(inode))
			return -EPERM;
	}

	return 0;
}
int inode_setattr(struct inode * inode, struct iattr * attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_SIZE && attr->ia_size != i_size_read(inode)) { int error = vmtruncate(inode, attr->ia_size); if (error) return error; } if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } mark_inode_dirty(inode); return 0; }
/*
 * obdo_from_iattr - translate a VFS iattr into Lustre obdo fields.
 * For each ATTR_* bit set in @ia_valid the attribute is copied into @oa
 * and the matching OBD_MD_* validity flag is set.
 */
void obdo_from_iattr(struct obdo *oa, struct iattr *attr,
		     unsigned int ia_valid)
{
	if (ia_valid & ATTR_ATIME) {
		oa->o_atime = LTIME_S(attr->ia_atime);
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if (ia_valid & ATTR_MTIME) {
		oa->o_mtime = LTIME_S(attr->ia_mtime);
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if (ia_valid & ATTR_CTIME) {
		oa->o_ctime = LTIME_S(attr->ia_ctime);
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (ia_valid & ATTR_SIZE) {
		oa->o_size = attr->ia_size;
		oa->o_valid |= OBD_MD_FLSIZE;
	}
	if (ia_valid & ATTR_MODE) {
		oa->o_mode = attr->ia_mode;
		oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
		/*
		 * NOTE(review): this reads oa->o_gid before the ATTR_GID
		 * branch below overwrites it, so when both ATTR_MODE and
		 * ATTR_GID are requested the setgid check runs against the
		 * pre-existing group, not attr->ia_gid — confirm intended.
		 */
		if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
		    !cfs_capable(CFS_CAP_FSETID))
			oa->o_mode &= ~S_ISGID;
	}
	if (ia_valid & ATTR_UID) {
		oa->o_uid = from_kuid(&init_user_ns, attr->ia_uid);
		oa->o_valid |= OBD_MD_FLUID;
	}
	if (ia_valid & ATTR_GID) {
		oa->o_gid = from_kgid(&init_user_ns, attr->ia_gid);
		oa->o_valid |= OBD_MD_FLGID;
	}
}
int inode_setattr(struct inode * inode, struct iattr * attr) { unsigned int ia_valid = attr->ia_valid; int error = 0; if (ia_valid & ATTR_SIZE) { error = vmtruncate(inode, attr->ia_size); if (error) goto out; } if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = attr->ia_atime; if (ia_valid & ATTR_MTIME) inode->i_mtime = attr->ia_mtime; if (ia_valid & ATTR_CTIME) inode->i_ctime = attr->ia_ctime; if (ia_valid & ATTR_MODE) { inode->i_mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) inode->i_mode &= ~S_ISGID; } mark_inode_dirty(inode); out: return error; }
static void __setattr_copy(struct inode *inode, const struct iattr *attr) { struct f2fs_inode_info *fi = F2FS_I(inode); unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(fi, mode); } }
/**
 * can_use_rp - check whether the user is allowed to use reserved pool.
 * @c: UBIFS file-system description object
 *
 * UBIFS keeps a "reserved pool" of flash space for the superuser and for
 * the UID/GID recorded in the UBIFS superblock.  Returns %1 when the
 * current user may draw from it, %0 otherwise.
 */
static int can_use_rp(struct ubifs_info *c)
{
	/* rp_gid == 0 means "no reserved-pool group configured". */
	return current->fsuid == c->rp_uid ||
	       capable(CAP_SYS_RESOURCE) ||
	       (c->rp_gid != 0 && in_group_p(c->rp_gid));
}
/** * setattr_copy - copy simple metadata updates into the generic inode * @inode: the inode to be updated * @attr: the new attributes * * setattr_copy must be called with i_mutex held. * * setattr_copy updates the inode's metadata with that specified * in attr. Noticeably missing is inode size update, which is more complex * as it requires pagecache updates. * * The inode is not marked as dirty after this operation. The rationale is * that for "simple" filesystems, the struct inode is the inode storage. * The caller is free to mark the inode dirty afterwards if needed. */ void setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } }
/** * can_use_rp - check whether the user is allowed to use reserved pool. * @c: UBIFS file-system description object * * UBIFS has so-called "reserved pool" which is flash space reserved * for the superuser and for uses whose UID/GID is recorded in UBIFS superblock. * This function checks whether current user is allowed to use reserved pool. * Returns %1 current user is allowed to use reserved pool and %0 otherwise. */ static int can_use_rp(struct ubifs_info *c) { if (uid_eq(current_fsuid(), c->rp_uid) || capable(CAP_SYS_RESOURCE) || (!gid_eq(c->rp_gid, GLOBAL_ROOT_GID) && in_group_p(c->rp_gid))) return 1; return 0; }
static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec, struct md_op_data *op_data) { rec->sa_opcode = REINT_SETATTR; rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid()); rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid()); rec->sa_cap = cfs_curproc_cap_pack(); rec->sa_suppgid = -1; rec->sa_fid = op_data->op_fid1; rec->sa_valid = attr_pack(op_data->op_attr.ia_valid); rec->sa_mode = op_data->op_attr.ia_mode; rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid); rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid); rec->sa_size = op_data->op_attr.ia_size; rec->sa_blocks = op_data->op_attr_blocks; rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime); rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime); rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime); rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags; if ((op_data->op_attr.ia_valid & ATTR_GID) && in_group_p(op_data->op_attr.ia_gid)) rec->sa_suppgid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid); else rec->sa_suppgid = op_data->op_suppgids[0]; rec->sa_bias = op_data->op_bias; }
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 * @flags:	allocation flags; EXT4_MB_USE_ROOT_BLOCKS permits dipping
 *		into the root-reserved blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks,
				unsigned int flags)
{
	s64 free_blocks, dirty_blocks, root_blocks;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	/* Cheap, possibly stale per-cpu reads first. */
	free_blocks = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

	/* Near the watermark the per-cpu error margin matters:
	 * take the expensive exact sums instead. */
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope. Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}
/*
 * sysfs_setattr - ->setattr for sysfs: validate, apply to the VFS inode,
 * then persist the attributes in the sysfs_dirent so they survive inode
 * eviction.
 *
 * NOTE(review): sd->s_iattr is allocated only after inode_setattr() has
 * already modified the inode; if the kzalloc fails we return -ENOMEM
 * with the in-core inode changed but nothing persisted — confirm callers
 * tolerate this.
 */
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
	struct inode * inode = dentry->d_inode;
	struct sysfs_dirent * sd = dentry->d_fsdata;
	struct iattr * sd_iattr;
	unsigned int ia_valid = iattr->ia_valid;
	int error;

	if (!sd)
		return -EINVAL;

	sd_iattr = sd->s_iattr;

	/* Standard unix permission checks first. */
	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	error = inode_setattr(inode, iattr);
	if (error)
		return error;

	if (!sd_iattr) {
		/* setting attributes for the first time, allocate now */
		sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
		if (!sd_iattr)
			return -ENOMEM;
		/* assign default attributes */
		sd_iattr->ia_mode = sd->s_mode;
		sd_iattr->ia_uid = 0;
		sd_iattr->ia_gid = 0;
		sd_iattr->ia_atime = sd_iattr->ia_mtime =
			sd_iattr->ia_ctime = CURRENT_TIME;
		sd->s_iattr = sd_iattr;
	}

	/* attributes were changed atleast once in past */
	if (ia_valid & ATTR_UID)
		sd_iattr->ia_uid = iattr->ia_uid;
	if (ia_valid & ATTR_GID)
		sd_iattr->ia_gid = iattr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		sd_iattr->ia_atime = timespec_trunc(iattr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		sd_iattr->ia_mtime = timespec_trunc(iattr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		sd_iattr->ia_ctime = timespec_trunc(iattr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = iattr->ia_mode;

		/* Drop setgid unless caller is in the group or privileged. */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		sd_iattr->ia_mode = sd->s_mode = mode;
	}

	return error;
}
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 * Snapshot-aware variant: while a snapshot is active, non-COW writers
 * must additionally keep the snapshot reservation intact.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
	s64 free_blocks, dirty_blocks, root_blocks;
#ifdef CONFIG_EXT4_FS_SNAPSHOT_CTL_RESERVE
	ext4_fsblk_t snapshot_r_blocks;
	handle_t *handle = journal_current_handle();
#endif
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	/* Cheap, possibly stale per-cpu reads first. */
	free_blocks = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
#ifdef CONFIG_EXT4_FS_SNAPSHOT_CTL_RESERVE
	if (handle && sbi->s_active_snapshot) {
		snapshot_r_blocks =
			le64_to_cpu(sbi->s_es->s_snapshot_r_blocks_count);
		/*
		 * snapshot reserved blocks for COWing to active snapshot
		 */
		if (free_blocks < snapshot_r_blocks + 1 &&
		    !IS_COWING(handle)) {
			return 0;
		}
		/*
		 * mortal users must reserve blocks for both snapshot and
		 * root user
		 */
		root_blocks += snapshot_r_blocks;
	}
#endif
	/* Near the watermark the per-cpu error margin matters:
	 * take the expensive exact sums instead. */
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
		/*
		 * NOTE(review): percpu_counter_sum_positive() clamps at 0,
		 * so this negative check looks unreachable — confirm
		 * whether percpu_counter_sum() was intended.
		 */
		if (dirty_blocks < 0) {
			printk(KERN_CRIT "Dirty block accounting "
					"went wrong %lld\n",
					(long long)dirty_blocks);
		}
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope. Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}
/*
 * linvfs_setattr - ->setattr for XFS's old Linux glue layer: translate
 * the generic iattr into an XFS vattr_t and hand it to VOP_SETATTR.
 */
STATIC int
linvfs_setattr(
	struct dentry	*dentry,
	struct iattr	*attr)
{
	struct inode	*inode = dentry->d_inode;
	unsigned int	ia_valid = attr->ia_valid;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	vattr_t		vattr;
	int		flags = 0;
	int		error;

	memset(&vattr, 0, sizeof(vattr_t));
	if (ia_valid & ATTR_UID) {
		vattr.va_mask |= XFS_AT_UID;
		vattr.va_uid = attr->ia_uid;
	}
	if (ia_valid & ATTR_GID) {
		vattr.va_mask |= XFS_AT_GID;
		vattr.va_gid = attr->ia_gid;
	}
	if (ia_valid & ATTR_SIZE) {
		vattr.va_mask |= XFS_AT_SIZE;
		vattr.va_size = attr->ia_size;
	}
	if (ia_valid & ATTR_ATIME) {
		vattr.va_mask |= XFS_AT_ATIME;
		vattr.va_atime = attr->ia_atime;
		/* atime is mirrored into the VFS inode immediately */
		inode->i_atime = attr->ia_atime;
	}
	if (ia_valid & ATTR_MTIME) {
		vattr.va_mask |= XFS_AT_MTIME;
		vattr.va_mtime = attr->ia_mtime;
	}
	if (ia_valid & ATTR_CTIME) {
		vattr.va_mask |= XFS_AT_CTIME;
		vattr.va_ctime = attr->ia_ctime;
	}
	if (ia_valid & ATTR_MODE) {
		vattr.va_mask |= XFS_AT_MODE;
		vattr.va_mode = attr->ia_mode;
		/*
		 * NOTE(review): setgid is stripped on the VFS inode's
		 * i_mode directly rather than on vattr.va_mode, so the
		 * mode sent to VOP_SETATTR keeps S_ISGID — confirm this
		 * is intentional.
		 */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			inode->i_mode &= ~S_ISGID;
	}
	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET))
		flags |= ATTR_UTIME;
#ifdef ATTR_NO_BLOCK
	if ((ia_valid & ATTR_NO_BLOCK))
		flags |= ATTR_NONBLOCK;
#endif
	VOP_SETATTR(vp, &vattr, flags, NULL, error);
	if (error)
		return -error;	/* XFS errors are positive; negate for VFS */
	vn_revalidate(vp);
	return error;	/* error == 0 here */
}
/*
 * fuse_allow_set_time - may the caller update file times on @inode?
 *
 * With the FUSE_ALLOW_UTIME_GRP mount flag set, a non-owner that is in
 * the file's group and has group write permission may set timestamps.
 * Returns 1 to allow, 0 to fall back to the default permission check.
 */
static int fuse_allow_set_time(struct fuse_conn *fc, struct inode *inode)
{
	if (fc->flags & FUSE_ALLOW_UTIME_GRP) {
		/*
		 * NOTE(review): compares current_uid() rather than
		 * current_fsuid(); filesystem permission checks normally
		 * use the fsuid — confirm this is intentional.
		 */
		if (current_uid() != inode->i_uid &&
		    inode->i_mode & S_IWGRP &&
		    in_group_p(inode->i_gid))
			return 1;
	}
	return 0;
}
/*
 * ovl_posix_acl_xattr_set - set a POSIX ACL xattr on an overlayfs inode.
 *
 * Validates the ACL and the caller's permission BEFORE copy-up, clears
 * an orphaned setgid bit with the caller's own credentials (the real
 * setxattr later runs with the mounter's credentials, which would skip
 * that check), then forwards the xattr to the underlying layer.
 */
static int __maybe_unused
ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
			struct dentry *dentry, struct inode *inode,
			const char *name, const void *value,
			size_t size, int flags)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *realinode = ovl_inode_real(inode);
	struct posix_acl *acl = NULL;
	int err;

	/* Check that everything is OK before copy-up */
	if (value) {
		acl = posix_acl_from_xattr(&init_user_ns, value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
	}
	err = -EOPNOTSUPP;
	if (!IS_POSIXACL(d_inode(workdir)))
		goto out_acl_release;
	if (!realinode->i_op->set_acl)
		goto out_acl_release;
	/* default ACLs only make sense on directories; removing is a no-op */
	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) {
		err = acl ? -EACCES : 0;
		goto out_acl_release;
	}
	err = -EPERM;
	if (!inode_owner_or_capable(inode))
		goto out_acl_release;

	posix_acl_release(acl);

	/*
	 * Check if sgid bit needs to be cleared (actual setacl operation will
	 * be done with mounter's capabilities and so that won't do it for us).
	 */
	if (unlikely(inode->i_mode & S_ISGID) &&
	    handler->flags == ACL_TYPE_ACCESS &&
	    !in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
		struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };

		err = ovl_setattr(dentry, &iattr);
		if (err)
			return err;
	}

	err = ovl_xattr_set(dentry, inode, handler->name, value, size, flags);
	if (!err)
		/* mirror the resulting attributes back to the overlay inode */
		ovl_copyattr(ovl_inode_real(inode), inode);

	return err;

out_acl_release:
	posix_acl_release(acl);
	return err;
}
/*
 * prepare_binprm - userspace (UML-style) binprm setup using libc calls.
 *
 * Validates that the fd refers to an executable regular file, computes
 * the effective credentials the new image would run with (honouring
 * setuid, and setgid-with-group-exec; setgid without group-exec marks
 * mandatory locking, not a setgid executable), and reads the first 128
 * bytes of the file into bprm->buf.  Returns the byte count read, or
 * -errno / -EACCES on failure.
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
	struct stat st;
	int mode;
	int retval, id_change;

	if(fstat(bprm->fd, &st) < 0) {
		return(-errno);
	}
	mode = st.st_mode;
	if(!S_ISREG(mode)) {	/* Must be regular file */
		return(-EACCES);
	}
	if(!(mode & 0111)) {	/* Must have at least one execute bit set */
		return(-EACCES);
	}

	bprm->e_uid = geteuid();
	bprm->e_gid = getegid();
	id_change = 0;

	/* Set-uid? */
	if(mode & S_ISUID) {
		bprm->e_uid = st.st_uid;
		if(bprm->e_uid != geteuid()) {
			id_change = 1;
		}
	}

	/* Set-gid? */
	/*
	 * If setgid is set but no group execute bit then this
	 * is a candidate for mandatory locking, not a setgid
	 * executable.
	 */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->e_gid = st.st_gid;
		if (!in_group_p(bprm->e_gid)) {
			id_change = 1;
		}
	}

	/* Read the header: first 128 bytes, zero-padded. */
	memset(bprm->buf, 0, sizeof(bprm->buf));
	retval = lseek(bprm->fd, 0L, SEEK_SET);
	if(retval >= 0) {
		retval = read(bprm->fd, bprm->buf, 128);
	}
	if(retval < 0) {
		/* NOTE(review): a read failure aborts the whole process
		 * instead of returning -errno — confirm intended. */
		perror("prepare_binprm");
		exit(-1);
		/* return(-errno); */
	}
	else {
		return(retval);
	}
}
/*
 * sysfs_setattr - ->setattr for sysfs: validate, apply to the VFS inode
 * (size changes are ignored — sysfs nodes have no backing store to
 * truncate), then persist the attributes in the sysfs_dirent so they
 * survive inode eviction.
 */
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
	struct inode * inode = dentry->d_inode;
	struct sysfs_dirent * sd = dentry->d_fsdata;
	struct sysfs_inode_attrs *sd_attrs;
	struct iattr *iattrs;
	unsigned int ia_valid = iattr->ia_valid;
	int error;

	if (!sd)
		return -EINVAL;

	sd_attrs = sd->s_iattr;

	/* Standard unix permission checks first. */
	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */

	error = inode_setattr(inode, iattr);
	if (error)
		return error;

	if (!sd_attrs) {
		/* setting attributes for the first time, allocate now */
		sd_attrs = sysfs_init_inode_attrs(sd);
		if (!sd_attrs)
			return -ENOMEM;
		sd->s_iattr = sd_attrs;
	}
	/* attributes were changed at least once in past */
	iattrs = &sd_attrs->ia_iattr;

	if (ia_valid & ATTR_UID)
		iattrs->ia_uid = iattr->ia_uid;
	if (ia_valid & ATTR_GID)
		iattrs->ia_gid = iattr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = iattr->ia_mode;

		/* Drop setgid unless caller is in the group or privileged. */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		iattrs->ia_mode = sd->s_mode = mode;
	}
	return error;
}
/*
 * ll_i2gids - pack supplementary groups for an MDS request.
 *
 * For each inode, record its group in the suppgids slot when the caller
 * actually belongs to that group, otherwise store -1 ("none").  The
 * second slot is -1 when no second inode is involved.
 */
void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
{
	LASSERT(i1 != NULL);
	LASSERT(suppgids != NULL);

	suppgids[0] = in_group_p(i1->i_stbuf.st_gid) ?
			i1->i_stbuf.st_gid : (__u32)-1;

	suppgids[1] = (i2 && in_group_p(i2->i_stbuf.st_gid)) ?
			i2->i_stbuf.st_gid : (__u32)-1;
}
/*
 * xfs_vn_setattr - ->setattr for XFS: translate the generic iattr into a
 * bhv_vattr_t and hand it to xfs_setattr().
 */
STATIC int
xfs_vn_setattr(
	struct dentry	*dentry,
	struct iattr	*attr)
{
	struct inode	*inode = dentry->d_inode;
	unsigned int	ia_valid = attr->ia_valid;
	bhv_vattr_t	vattr = { 0 };
	int		flags = 0;
	int		error;

	if (ia_valid & ATTR_UID) {
		vattr.va_mask |= XFS_AT_UID;
		vattr.va_uid = attr->ia_uid;
	}
	if (ia_valid & ATTR_GID) {
		vattr.va_mask |= XFS_AT_GID;
		vattr.va_gid = attr->ia_gid;
	}
	if (ia_valid & ATTR_SIZE) {
		vattr.va_mask |= XFS_AT_SIZE;
		vattr.va_size = attr->ia_size;
	}
	if (ia_valid & ATTR_ATIME) {
		vattr.va_mask |= XFS_AT_ATIME;
		vattr.va_atime = attr->ia_atime;
		/* atime is mirrored into the VFS inode immediately */
		inode->i_atime = attr->ia_atime;
	}
	if (ia_valid & ATTR_MTIME) {
		vattr.va_mask |= XFS_AT_MTIME;
		vattr.va_mtime = attr->ia_mtime;
	}
	if (ia_valid & ATTR_CTIME) {
		vattr.va_mask |= XFS_AT_CTIME;
		vattr.va_ctime = attr->ia_ctime;
	}
	if (ia_valid & ATTR_MODE) {
		vattr.va_mask |= XFS_AT_MODE;
		vattr.va_mode = attr->ia_mode;
		/*
		 * NOTE(review): setgid is stripped on the VFS inode's
		 * i_mode directly rather than on vattr.va_mode, so the
		 * mode passed to xfs_setattr() keeps S_ISGID — confirm
		 * this is intentional.
		 */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			inode->i_mode &= ~S_ISGID;
	}

	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET))
		flags |= ATTR_UTIME;
#ifdef ATTR_NO_BLOCK
	if ((ia_valid & ATTR_NO_BLOCK))
		flags |= ATTR_NONBLOCK;
#endif

	error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL);
	if (likely(!error))
		vn_revalidate(vn_from_inode(inode));
	/* XFS errors are positive; negate for the VFS */
	return -error;
}
/* POSIX UID/GID verification for setting inode attributes. */ int inode_change_ok(struct inode *inode, struct iattr *attr) { int retval = -EPERM; unsigned int ia_valid = attr->ia_valid; /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) goto fine; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (current->fsuid != inode->i_uid || attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN)) goto error; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (current->fsuid != inode->i_uid || (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) && !capable(CAP_CHOWN)) goto error; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) goto error; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable(CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) goto error; } fine: retval = 0; error: return retval; }
/*
 * gr_log_shmrm - audit removal of a SysV shared-memory segment.
 * Logs when IPC auditing is enabled, either globally or (when group
 * gating is on) for members of the audit group.
 */
void
gr_log_shmrm(const uid_t uid, const uid_t cuid)
{
#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
	/* (A && B && C) || (C && !A)  ==  C && (!A || B) */
	if (grsec_enable_audit_ipc &&
	    (!grsec_enable_group || in_group_p(grsec_audit_gid)))
		gr_log_int_int(GR_DO_AUDIT, GR_SHMR_AUDIT_MSG, uid, cuid);
#endif
	return;
}
/*
 * change the ownership of a key
 * - the keyring owned by the changer
 * - if the uid or gid is -1, then that parameter is not changed
 * - implements keyctl(KEYCTL_CHOWN)
 *
 * Non-sysadmin callers may only give the key to a group they belong to,
 * and may not change the UID at all; UID changes are unimplemented even
 * for the sysadmin (-EOPNOTSUPP).
 */
long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
{
	struct key *key;
	key_ref_t key_ref;
	long ret;

	ret = 0;
	/* both -1 means "change nothing": succeed without looking up */
	if (uid == (uid_t) -1 && gid == (gid_t) -1)
		goto error;

	key_ref = lookup_user_key(NULL, id, 1, 1, KEY_SETATTR);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error;
	}

	key = key_ref_to_ptr(key_ref);

	/* make the changes with the locks held to prevent chown/chown races */
	ret = -EACCES;
	down_write(&key->sem);

	if (!capable(CAP_SYS_ADMIN)) {
		/* only the sysadmin can chown a key to some other UID */
		if (uid != (uid_t) -1 && key->uid != uid)
			goto no_access;

		/* only the sysadmin can set the key's GID to a group other
		 * than one of those that the current process subscribes to */
		if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
			goto no_access;
	}

	/* change the UID (have to update the quotas) */
	if (uid != (uid_t) -1 && uid != key->uid) {
		/* don't support UID changing yet */
		ret = -EOPNOTSUPP;
		goto no_access;
	}

	/* change the GID */
	if (gid != (gid_t) -1)
		key->gid = gid;

	ret = 0;

no_access:
	up_write(&key->sem);
	key_put(key);
error:
	return ret;

} /* end keyctl_chown_key() */
/*
 * gr_log_semget - audit successful creation of a SysV semaphore set.
 * Logs only for a successful semget() with IPC_CREAT, and only when IPC
 * auditing applies to the caller (globally, or via the audit group).
 */
void
gr_log_semget(const int err, const int semflg)
{
#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
	/* (A && B && C) || (C && !A)  ==  C && (!A || B) */
	if (grsec_enable_audit_ipc &&
	    (!grsec_enable_group || in_group_p(grsec_audit_gid)) &&
	    (err >= 0) && (semflg & IPC_CREAT))
		gr_log_noargs(GR_DO_AUDIT, GR_SEM_AUDIT_MSG);
#endif
	return;
}
/*
 * gr_log_shmget - audit successful creation of a SysV shm segment.
 * Logs only for a successful shmget() with IPC_CREAT, and only when IPC
 * auditing applies to the caller (globally, or via the audit group).
 */
void
gr_log_shmget(const int err, const int shmflg, const size_t size)
{
#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
	/* (A && B && C) || (C && !A)  ==  C && (!A || B) */
	if (grsec_enable_audit_ipc &&
	    (!grsec_enable_group || in_group_p(grsec_audit_gid)) &&
	    (err >= 0) && (shmflg & IPC_CREAT))
		gr_log_int(GR_DO_AUDIT, GR_SHM_AUDIT_MSG, size);
#endif
	return;
}
/*
 * gr_log_msgget - audit successful creation of a SysV message queue.
 * Logs only for a successful msgget() with IPC_CREAT, and only when IPC
 * auditing applies to the caller (globally, or via the audit group).
 */
void
gr_log_msgget(const int ret, const int msgflg)
{
#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
	/* (A && B && C) || (C && !A)  ==  C && (!A || B) */
	if (grsec_enable_audit_ipc &&
	    (!grsec_enable_group || in_group_p(grsec_audit_gid)) &&
	    (ret >= 0) && (msgflg & IPC_CREAT))
		gr_log_noargs(GR_DO_AUDIT, GR_MSGQ_AUDIT_MSG);
#endif
	return;
}
/*
 * gr_handle_exec_args - log the argv of an exec for exec auditing.
 *
 * Copies up to 128 bytes of the argument vector from userspace into the
 * shared gr_exec_arg_buf (serialized by gr_exec_arg_mutex), replaces
 * unprintable bytes with spaces, and emits an audit log line.  Applies
 * when exec logging is enabled globally, or only for members of the
 * audit group when group gating is on.
 */
void
gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
{
#ifdef CONFIG_GRKERNSEC_EXECLOG
	char *grarg = gr_exec_arg_buf;
	unsigned int i, x, execlen = 0;
	char c;

	if (!((grsec_enable_execlog && grsec_enable_group &&
	       in_group_p(grsec_audit_gid))
	      || (grsec_enable_execlog && !grsec_enable_group)))
		return;

	mutex_lock(&gr_exec_arg_mutex);
	memset(grarg, 0, sizeof(gr_exec_arg_buf));

	for (i = 0; i < bprm->argc && execlen < 128; i++) {
		const char __user *p;
		unsigned int len;

		p = get_user_arg_ptr(argv, i);
		if (IS_ERR(p))
			goto log;	/* log whatever was gathered so far */

		/* strnlen_user counts the NUL; clamp to the space left,
		 * otherwise drop the trailing NUL from the count */
		len = strnlen_user(p, 128 - execlen);
		if (len > 128 - execlen)
			len = 128 - execlen;
		else if (len > 0)
			len--;
		if (copy_from_user(grarg + execlen, p, len))
			goto log;

		/* rewrite unprintable characters */
		for (x = 0; x < len; x++) {
			c = *(grarg + execlen + x);
			if (c < 32 || c > 126)
				*(grarg + execlen + x) = ' ';
		}

		/* separate arguments with a space; keep NUL-terminated */
		execlen += len;
		*(grarg + execlen) = ' ';
		*(grarg + execlen + 1) = '\0';
		execlen++;
	}

      log:
	gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
		      bprm->file->f_path.mnt, grarg);
	mutex_unlock(&gr_exec_arg_mutex);
#endif
	return;
}
/*
 * inode_change_ok - permission check for setting inode attributes
 * (ELKS-era variant: euid comparisons and suser() instead of
 * capabilities).  Returns 0 when the change is allowed, -EPERM
 * otherwise.  Note suser() historically has side effects, so the
 * call order below must be preserved.
 */
static int inode_change_ok(register struct inode *inode,
			   register struct iattr *attr)
{
	/*
	 *      If force is set do it anyway.
	 */

	if (attr->ia_valid & ATTR_FORCE)
		return 0;

	/* Make sure a caller can chown */
	if ((attr->ia_valid & ATTR_UID) &&
	    (current->euid != inode->i_uid ||
	     attr->ia_uid != inode->i_uid) && !suser()) {
		return -EPERM;
	}

	/* Make sure caller can chgrp */
	if ((attr->ia_valid & ATTR_GID) &&
	    (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid) &&
	    !suser())return -EPERM;

	/* Make sure a caller can chmod */
	if (attr->ia_valid & ATTR_MODE) {
		if ((current->euid != inode->i_uid) && !suser())
			return -EPERM;
		/* Also check the setgid bit: strip it unless superuser or
		 * in the (possibly new) group */
		if (!suser() && !in_group_p((attr->ia_valid & ATTR_GID)
					    ? attr->ia_gid
					    : inode->i_gid))
			attr->ia_mode &= ~S_ISGID;
	}

	/* Check for setting the inode time: owner or superuser only */
	if ((attr->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)) &&
	    ((current->euid != inode->i_uid) && !suser()))
		return -EPERM;
	return 0;
}