int __init jail_setup(char *line, int *add)
{
        int ok = 1;

        if (jail)
                return 0;
#ifdef CONFIG_SMP
        printf("'jail' may not be used in a kernel with CONFIG_SMP "
               "enabled\n");
        ok = 0;
#endif
#ifdef CONFIG_HOSTFS
        printf("'jail' may not be used in a kernel with CONFIG_HOSTFS "
               "enabled\n");
        ok = 0;
#endif
#ifdef CONFIG_MODULES
        printf("'jail' may not be used in a kernel with CONFIG_MODULES "
               "enabled\n");
        ok = 0;
#endif
        if (!ok)
                exit(1);

        /* CAP_SYS_RAWIO controls the ability to open /dev/mem and /dev/kmem.
         * Removing it from the bounding set eliminates the ability of anything
         * to acquire it, and thus read or write kernel memory.
         */
        cap_lower(cap_bset, CAP_SYS_RAWIO);
        jail = 1;
        return 0;
}
void cfs_cap_lower(cfs_cap_t cap)
{
        struct cred *cred;

        if ((cred = prepare_creds())) {
                cap_lower(cred->cap_effective, cfs_cap_unpack(cap));
                commit_creds(cred);
        }
}
static long ugidctl_setgid(struct ugidctl_context *ctx, void __user *arg)
{
        struct ugidctl_setid_rq req;
        enum pid_type ptype;
        struct cred *cred;
        gid_t gid;
        pid_t pid;
        long rc;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        gid = req.gid;

        if (capable(CAP_SETGID))
                return ugidctl_sys_setgid(gid);

        if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
                return -EPERM;

        mutex_lock(&ctx->lock);

        if (ugidctl_find_gid(ctx, gid)) {
                mutex_unlock(&ctx->lock);
                return -EPERM;
        }

        ptype = ctx->ptype;
        pid = ctx->pid;

        mutex_unlock(&ctx->lock);

        if (pid != pid_nr(get_task_pid(current, ptype)))
                return -EPERM;

        cred = prepare_creds();
        if (!cred)
                return -ENOMEM;

        cap_raise(cred->cap_effective, CAP_SETGID);

        commit_creds(cred);

        rc = ugidctl_sys_setgid(gid);

        cred = prepare_creds();
        if (!cred) {
                /* unable to restore process capabilities - kill process */
                do_exit(SIGKILL);
                return -ENOMEM;
        }

        cap_lower(cred->cap_effective, CAP_SETGID);

        commit_creds(cred);

        return rc;
}
void cfs_cap_lower(cfs_cap_t cap)
{
        struct cred *cred;

        cred = prepare_creds();
        if (cred) {
                cap_lower(cred->cap_effective, cap);
                commit_creds(cred);
        }
}
static int cap_mmap(int oper)
{
#if _KSL > 28
        struct cred *cred = (struct cred *)(current->cred);
#else
        struct task_struct *cred = current;
#endif

        switch (oper) {
        case 1:
                cap_raise(cred->cap_effective, CAP_SYS_RAWIO);
                break;
        case 2:
                cap_lower(cred->cap_effective, CAP_SYS_RAWIO);
                break;
        }
        return cap_raised(cred->cap_effective, CAP_SYS_RAWIO);
}
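A caller would typically use cap_mmap() above to bracket a privileged raw-I/O operation: raise CAP_SYS_RAWIO, do the work, then lower it again. The sketch below is only illustrative and not part of the original source; do_privileged_io() and perform_raw_io() are hypothetical names.

/*
 * Illustrative sketch only: perform_raw_io() is a hypothetical helper
 * standing in for whatever operation actually needs CAP_SYS_RAWIO.
 */
static int do_privileged_io(void)
{
        int err;

        cap_mmap(1);                    /* raise CAP_SYS_RAWIO */
        err = perform_raw_io();         /* privileged work */
        cap_mmap(2);                    /* lower CAP_SYS_RAWIO again */

        return err;
}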
/* copyup all extended attrs for a given dentry */
static int copyup_xattrs(struct dentry *old_lower_dentry,
                         struct dentry *new_lower_dentry)
{
        int err = 0;
        ssize_t list_size = -1;
        char *name_list = NULL;
        char *attr_value = NULL;
        char *name_list_buf = NULL;

        /* query the actual size of the xattr list */
        list_size = vfs_listxattr(old_lower_dentry, NULL, 0);
        if (list_size <= 0) {
                err = list_size;
                goto out;
        }

        /* allocate space for the actual list */
        name_list = unionfs_xattr_alloc(list_size + 1, XATTR_LIST_MAX);
        if (unlikely(!name_list || IS_ERR(name_list))) {
                err = PTR_ERR(name_list);
                goto out;
        }

        name_list_buf = name_list;      /* save for kfree at end */

        /* now get the actual xattr list of the source file */
        list_size = vfs_listxattr(old_lower_dentry, name_list, list_size);
        if (list_size <= 0) {
                err = list_size;
                goto out;
        }

        /* allocate space to hold each xattr's value */
        attr_value = unionfs_xattr_alloc(XATTR_SIZE_MAX, XATTR_SIZE_MAX);
        if (unlikely(!attr_value || IS_ERR(attr_value))) {
                err = PTR_ERR(attr_value);
                goto out;
        }

        /* in a loop, get and set each xattr from src to dst file */
        while (*name_list) {
                ssize_t size;

                /* Lock here since vfs_getxattr doesn't lock for us */
                mutex_lock(&old_lower_dentry->d_inode->i_mutex);
                size = vfs_getxattr(old_lower_dentry, name_list,
                                    attr_value, XATTR_SIZE_MAX);
                mutex_unlock(&old_lower_dentry->d_inode->i_mutex);
                if (size < 0) {
                        err = size;
                        goto out;
                }
                if (size > XATTR_SIZE_MAX) {
                        err = -E2BIG;
                        goto out;
                }

                /* Don't lock here since vfs_setxattr does it for us. */
                err = vfs_setxattr(new_lower_dentry, name_list, attr_value,
                                   size, 0);
                /*
                 * Selinux depends on "security.*" xattrs, so to maintain
                 * the security of copied-up files, if Selinux is active,
                 * then we must copy these xattrs as well.  So we need to
                 * temporarily get FOWNER privileges.
                 * XXX: move entire copyup code to SIOQ.
                 */
                if (err == -EPERM && !capable(CAP_FOWNER)) {
                        cap_raise(current->cap_effective, CAP_FOWNER);
                        err = vfs_setxattr(new_lower_dentry, name_list,
                                           attr_value, size, 0);
                        cap_lower(current->cap_effective, CAP_FOWNER);
                }
                if (err < 0)
                        goto out;
                name_list += strlen(name_list) + 1;
        }

out:
        unionfs_xattr_kfree(name_list_buf);
        unionfs_xattr_kfree(attr_value);
        /* Ignore if xattr isn't supported */
        if (err == -ENOTSUPP || err == -EOPNOTSUPP)
                err = 0;
        return err;
}
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
        struct path upperpath = { };
        struct dentry *root_dentry;
        struct ovl_entry *oe;
        struct ovl_fs *ofs;
        struct cred *cred;
        int err;

        err = -ENOMEM;
        ofs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL);
        if (!ofs)
                goto out;

        ofs->creator_cred = cred = prepare_creds();
        if (!cred)
                goto out_err;

        ofs->config.index = ovl_index_def;
        ofs->config.nfs_export = ovl_nfs_export_def;
        ofs->config.xino = ovl_xino_def();
        ofs->config.metacopy = ovl_metacopy_def;
        err = ovl_parse_opt((char *) data, &ofs->config);
        if (err)
                goto out_err;

        err = -EINVAL;
        if (!ofs->config.lowerdir) {
                if (!silent)
                        pr_err("overlayfs: missing 'lowerdir'\n");
                goto out_err;
        }

        sb->s_stack_depth = 0;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        /* Assume underlying fs uses 32bit inodes unless proven otherwise */
        if (ofs->config.xino != OVL_XINO_OFF)
                ofs->xino_bits = BITS_PER_LONG - 32;

        if (ofs->config.upperdir) {
                if (!ofs->config.workdir) {
                        pr_err("overlayfs: missing 'workdir'\n");
                        goto out_err;
                }

                err = ovl_get_upper(ofs, &upperpath);
                if (err)
                        goto out_err;

                err = ovl_get_workdir(ofs, &upperpath);
                if (err)
                        goto out_err;

                if (!ofs->workdir)
                        sb->s_flags |= SB_RDONLY;

                sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
                sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
        }
        oe = ovl_get_lowerstack(sb, ofs);
        err = PTR_ERR(oe);
        if (IS_ERR(oe))
                goto out_err;

        /* If the upper fs is nonexistent, we mark overlayfs r/o too */
        if (!ofs->upper_mnt)
                sb->s_flags |= SB_RDONLY;

        if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
                err = ovl_get_indexdir(ofs, oe, &upperpath);
                if (err)
                        goto out_free_oe;

                /* Force r/o mount with no index dir */
                if (!ofs->indexdir) {
                        dput(ofs->workdir);
                        ofs->workdir = NULL;
                        sb->s_flags |= SB_RDONLY;
                }
        }

        /* Show index=off in /proc/mounts for forced r/o mount */
        if (!ofs->indexdir) {
                ofs->config.index = false;
                if (ofs->upper_mnt && ofs->config.nfs_export) {
                        pr_warn("overlayfs: NFS export requires an index dir, falling back to nfs_export=off.\n");
                        ofs->config.nfs_export = false;
                }
        }

        if (ofs->config.metacopy && ofs->config.nfs_export) {
                pr_warn("overlayfs: NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
                ofs->config.nfs_export = false;
        }

        if (ofs->config.nfs_export)
                sb->s_export_op = &ovl_export_operations;

        /* Never override disk quota limits or use reserved space */
        cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);

        sb->s_magic = OVERLAYFS_SUPER_MAGIC;
        sb->s_op = &ovl_super_operations;
        sb->s_xattr = ovl_xattr_handlers;
        sb->s_fs_info = ofs;
        sb->s_flags |= SB_POSIXACL;

        err = -ENOMEM;
        root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
        if (!root_dentry)
                goto out_free_oe;

        root_dentry->d_fsdata = oe;

        mntput(upperpath.mnt);
        if (upperpath.dentry) {
                ovl_dentry_set_upper_alias(root_dentry);
                if (ovl_is_impuredir(upperpath.dentry))
                        ovl_set_flag(OVL_IMPURE, d_inode(root_dentry));
        }

        /* Root is always merge -> can have whiteouts */
        ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry));
        ovl_dentry_set_flag(OVL_E_CONNECTED, root_dentry);
        ovl_set_upperdata(d_inode(root_dentry));
        ovl_inode_init(d_inode(root_dentry), upperpath.dentry,
                       ovl_dentry_lower(root_dentry), NULL);

        sb->s_root = root_dentry;

        return 0;

out_free_oe:
        ovl_entry_stack_free(oe);
        kfree(oe);
out_err:
        path_put(&upperpath);
        ovl_free_fs(ofs);
out:
        return err;
}
static long ugidctl_setgroups(struct ugidctl_context *ctx, void __user *arg)
{
        struct ugidctl_setgroups_rq req;
        enum pid_type ptype;
        gid_t __user *list;
        struct cred *cred;
        unsigned i, count;
        gid_t *bulk;
        pid_t pid;
        long rc;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        arg += sizeof(req);
        list = arg;

        count = (unsigned) req.count;

        if (count > NGROUPS_MAX)
                return -EINVAL;

        if (!count)
                return ugidctl_sys_setgroups(0, arg);

        if (capable(CAP_SETGID))
                return ugidctl_sys_setgroups((int) count, list);

        if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
                return -EPERM;

        mutex_lock(&ctx->lock);
        ptype = ctx->ptype;
        pid = ctx->pid;
        mutex_unlock(&ctx->lock);

        if (pid != pid_nr(get_task_pid(current, ptype)))
                return -EPERM;

        bulk = kmalloc(count > UGIDCTL_BULKSIZE ?
                       sizeof(gid_t) * UGIDCTL_BULKSIZE :
                       sizeof(gid_t) * count, GFP_KERNEL);
        if (!bulk)
                return -ENOMEM;

        while (count) {
                unsigned size = count > UGIDCTL_BULKSIZE ?
                                UGIDCTL_BULKSIZE : count;

                if (copy_from_user(bulk, arg, sizeof(gid_t) * size)) {
                        kfree(bulk);
                        return -EFAULT;
                }

                mutex_lock(&ctx->lock);

                for (i = 0; i < size; i++) {
                        if (ugidctl_find_gid(ctx, bulk[i])) {
                                mutex_unlock(&ctx->lock);
                                kfree(bulk);
                                return -EPERM;
                        }
                }

                mutex_unlock(&ctx->lock);

                arg += sizeof(gid_t) * size;
                count -= size;
        }

        kfree(bulk);

        cred = prepare_creds();
        if (!cred)
                return -ENOMEM;

        cap_raise(cred->cap_effective, CAP_SETGID);

        commit_creds(cred);

        rc = ugidctl_sys_setgroups((int) req.count, list);

        cred = prepare_creds();
        if (!cred) {
                /* unable to restore process capabilities - kill process */
                do_exit(SIGKILL);
                return -ENOMEM;
        }

        cap_lower(cred->cap_effective, CAP_SETGID);

        commit_creds(cred);

        return rc;
}