struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
	    !ptrace_may_access(task, mode) &&
	    !capable(CAP_SYS_RESOURCE)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}
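/*
 * Illustrative caller sketch, not taken from the file above: a typical user
 * of mm_access() has to handle three outcomes - NULL (the task has no mm,
 * e.g. a kernel thread), an ERR_PTR() when the access check refuses, or a
 * referenced mm_struct that must be dropped with mmput(). The helper name
 * inspect_task_mm() and the PTRACE_MODE_READ mode are assumptions for the
 * example only.
 */
static int inspect_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ENOENT;

	/* ... inspect mm under the appropriate locks ... */

	mmput(mm);
	return 0;
}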
COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
		       compat_uptr_t __user *, head_ptr,
		       compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;

	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
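/*
 * Illustrative userspace sketch, not part of the kernel source above:
 * get_robust_list() has no glibc wrapper, so it is invoked via syscall(2).
 * Reading another task's list only succeeds if the ptrace_may_access()
 * check in the handler above passes for the caller. The helper name
 * dump_robust_list() is made up for this example.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/futex.h>

static void dump_robust_list(pid_t pid)
{
	struct robust_list_head *head;
	size_t len;

	if (syscall(SYS_get_robust_list, pid, &head, &len) != 0) {
		fprintf(stderr, "get_robust_list(%d): %s\n",
			(int)pid, strerror(errno));
		return;
	}
	printf("pid %d: robust list head %p, entry size %zu\n",
	       (int)pid, (void *)head, len);
}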
static const char *proc_ns_get_link(struct dentry *dentry,
				    struct inode *inode,
				    struct delayed_call *done)
{
	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
	struct task_struct *task;
	struct path ns_path;
	void *error = ERR_PTR(-EACCES);

	if (!dentry)
		return ERR_PTR(-ECHILD);

	task = get_proc_task(inode);
	if (!task)
		return error;

	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		error = ns_get_path(&ns_path, task, ns_ops);
		if (!error)
			nd_jump_link(&ns_path);
	}
	put_task_struct(task);
	return error;
}
SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}
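/*
 * Illustrative userspace sketch, not part of the kernel source above:
 * kcmp() is called via syscall(2); KCMP_FILE asks whether fd1 in pid1 and
 * fd2 in pid2 refer to the same struct file. The caller must pass the
 * PTRACE_MODE_READ_REALCREDS check against both tasks, as enforced above.
 * The helper name fds_share_open_file() is made up for this example.
 */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/kcmp.h>

static int fds_share_open_file(pid_t pid1, int fd1, pid_t pid2, int fd2)
{
	long ret = syscall(SYS_kcmp, pid1, pid2, KCMP_FILE,
			   (unsigned long)fd1, (unsigned long)fd2);

	if (ret < 0)
		return -1;	/* ESRCH, EPERM, EBADF, ... */
	return ret == 0;	/* 0 means both fds point at the same file */
}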
int aed_get_process_bt(struct aee_process_bt *bt)
{
	int nr_cpus, err;
	struct bt_sync s;
	struct task_struct *task;
	int timeout_max = 500000;

	if (down_interruptible(&process_bt_sem) < 0)
		return -ERESTARTSYS;

	err = 0;
	if (bt->pid > 0) {
		task = find_task_by_vpid(bt->pid);
		if (task == NULL) {
			err = -EINVAL;
			goto exit;
		}
	} else {
		err = -EINVAL;
		goto exit;
	}

	/* Same access rule as ptrace attach: refuse if the caller may not trace. */
	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		goto exit;
	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
		mutex_unlock(&task->signal->cred_guard_mutex);
		err = -EPERM;
		goto exit;
	}

	get_online_cpus();
	preempt_disable();

	nr_cpus = num_online_cpus();
	atomic_set(&s.cpus_report, nr_cpus - 1);
	atomic_set(&s.cpus_lock, 1);

	/* Ask the other online CPUs to run per_cpu_get_bt() and park there. */
	smp_call_function(per_cpu_get_bt, &s, 0);

	/* Wait, bounded by roughly 500 ms, for the other CPUs to report in. */
	while (atomic_read(&s.cpus_report) != 0) {
		if (timeout_max-- > 0)
			udelay(1);
		else
			break;
	}

	aed_get_bt(task, bt);

	/* Release the parked CPUs and wait (again bounded) for the ack. */
	atomic_set(&s.cpus_report, nr_cpus - 1);
	atomic_set(&s.cpus_lock, 0);

	timeout_max = 500000;
	while (atomic_read(&s.cpus_report) != 0) {
		if (timeout_max-- > 0)
			udelay(1);
		else
			break;
	}

	preempt_enable();
	put_online_cpus();

	mutex_unlock(&task->signal->cred_guard_mutex);

exit:
	up(&process_bt_sem);
	return err;
}
static struct dentry *proc_ns_get_dentry(struct super_block *sb,
	struct task_struct *task, const struct proc_ns_operations *ns_ops)
{
	struct dentry *dentry, *result;
	struct inode *inode;
	struct proc_inode *ei;
	struct qstr qname = { .name = "", };
	void *ns;

	ns = ns_ops->get(task);
	if (!ns)
		return ERR_PTR(-ENOENT);

	dentry = d_alloc_pseudo(sb, &qname);
	if (!dentry) {
		ns_ops->put(ns);
		return ERR_PTR(-ENOMEM);
	}

	inode = iget_locked(sb, ns_ops->inum(ns));
	if (!inode) {
		dput(dentry);
		ns_ops->put(ns);
		return ERR_PTR(-ENOMEM);
	}

	ei = PROC_I(inode);
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		inode->i_op = &ns_inode_operations;
		inode->i_mode = S_IFREG | S_IRUGO;
		inode->i_fop = &ns_file_operations;
		ei->ns.ns_ops = ns_ops;
		ei->ns.ns = ns;
		unlock_new_inode(inode);
	} else {
		ns_ops->put(ns);
	}

	d_set_d_op(dentry, &ns_dentry_operations);
	result = d_instantiate_unique(dentry, inode);
	if (result) {
		dput(dentry);
		dentry = result;
	}

	return dentry;
}

static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct proc_inode *ei = PROC_I(inode);
	struct task_struct *task;
	struct path ns_path;
	void *error = ERR_PTR(-EACCES);

	task = get_proc_task(inode);
	if (!task)
		goto out;

	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_put_task;

	ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops);
	if (IS_ERR(ns_path.dentry)) {
		error = ERR_CAST(ns_path.dentry);
		goto out_put_task;
	}

	ns_path.mnt = mntget(nd->path.mnt);
	nd_jump_link(nd, &ns_path);
	error = NULL;

out_put_task:
	put_task_struct(task);
out:
	return error;
}

static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct inode *inode = dentry->d_inode;
	struct proc_inode *ei = PROC_I(inode);
	const struct proc_ns_operations *ns_ops = ei->ns.ns_ops;
	struct task_struct *task;
	void *ns;
	char name[50];
	int len = -EACCES;

	task = get_proc_task(inode);
	if (!task)
		goto out;

	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_put_task;

	len = -ENOENT;
	ns = ns_ops->get(task);
	if (!ns)
		goto out_put_task;

	snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
	len = strlen(name);

	if (len > buflen)
		len = buflen;
	if (copy_to_user(buffer, name, len))
		len = -EFAULT;

	ns_ops->put(ns);
out_put_task:
	put_task_struct(task);
out:
	return len;
}

static const struct inode_operations proc_ns_link_inode_operations = {
	.readlink	= proc_ns_readlink,
	.follow_link	= proc_ns_follow_link,
	.setattr	= proc_setattr,
};

static int proc_ns_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct proc_ns_operations *ns_ops = ptr;
	struct inode *inode;
	struct proc_inode *ei;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	inode->i_mode = S_IFLNK|S_IRWXUGO;
	inode->i_op = &proc_ns_link_inode_operations;
	ei->ns.ns_ops = ns_ops;

	d_set_d_op(dentry, &pid_dentry_operations);
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}

static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	const struct proc_ns_operations **entry, **last;

	if (!task)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;
	if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries))
		goto out;
	entry = ns_entries + (ctx->pos - 2);
	last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
	while (entry <= last) {
		const struct proc_ns_operations *ops = *entry;
		if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name),
				     proc_ns_instantiate, task, ops))
			break;
		ctx->pos++;
		entry++;
	}
out:
	put_task_struct(task);
	return 0;
}

const struct file_operations proc_ns_dir_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_ns_dir_readdir,
};

static struct dentry *proc_ns_dir_lookup(struct inode *dir,
				struct dentry *dentry, unsigned int flags)
{
	int error;
	struct task_struct *task = get_proc_task(dir);
	const struct proc_ns_operations **entry, **last;
	unsigned int len = dentry->d_name.len;

	error = -ENOENT;

	if (!task)
		goto out_no_task;

	last = &ns_entries[ARRAY_SIZE(ns_entries)];
	for (entry = ns_entries; entry < last; entry++) {
		if (strlen((*entry)->name) != len)
			continue;
		if (!memcmp(dentry->d_name.name, (*entry)->name, len))
			break;
	}
	if (entry == last)
		goto out;

	error = proc_ns_instantiate(dir, dentry, task, *entry);
out:
	put_task_struct(task);
out_no_task:
	return ERR_PTR(error);
}
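/*
 * Illustrative userspace sketch, not part of the kernel source above:
 * the /proc/<pid>/ns/<name> symlinks served by the code above resolve to
 * "<name>:[<inum>]" strings, and readlink(2) on them succeeds only when the
 * PTRACE_MODE_READ check passes for the caller. The helper name
 * print_ns_link() is made up for this example.
 */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static void print_ns_link(pid_t pid, const char *ns_name)
{
	char path[64], target[64];
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/%d/ns/%s", (int)pid, ns_name);
	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0) {
		perror(path);
		return;
	}
	target[n] = '\0';
	printf("%s -> %s\n", path, target);	/* e.g. "net:[4026531992]" */
}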