asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
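For reference, pid_vnr() itself is a one-liner: it resolves a struct pid to the numeric id as seen from the calling task's pid namespace ("v" for virtual, "nr" for number). A sketch of the definition as it appears in include/linux/pid.h in kernels of this vintage:

/* Resolve a struct pid to its numeric value relative to the pid
 * namespace of the *calling* task. */
static inline pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}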
static int ncp_show_options(struct seq_file *seq, struct dentry *root)
{
	struct ncp_server *server = NCP_SBP(root->d_sb);
	unsigned int tmp;

	if (server->m.uid != 0)
		seq_printf(seq, ",uid=%u", server->m.uid);
	if (server->m.gid != 0)
		seq_printf(seq, ",gid=%u", server->m.gid);
	if (server->m.mounted_uid != 0)
		seq_printf(seq, ",owner=%u", server->m.mounted_uid);
	tmp = server->m.file_mode & S_IALLUGO;
	if (tmp != NCP_DEFAULT_FILE_MODE)
		seq_printf(seq, ",mode=0%o", tmp);
	tmp = server->m.dir_mode & S_IALLUGO;
	if (tmp != NCP_DEFAULT_DIR_MODE)
		seq_printf(seq, ",dirmode=0%o", tmp);
	if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) {
		tmp = server->m.time_out * 100 / HZ;
		seq_printf(seq, ",timeout=%u", tmp);
	}
	if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT)
		seq_printf(seq, ",retry=%u", server->m.retry_count);
	if (server->m.flags != 0)
		seq_printf(seq, ",flags=%lu", server->m.flags);
	if (server->m.wdog_pid != NULL)
		seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid));

	return 0;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
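A minimal userspace counterpart to f_getown_ex() (illustrative sketch only; assumes a descriptor whose owner was previously set with F_SETOWN_EX):

#define _GNU_SOURCE		/* for struct f_owner_ex and F_GETOWN_EX */
#include <fcntl.h>
#include <stdio.h>

/* Query and print the owner of fd; returns 0 on success. */
static int print_owner(int fd)
{
	struct f_owner_ex owner;

	if (fcntl(fd, F_GETOWN_EX, &owner) < 0) {
		perror("fcntl(F_GETOWN_EX)");
		return -1;
	}
	/* owner.type is F_OWNER_TID, F_OWNER_PID or F_OWNER_PGRP */
	printf("type=%d pid=%d\n", owner.type, (int)owner.pid);
	return 0;
}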
asmlinkage long sys_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb);
	struct inode *root_inode = mnt->mnt_sb->s_root->d_inode;

	if (!sbi)
		return 0;

	seq_printf(m, ",fd=%d", sbi->pipefd);
	if (root_inode->i_uid != 0)
		seq_printf(m, ",uid=%u", root_inode->i_uid);
	if (root_inode->i_gid != 0)
		seq_printf(m, ",gid=%u", root_inode->i_gid);
	seq_printf(m, ",pgrp=%d", pid_vnr(sbi->oz_pgrp));
	seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ);
	seq_printf(m, ",minproto=%d", sbi->min_proto);
	seq_printf(m, ",maxproto=%d", sbi->max_proto);

	if (autofs_type_offset(sbi->type))
		seq_printf(m, ",offset");
	else if (autofs_type_direct(sbi->type))
		seq_printf(m, ",direct");
	else
		seq_printf(m, ",indirect");

	return 0;
}
asmlinkage long sys_getsid(pid_t pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
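Seen from userspace, sys_getsid() and sys_setsid() pair up as in this minimal sketch; setsid() fails with EPERM when the caller is already a process-group leader, matching the -EPERM paths above:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("before: pid=%d sid=%d\n", getpid(), getsid(0));

	/* Fails with EPERM if our pid already names a process group,
	 * i.e. the pid_task(sid, PIDTYPE_PGID) check in sys_setsid(). */
	if (setsid() < 0)
		perror("setsid");

	printf("after:  pid=%d sid=%d\n", getpid(), getsid(0));
	return 0;
}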
static int autofs_show_options(struct seq_file *m, struct dentry *root)
{
	struct autofs_sb_info *sbi = autofs_sbi(root->d_sb);
	struct inode *root_inode = d_inode(root->d_sb->s_root);

	if (!sbi)
		return 0;

	seq_printf(m, ",fd=%d", sbi->pipefd);
	if (!uid_eq(root_inode->i_uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, root_inode->i_uid));
	if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, root_inode->i_gid));
	seq_printf(m, ",pgrp=%d", pid_vnr(sbi->oz_pgrp));
	seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ);
	seq_printf(m, ",minproto=%d", sbi->min_proto);
	seq_printf(m, ",maxproto=%d", sbi->max_proto);

	if (autofs_type_offset(sbi->type))
		seq_printf(m, ",offset");
	else if (autofs_type_direct(sbi->type))
		seq_printf(m, ",direct");
	else
		seq_printf(m, ",indirect");
#ifdef CONFIG_CHECKPOINT_RESTORE
	if (sbi->pipe)
		seq_printf(m, ",pipe_ino=%ld", file_inode(sbi->pipe)->i_ino);
	else
		seq_printf(m, ",pipe_ino=-1");
#endif
	return 0;
}
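For orientation, a mount rendered by this callback shows up in /proc/mounts along these lines (all values hypothetical; the surrounding fields and the uid=/gid= options, which appear only for a non-root-owned root inode, are assumptions for illustration):

/etc/auto.misc /misc autofs rw,relatime,fd=5,pgrp=1234,timeout=300,minproto=5,maxproto=5,indirect,pipe_ino=17 0 0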
/*
 * Get client information.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_client structure.
 * \param file_priv DRM file private.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the client with the specified index and copies its
 * information into userspace.
 */
static int drm_getclient(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_client *client = data;

	/*
	 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
	 * from breaking completely. Userspace tools stop enumerating once
	 * they get -EINVAL, hence this is the return value we need to hand
	 * back for no clients tracked.
	 *
	 * Unfortunately some clients (*cough* libva *cough*) use this in a
	 * fun attempt to figure out whether they're authenticated or not.
	 * Since that's the only thing they care about, give it to them
	 * directly instead of walking one giant list.
	 */
	if (client->idx == 0) {
		client->auth = file_priv->authenticated;
		client->pid = pid_vnr(file_priv->pid);
		client->uid = from_kuid_munged(current_user_ns(),
					       file_priv->uid);
		client->magic = 0;
		client->iocs = 0;

		return 0;
	} else {
		return -EINVAL;
	}
}
pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
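The sign convention above is visible directly through fcntl(); a sketch of the userspace view (error handling via errno elided; this overloading of negative returns is the very ambiguity F_GETOWN_EX was introduced to resolve):

#include <fcntl.h>
#include <stdio.h>

/* A negative F_GETOWN result denotes a process group, mirroring the
 * negation in f_getown() above. */
static void show_owner(int fd)
{
	int owner = fcntl(fd, F_GETOWN);

	if (owner < 0)
		printf("owned by pgrp %d\n", -owner);
	else
		printf("owned by pid %d\n", owner);
}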
int ip_local_out(struct sk_buff *skb)
{
	int err;

	/* Debug instrumentation: log outgoing packets whose sender belongs
	 * to the traced process group (g_pgid), compared in the sender's
	 * own pid namespace. */
	if (pid_vnr(task_pgrp(current)) == g_pgid)
		printk(KERN_DEBUG "pgid: %d ip_local_out len: %u\n",
		       g_pgid, skb->len);

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
	struct cmsghdr *cmsg;
	int err;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		err = -EINVAL;

		/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
		/* The first check was omitted in <= 2.2.5. The reasoning was
		   that parser checks cmsg_len in any case, so that
		   additional check would be work duplication.
		   But if cmsg_level is not SOL_SOCKET, we do not check
		   for too short ancillary data object at all! Oops.
		   OK, let's add it...
		 */
		if (!CMSG_OK(msg, cmsg))
			goto error;

		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;

		switch (cmsg->cmsg_type) {
		case SCM_RIGHTS:
			if (!sock->ops || sock->ops->family != PF_UNIX)
				goto error;
			err = scm_fp_copy(cmsg, &p->fp);
			if (err < 0)
				goto error;
			break;
		case SCM_CREDENTIALS:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
				goto error;
			memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
			err = scm_check_creds(&p->creds);
			if (err)
				goto error;

			if (pid_vnr(p->pid) != p->creds.pid) {
				struct pid *pid;
				err = -ESRCH;
				pid = find_get_pid(p->creds.pid);
				if (!pid)
					goto error;
				put_pid(p->pid);
				p->pid = pid;
			}

			if ((p->cred->euid != p->creds.uid) ||
			    (p->cred->egid != p->creds.gid)) {
				struct cred *cred;
				err = -ENOMEM;
				cred = prepare_creds();
				if (!cred)
					goto error;

				cred->uid = cred->euid = p->creds.uid;
				cred->gid = cred->egid = p->creds.gid;
				put_cred(p->cred);
				p->cred = cred;
			}
			break;
		default:
			goto error;
		}
	}

	if (p->fp && !p->fp->count) {
		kfree(p->fp);
		p->fp = NULL;
	}
	return 0;

error:
	scm_destroy(p);
	return err;
}
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
	struct cmsghdr *cmsg;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		err = -EINVAL;

		/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
		/* The first check was omitted in <= 2.2.5. The reasoning was
		   that parser checks cmsg_len in any case, so that
		   additional check would be work duplication.
		   But if cmsg_level is not SOL_SOCKET, we do not check
		   for too short ancillary data object at all! Oops.
		   OK, let's add it...
		 */
		if (!CMSG_OK(msg, cmsg))
			goto error;

		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;

		switch (cmsg->cmsg_type) {
		case SCM_RIGHTS:
			if (!sock->ops || sock->ops->family != PF_UNIX)
				goto error;
			err = scm_fp_copy(cmsg, &p->fp);
			if (err < 0)
				goto error;
			break;
		case SCM_CREDENTIALS:
		{
			struct ucred creds;
			kuid_t uid;
			kgid_t gid;

			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
				goto error;
			memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
			err = scm_check_creds(&creds);
			if (err)
				goto error;

			p->creds.pid = creds.pid;
			if (!p->pid || pid_vnr(p->pid) != creds.pid) {
				struct pid *pid;
				err = -ESRCH;
				pid = find_get_pid(creds.pid);
				if (!pid)
					goto error;
				put_pid(p->pid);
				p->pid = pid;
			}

			err = -EINVAL;
			uid = make_kuid(current_user_ns(), creds.uid);
			gid = make_kgid(current_user_ns(), creds.gid);
			if (!uid_valid(uid) || !gid_valid(gid))
				goto error;

			p->creds.uid = uid;
			p->creds.gid = gid;
			break;
		}
		default:
			goto error;
		}
	}

	if (p->fp && !p->fp->count) {
		kfree(p->fp);
		p->fp = NULL;
	}
	return 0;

error:
	scm_destroy(p);
	return err;
}
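For context, the sender-side path this function validates looks roughly like the following userspace sketch over an AF_UNIX socket (the connected fd and helper name are assumptions for illustration; the receiver must enable SO_PASSCRED to get the credentials delivered):

#define _GNU_SOURCE		/* for struct ucred */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Send one byte with our own credentials attached as SCM_CREDENTIALS. */
static int send_creds(int fd)
{
	struct ucred creds = {
		.pid = getpid(), .uid = getuid(), .gid = getgid(),
	};
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {				/* ensure correct cmsg alignment */
		char buf[CMSG_SPACE(sizeof(creds))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_CREDENTIALS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
	memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));

	return sendmsg(fd, &msg, 0) == 1 ? 0 : -1;
}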
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
		if (clone_flags & (CLONE_THREAD|CLONE_PARENT)) {
			printk("[%d:%s] fork fail at clone_thread, flags:0x%x\n",
			       current->pid, current->comm,
			       (unsigned int)clone_flags);
			return -EINVAL;
		}
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

#ifdef CONFIG_SCHEDSTATS
		/* mt scheduler profiling */
		save_mtproc_info(p, sched_clock());
		printk(KERN_DEBUG "[%d:%s] fork [%d:%s]\n",
		       current->pid, current->comm, p->pid, p->comm);
#endif
		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
#ifdef CONFIG_MT_PRIO_TRACER
		create_prio_tracer(task_pid_nr(p));
		update_prio_tracer(task_pid_nr(p), p->prio, p->policy, PTS_KRNL);
#endif
	} else {
		nr = PTR_ERR(p);
		printk("[%d:%s] fork fail:[%p, %ld]\n",
		       current->pid, current->comm, p, nr);
	}
	return nr;
}
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't finished SIGSTOP raising yet.  Now we clear it
		 * and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
int cpt_dump_tty(cpt_object_t *obj, struct cpt_context *ctx)
{
	struct tty_struct *tty = obj->o_obj;
	struct cpt_tty_image *v;

	if (tty->link) {
		if (lookup_cpt_object(CPT_OBJ_TTY, tty->link, ctx) == NULL) {
			eprintk_ctx("orphan pty %s %d\n", tty->name,
				    tty->driver->subtype == PTY_TYPE_SLAVE);
			return -EINVAL;
		}
		if (tty->link->link != tty) {
			eprintk_ctx("bad pty pair\n");
			return -EINVAL;
		}
		if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
		    tty->driver->subtype == PTY_TYPE_SLAVE &&
		    tty->link->count)
			obj->o_count++;
	}
	if (obj->o_count != tty->count) {
		eprintk_ctx("tty %s is referenced outside %d %d\n",
			    tty->name, obj->o_count, tty->count);
		return -EBUSY;
	}

	cpt_open_object(obj, ctx);

	v = cpt_get_buf(ctx);
	v->cpt_next = -1;
	v->cpt_object = CPT_OBJ_TTY;
	v->cpt_hdrlen = sizeof(*v);
	v->cpt_content = CPT_CONTENT_ARRAY;
	v->cpt_index = tty->index;
	v->cpt_link = -1;
	if (tty->link)
		v->cpt_link = tty->link->index;
	v->cpt_drv_type = tty->driver->type;
	v->cpt_drv_subtype = tty->driver->subtype;
	v->cpt_drv_flags = tty->driver->flags;
	v->cpt_packet = tty->packet;
	v->cpt_stopped = tty->stopped;
	v->cpt_hw_stopped = tty->hw_stopped;
	v->cpt_flow_stopped = tty->flow_stopped;
	v->cpt_flags = tty->flags;
	v->cpt_ctrl_status = tty->ctrl_status;
	v->cpt_canon_data = tty->canon_data;
	v->cpt_canon_head = tty->canon_head - tty->read_tail;
	v->cpt_canon_column = tty->canon_column;
	v->cpt_column = tty->column;
	v->cpt_erasing = tty->erasing;
	v->cpt_lnext = tty->lnext;
	v->cpt_icanon = tty->icanon;
	v->cpt_raw = tty->raw;
	v->cpt_real_raw = tty->real_raw;
	v->cpt_closing = tty->closing;
	v->cpt_minimum_to_wake = tty->minimum_to_wake;
	v->cpt_pgrp = 0;
	if (tty->pgrp) {
		v->cpt_pgrp = pid_vnr(tty->pgrp);
		if ((int)v->cpt_pgrp < 0) {
			dprintk_ctx("cannot map tty->pgrp %d -> %d\n",
				    pid_vnr(tty->pgrp), (int)v->cpt_pgrp);
			v->cpt_pgrp = -1;
		}
	}
	v->cpt_session = 0;
	if (tty->session) {
		v->cpt_session = pid_vnr(tty->session);
		if ((int)v->cpt_session < 0) {
			eprintk_ctx("cannot map tty->session %d -> %d\n",
				    pid_nr(tty->session), (int)v->cpt_session);
			cpt_release_buf(ctx);
			return -EINVAL;
		}
	}
	memcpy(v->cpt_name, tty->name, 64);
	v->cpt_ws_row = tty->winsize.ws_row;
	v->cpt_ws_col = tty->winsize.ws_col;
	v->cpt_ws_prow = tty->winsize.ws_ypixel;
	v->cpt_ws_pcol = tty->winsize.ws_xpixel;
	if (tty->termios == NULL) {
		eprintk_ctx("NULL termios");
		cpt_release_buf(ctx);
		return -EINVAL;
	}
	v->cpt_c_line = tty->termios->c_line;
	v->cpt_c_iflag = tty->termios->c_iflag;
	v->cpt_c_oflag = tty->termios->c_oflag;
	v->cpt_c_cflag = tty->termios->c_cflag;
	v->cpt_c_lflag = tty->termios->c_lflag;
	memcpy(v->cpt_c_cc, tty->termios->c_cc, NCCS);
	if (NCCS < 32)
		memset(v->cpt_c_cc + NCCS, 255, 32 - NCCS);
	memcpy(v->cpt_read_flags, tty->read_flags, sizeof(v->cpt_read_flags));

	ctx->write(v, sizeof(*v), ctx);
	cpt_release_buf(ctx);

	if (tty->read_buf && tty->read_cnt) {
		struct cpt_obj_bits *v = cpt_get_buf(ctx);
		loff_t saved_pos;

		cpt_push_object(&saved_pos, ctx);
		cpt_open_object(NULL, ctx);
		v->cpt_next = CPT_NULL;
		v->cpt_object = CPT_OBJ_BITS;
		v->cpt_hdrlen = sizeof(*v);
		v->cpt_content = CPT_CONTENT_DATA;
		v->cpt_size = tty->read_cnt;

		ctx->write(v, sizeof(*v), ctx);
		cpt_release_buf(ctx);

		if (tty->read_cnt) {
			int n = min(tty->read_cnt,
				    N_TTY_BUF_SIZE - tty->read_tail);
			ctx->write(tty->read_buf + tty->read_tail, n, ctx);
			if (tty->read_cnt > n)
				ctx->write(tty->read_buf, tty->read_cnt - n, ctx);
			ctx->align(ctx);
		}
		cpt_close_object(ctx);
		cpt_pop_object(&saved_pos, ctx);
	}

	cpt_close_object(ctx);

	return 0;
}