static int real_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
        int id;
        struct msqid_ds *msq;
        struct ipc_perm *ipcp;
        struct msg *msgh;
        long mtype;

        if (msgsz > MSGMAX || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        if (mtype < 1)
                return -EINVAL;
        id = (unsigned int) msqid % MSGMNI;
        msq = msgque[id];
        if (msq == IPC_UNUSED || msq == IPC_NOID)
                return -EINVAL;
        ipcp = &msq->msg_perm;

slept:
        if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
                return -EIDRM;
        if (ipcperms(ipcp, S_IWUGO))
                return -EACCES;

        if (msgsz + msq->msg_cbytes > msq->msg_qbytes) {
                if (msgsz + msq->msg_cbytes > msq->msg_qbytes) {
                        /* still no space in queue */
                        if (msgflg & IPC_NOWAIT)
                                return -EAGAIN;
                        if (signal_pending(current))
                                return -EINTR;
                        interruptible_sleep_on(&msq->wwait);
                        goto slept;
                }
        }

        /* allocate message header and text space */
        msgh = (struct msg *) kmalloc(sizeof(*msgh) + msgsz, GFP_KERNEL);
        if (!msgh)
                return -ENOMEM;
        msgh->msg_spot = (char *) (msgh + 1);

        if (copy_from_user(msgh->msg_spot, msgp->mtext, msgsz)) {
                kfree(msgh);
                return -EFAULT;
        }

        if (msgque[id] == IPC_UNUSED || msgque[id] == IPC_NOID ||
            msq->msg_perm.seq != (unsigned int) msqid / MSGMNI) {
                kfree(msgh);
                return -EIDRM;
        }

        msgh->msg_next = NULL;
        msgh->msg_ts = msgsz;
        msgh->msg_type = mtype;
        msgh->msg_stime = CURRENT_TIME;

        if (!msq->msg_first)
                msq->msg_first = msq->msg_last = msgh;
        else {
                msq->msg_last->msg_next = msgh;
                msq->msg_last = msgh;
        }
        msq->msg_cbytes += msgsz;
        msgbytes += msgsz;
        msghdrs++;
        msq->msg_qnum++;
        msq->msg_lspid = current->pid;
        msq->msg_stime = CURRENT_TIME;
        wake_up(&msq->rwait);
        return 0;
}
asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds tbuf;
        struct shmid_kernel *shp;
        struct ipc_perm *ipcp;
        int id, err = -EINVAL;

        lock_kernel();
        if (cmd < 0 || shmid < 0)
                goto out;
        if (cmd == IPC_SET) {
                err = -EFAULT;
                if (copy_from_user(&tbuf, buf, sizeof(*buf)))
                        goto out;
        }

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo shminfo;

                err = -EFAULT;
                if (!buf)
                        goto out;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = shmmax;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = shmall;
                shminfo.shmseg = SHMSEG;
                if (copy_to_user(buf, &shminfo, sizeof(struct shminfo)))
                        goto out;
                err = max_shmid;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = -EFAULT;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
                        goto out;
                err = max_shmid;
                goto out;
        }
        case SHM_STAT:
                err = -EINVAL;
                if (shmid > max_shmid)
                        goto out;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        goto out;
                if (ipcperms(&shp->u.shm_perm, S_IRUGO))
                        goto out;
                id = (unsigned int) shp->u.shm_perm.seq * SHMMNI + shmid;
                err = -EFAULT;
                if (copy_to_user(buf, &shp->u, sizeof(*buf)))
                        goto out;
                err = id;
                goto out;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        err = -EINVAL;
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                goto out;
        err = -EIDRM;
        if (shp->u.shm_perm.seq != (unsigned int) shmid / SHMMNI)
                goto out;
        ipcp = &shp->u.shm_perm;

        switch (cmd) {
        case SHM_UNLOCK:
                err = -EPERM;
                if (!capable(CAP_IPC_LOCK))
                        goto out;
                err = -EINVAL;
                if (!(ipcp->mode & SHM_LOCKED))
                        goto out;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
                /* Allow superuser to lock segment in memory */
                /* Should the pages be faulted in here or leave it to user? */
                /* need to determine interaction with current->swappable */
                err = -EPERM;
                if (!capable(CAP_IPC_LOCK))
                        goto out;
                err = -EINVAL;
                if (ipcp->mode & SHM_LOCKED)
                        goto out;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                err = -EACCES;
                if (ipcperms(ipcp, S_IRUGO))
                        goto out;
                err = -EFAULT;
                if (copy_to_user(buf, &shp->u, sizeof(shp->u)))
                        goto out;
                break;
        case IPC_SET:
                if (current->euid == shp->u.shm_perm.uid ||
                    current->euid == shp->u.shm_perm.cuid ||
                    capable(CAP_SYS_ADMIN)) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
                                     (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->u.shm_ctime = CURRENT_TIME;
                        break;
                }
                err = -EPERM;
                goto out;
        case IPC_RMID:
                if (current->euid == shp->u.shm_perm.uid ||
                    current->euid == shp->u.shm_perm.cuid ||
                    capable(CAP_SYS_ADMIN)) {
                        shp->u.shm_perm.mode |= SHM_DEST;
                        if (shp->u.shm_nattch <= 0)
                                killseg(id);
                        break;
                }
                err = -EPERM;
                goto out;
        default:
                err = -EINVAL;
                goto out;
        }
        err = 0;
out:
        unlock_kernel();
        return err;
}
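/*
 * For context, a minimal (hypothetical) userspace exercise of the
 * sys_shmctl paths above: create a segment, stat it through the
 * IPC_STAT arm, then mark it for destruction through IPC_RMID, which
 * sets SHM_DEST in the kernel. The key, size, and mode are arbitrary
 * illustrations, not anything the kernel code mandates.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) {
                perror("shmget");
                return 1;
        }

        struct shmid_ds ds;
        if (shmctl(id, IPC_STAT, &ds) == 0)     /* drives the IPC_STAT case */
                printf("segment size: %lu\n", (unsigned long) ds.shm_segsz);

        shmctl(id, IPC_RMID, NULL);             /* drives the IPC_RMID case */
        return 0;
}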
static int real_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz, long msgtyp, int msgflg)
{
        struct msqid_ds *msq;
        struct ipc_perm *ipcp;
        struct msg *tmsg, *leastp = NULL;
        struct msg *nmsg = NULL;
        int id;

        if (msqid < 0 || (long) msgsz < 0)
                return -EINVAL;

        id = (unsigned int) msqid % MSGMNI;
        msq = msgque[id];
        if (msq == IPC_NOID || msq == IPC_UNUSED)
                return -EINVAL;
        ipcp = &msq->msg_perm;

        /*
         * find message of correct type.
         * msgtyp = 0 => get first.
         * msgtyp > 0 => get first message of matching type.
         * msgtyp < 0 => get message with the lowest type, which must
         *               be <= abs(msgtyp).
         */
        while (!nmsg) {
                if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI) {
                        return -EIDRM;
                }
                if (ipcperms(ipcp, S_IRUGO)) {
                        return -EACCES;
                }
                if (msgtyp == 0)
                        nmsg = msq->msg_first;
                else if (msgtyp > 0) {
                        if (msgflg & MSG_EXCEPT) {
                                for (tmsg = msq->msg_first; tmsg;
                                     tmsg = tmsg->msg_next)
                                        if (tmsg->msg_type != msgtyp)
                                                break;
                                nmsg = tmsg;
                        } else {
                                for (tmsg = msq->msg_first; tmsg;
                                     tmsg = tmsg->msg_next)
                                        if (tmsg->msg_type == msgtyp)
                                                break;
                                nmsg = tmsg;
                        }
                } else {
                        for (leastp = tmsg = msq->msg_first; tmsg;
                             tmsg = tmsg->msg_next)
                                if (tmsg->msg_type < leastp->msg_type)
                                        leastp = tmsg;
                        if (leastp && leastp->msg_type <= -msgtyp)
                                nmsg = leastp;
                }

                if (nmsg) { /* done finding a message */
                        if ((msgsz < nmsg->msg_ts) && !(msgflg & MSG_NOERROR)) {
                                return -E2BIG;
                        }
                        msgsz = (msgsz > nmsg->msg_ts) ? nmsg->msg_ts : msgsz;
                        if (nmsg == msq->msg_first)
                                msq->msg_first = nmsg->msg_next;
                        else {
                                for (tmsg = msq->msg_first; tmsg;
                                     tmsg = tmsg->msg_next)
                                        if (tmsg->msg_next == nmsg)
                                                break;
                                tmsg->msg_next = nmsg->msg_next;
                                if (nmsg == msq->msg_last)
                                        msq->msg_last = tmsg;
                        }
                        if (!(--msq->msg_qnum))
                                msq->msg_last = msq->msg_first = NULL;

                        msq->msg_rtime = CURRENT_TIME;
                        msq->msg_lrpid = current->pid;
                        msgbytes -= nmsg->msg_ts;
                        msghdrs--;
                        msq->msg_cbytes -= nmsg->msg_ts;
                        wake_up(&msq->wwait);
                        if (put_user(nmsg->msg_type, &msgp->mtype) ||
                            copy_to_user(msgp->mtext, nmsg->msg_spot, msgsz))
                                msgsz = -EFAULT;
                        kfree(nmsg);
                        return msgsz;
                } else { /* did not find a message */
                        if (msgflg & IPC_NOWAIT) {
                                return -ENOMSG;
                        }
                        if (signal_pending(current)) {
                                return -EINTR;
                        }
                        interruptible_sleep_on(&msq->rwait);
                }
        } /* end while */
        return -1;
}
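/*
 * A minimal (hypothetical) userspace round-trip that drives the
 * send/receive pair above: create a queue, send one message, receive
 * it back, then remove the queue. The message type and text are
 * arbitrary illustrations; the only constraint the kernel code
 * enforces is mtype >= 1.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msgbuf {
        long mtype;             /* must be >= 1, as real_msgsnd enforces */
        char mtext[64];
};

int main(void)
{
        /* IPC_PRIVATE sidesteps key collisions for this demonstration. */
        int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
        if (id < 0) {
                perror("msgget");
                return 1;
        }

        struct my_msgbuf out = { .mtype = 1 };
        strcpy(out.mtext, "hello");
        if (msgsnd(id, &out, strlen(out.mtext) + 1, 0) < 0)
                perror("msgsnd");

        struct my_msgbuf in;
        /* msgtyp == 0: take the first message on the queue. */
        if (msgrcv(id, &in, sizeof(in.mtext), 0, 0) < 0)
                perror("msgrcv");
        else
                printf("received type %ld: %s\n", in.mtype, in.mtext);

        msgctl(id, IPC_RMID, NULL);     /* tear the queue down */
        return 0;
}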
asmlinkage int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
        int id, err = -EINVAL;
        struct msqid_ds *msq;
        struct msqid_ds tbuf;
        struct ipc_perm *ipcp;

        lock_kernel();
        if (msqid < 0 || cmd < 0)
                goto out;
        err = -EFAULT;
        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
                if (!buf)
                        goto out;
                {
                        struct msginfo msginfo;

                        msginfo.msgmni = MSGMNI;
                        msginfo.msgmax = MSGMAX;
                        msginfo.msgmnb = MSGMNB;
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                        msginfo.msgssz = MSGSSZ;
                        msginfo.msgseg = MSGSEG;
                        if (cmd == MSG_INFO) {
                                msginfo.msgpool = used_queues;
                                msginfo.msgmap = msghdrs;
                                msginfo.msgtql = msgbytes;
                        }
                        err = -EFAULT;
                        if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                                goto out;
                        err = max_msqid;
                        goto out;
                }
        case MSG_STAT:
                if (!buf)
                        goto out;
                err = -EINVAL;
                if (msqid > max_msqid)
                        goto out;
                msq = msgque[msqid];
                if (msq == IPC_UNUSED || msq == IPC_NOID)
                        goto out;
                err = -EACCES;
                if (ipcperms(&msq->msg_perm, S_IRUGO))
                        goto out;
                id = (unsigned int) msq->msg_perm.seq * MSGMNI + msqid;
                tbuf.msg_perm   = msq->msg_perm;
                tbuf.msg_stime  = msq->msg_stime;
                tbuf.msg_rtime  = msq->msg_rtime;
                tbuf.msg_ctime  = msq->msg_ctime;
                tbuf.msg_cbytes = msq->msg_cbytes;
                tbuf.msg_qnum   = msq->msg_qnum;
                tbuf.msg_qbytes = msq->msg_qbytes;
                tbuf.msg_lspid  = msq->msg_lspid;
                tbuf.msg_lrpid  = msq->msg_lrpid;
                err = -EFAULT;
                if (copy_to_user(buf, &tbuf, sizeof(*buf)))
                        goto out;
                err = id;
                goto out;
        case IPC_SET:
                if (!buf)
                        goto out;
                err = -EFAULT;
                if (!copy_from_user(&tbuf, buf, sizeof(*buf)))
                        err = 0;
                break;
        case IPC_STAT:
                if (!buf)
                        goto out;
                break;
        }

        id = (unsigned int) msqid % MSGMNI;
        msq = msgque[id];
        err = -EINVAL;
        if (msq == IPC_UNUSED || msq == IPC_NOID)
                goto out;
        err = -EIDRM;
        if (msq->msg_perm.seq != (unsigned int) msqid / MSGMNI)
                goto out;
        ipcp = &msq->msg_perm;

        switch (cmd) {
        case IPC_STAT:
                err = -EACCES;
                if (ipcperms(ipcp, S_IRUGO))
                        goto out;
                tbuf.msg_perm   = msq->msg_perm;
                tbuf.msg_stime  = msq->msg_stime;
                tbuf.msg_rtime  = msq->msg_rtime;
                tbuf.msg_ctime  = msq->msg_ctime;
                tbuf.msg_cbytes = msq->msg_cbytes;
                tbuf.msg_qnum   = msq->msg_qnum;
                tbuf.msg_qbytes = msq->msg_qbytes;
                tbuf.msg_lspid  = msq->msg_lspid;
                tbuf.msg_lrpid  = msq->msg_lrpid;
                err = -EFAULT;
                if (!copy_to_user(buf, &tbuf, sizeof(*buf)))
                        err = 0;
                goto out;
        case IPC_SET:
                err = -EPERM;
                if (current->euid != ipcp->cuid &&
                    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
                        /* We _could_ check for CAP_CHOWN above, but we don't */
                        goto out;
                if (tbuf.msg_qbytes > MSGMNB && !capable(CAP_SYS_RESOURCE))
                        goto out;
                msq->msg_qbytes = tbuf.msg_qbytes;
                ipcp->uid = tbuf.msg_perm.uid;
                ipcp->gid = tbuf.msg_perm.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
                             (S_IRWXUGO & tbuf.msg_perm.mode);
                msq->msg_ctime = CURRENT_TIME;
                err = 0;
                goto out;
        case IPC_RMID:
                err = -EPERM;
                if (current->euid != ipcp->cuid &&
                    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
                        goto out;
                freeque(id);
                err = 0;
                goto out;
        default:
                err = -EINVAL;
                goto out;
        }
out:
        unlock_kernel();
        return err;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
                size_t msgsz, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int err;
        struct ipc_namespace *ns;

        ns = current->nsproxy->ipc_ns;

        if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (mtype < 1)
                return -EINVAL;

        msg = load_msg(mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->m_type = mtype;
        msg->m_ts = msgsz;

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq)) {
                err = PTR_ERR(msq);
                goto out_free;
        }

        for (;;) {
                struct msg_sender s;

                err = -EACCES;
                if (ipcperms(&msq->q_perm, S_IWUGO))
                        goto out_unlock_free;

                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock_free;

                if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                    1 + msq->q_qnum <= msq->q_qbytes) {
                        break;
                }

                /* queue full, wait: */
                if (msgflg & IPC_NOWAIT) {
                        err = -EAGAIN;
                        goto out_unlock_free;
                }
                ss_add(msq, &s);
                ipc_rcu_getref(msq);
                msg_unlock(msq);
                schedule();

                ipc_lock_by_ptr(&msq->q_perm);
                ipc_rcu_putref(msq);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock_free;
                }
                ss_del(&s);

                if (signal_pending(current)) {
                        err = -ERESTARTNOHAND;
                        goto out_unlock_free;
                }
        }

        msq->q_lspid = current->tgid;
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                msq->q_qnum++;
                atomic_add(msgsz, &msg_bytes);
                atomic_inc(&msg_hdrs);
        }

        err = 0;
        msg = NULL;

out_unlock_free:
        msg_unlock(msq);
out_free:
        if (msg != NULL)
                free_msg(msg);
        return err;
}
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
                size_t msgsz, long msgtyp, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int mode;
        struct ipc_namespace *ns;

        if (msqid < 0 || (long) msgsz < 0)
                return -EINVAL;
        mode = convert_mode(&msgtyp, msgflg);
        ns = current->nsproxy->ipc_ns;

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq))
                return PTR_ERR(msq);

        for (;;) {
                struct msg_receiver msr_d;
                struct list_head *tmp;

                msg = ERR_PTR(-EACCES);
                if (ipcperms(&msq->q_perm, S_IRUGO))
                        goto out_unlock;

                msg = ERR_PTR(-EAGAIN);
                tmp = msq->q_messages.next;
                while (tmp != &msq->q_messages) {
                        struct msg_msg *walk_msg;

                        walk_msg = list_entry(tmp, struct msg_msg, m_list);
                        if (testmsg(walk_msg, msgtyp, mode) &&
                            !security_msg_queue_msgrcv(msq, walk_msg, current,
                                                       msgtyp, mode)) {
                                msg = walk_msg;
                                if (mode == SEARCH_LESSEQUAL &&
                                    walk_msg->m_type != 1) {
                                        msg = walk_msg;
                                        msgtyp = walk_msg->m_type - 1;
                                } else {
                                        msg = walk_msg;
                                        break;
                                }
                        }
                        tmp = tmp->next;
                }
                if (!IS_ERR(msg)) {
                        /*
                         * Found a suitable message.
                         * Unlink it from the queue.
                         */
                        if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                                msg = ERR_PTR(-E2BIG);
                                goto out_unlock;
                        }
                        list_del(&msg->m_list);
                        msq->q_qnum--;
                        msq->q_rtime = get_seconds();
                        msq->q_lrpid = current->tgid;
                        msq->q_cbytes -= msg->m_ts;
                        atomic_sub(msg->m_ts, &msg_bytes);
                        atomic_dec(&msg_hdrs);
                        ss_wakeup(&msq->q_senders, 0);
                        msg_unlock(msq);
                        break;
                }

                /* No message waiting. Wait for a message */
                if (msgflg & IPC_NOWAIT) {
                        msg = ERR_PTR(-ENOMSG);
                        goto out_unlock;
                }
                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;
                msr_d.r_mode = mode;
                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = msgsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                current->state = TASK_INTERRUPTIBLE;
                msg_unlock(msq);

                schedule();

                /* Lockless receive, part 1:
                 * Disable preemption. We don't hold a reference to the queue
                 * and getting a reference would defeat the idea of a lockless
                 * operation, thus the code relies on rcu to guarantee the
                 * existence of msq:
                 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
                 * Thus if r_msg is -EAGAIN, then the queue is not yet
                 * destroyed.
                 * rcu_read_lock() prevents preemption between reading r_msg
                 * and the spin_lock() inside ipc_lock_by_ptr().
                 */
                rcu_read_lock();

                /* Lockless receive, part 2:
                 * Wait until pipelined_send or expunge_all are outside of
                 * wake_up_process(). There is a race with exit(), see
                 * ipc/mqueue.c for the details.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                while (msg == NULL) {
                        cpu_relax();
                        msg = (struct msg_msg *)msr_d.r_msg;
                }

                /* Lockless receive, part 3:
                 * If there is a message or an error then accept it without
                 * locking.
                 */
                if (msg != ERR_PTR(-EAGAIN)) {
                        rcu_read_unlock();
                        break;
                }

                /* Lockless receive, part 4:
                 * Acquire the queue spinlock.
                 */
                ipc_lock_by_ptr(&msq->q_perm);
                rcu_read_unlock();

                /* Lockless receive, part 5:
                 * Repeat test after acquiring the spinlock.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock;

                list_del(&msr_d.r_list);
                if (signal_pending(current)) {
                        msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
                        msg_unlock(msq);
                        break;
                }
        }
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
        *pmtype = msg->m_type;
        if (store_msg(mtext, msg, msgsz))
                msgsz = -EFAULT;

        free_msg(msg);

        return msgsz;
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
        struct kern_ipc_perm *ipcp;
        struct msq_setbuf setbuf;
        struct msg_queue *msq;
        int err, version;
        struct ipc_namespace *ns;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        memset(&setbuf.mode, 0, sizeof(setbuf.mode));
        memset(&setbuf.gid, 0, sizeof(setbuf.gid));
        memset(&setbuf.uid, 0, sizeof(setbuf.uid));
        memset(&setbuf.qbytes, 0, sizeof(setbuf.qbytes));

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        {
                struct msginfo msginfo;
                int max_id;

                if (!buf)
                        return -EFAULT;
                /*
                 * We must not return kernel stack data.
                 * Due to padding, it's not enough
                 * to set all member fields.
                 */
                err = security_msg_queue_msgctl(NULL, cmd);
                if (err)
                        return err;

                memset(&msginfo, 0, sizeof(msginfo));
                msginfo.msgmni = ns->msg_ctlmni;
                msginfo.msgmax = ns->msg_ctlmax;
                msginfo.msgmnb = ns->msg_ctlmnb;
                msginfo.msgssz = MSGSSZ;
                msginfo.msgseg = MSGSEG;
                mutex_lock(&msg_ids(ns).mutex);
                if (cmd == MSG_INFO) {
                        msginfo.msgpool = msg_ids(ns).in_use;
                        msginfo.msgmap = atomic_read(&msg_hdrs);
                        msginfo.msgtql = atomic_read(&msg_bytes);
                } else {
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                }
                max_id = msg_ids(ns).max_id;
                mutex_unlock(&msg_ids(ns).mutex);
                if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case MSG_STAT:
        case IPC_STAT:
        {
                struct msqid64_ds tbuf;
                int success_return;

                if (!buf)
                        return -EFAULT;
                if (cmd == MSG_STAT && msqid >= msg_ids(ns).entries->size)
                        return -EINVAL;

                memset(&tbuf, 0, sizeof(tbuf));

                msq = msg_lock(ns, msqid);
                if (msq == NULL)
                        return -EINVAL;

                if (cmd == MSG_STAT) {
                        success_return = msg_buildid(ns, msqid, msq->q_perm.seq);
                } else {
                        err = -EIDRM;
                        if (msg_checkid(ns, msq, msqid))
                                goto out_unlock;
                        success_return = 0;
                }
                err = -EACCES;
                if (ipcperms(&msq->q_perm, S_IRUGO))
                        goto out_unlock;

                err = security_msg_queue_msgctl(msq, cmd);
                if (err)
                        goto out_unlock;

                kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                tbuf.msg_stime = msq->q_stime;
                tbuf.msg_rtime = msq->q_rtime;
                tbuf.msg_ctime = msq->q_ctime;
                tbuf.msg_cbytes = msq->q_cbytes;
                tbuf.msg_qnum = msq->q_qnum;
                tbuf.msg_qbytes = msq->q_qbytes;
                tbuf.msg_lspid = msq->q_lspid;
                tbuf.msg_lrpid = msq->q_lrpid;
                msg_unlock(msq);
                if (copy_msqid_to_user(buf, &tbuf, version))
                        return -EFAULT;
                return success_return;
        }
        case IPC_SET:
                if (!buf)
                        return -EFAULT;
                if (copy_msqid_from_user(&setbuf, buf, version))
                        return -EFAULT;
                break;
        case IPC_RMID:
                break;
        default:
                return -EINVAL;
        }

        mutex_lock(&msg_ids(ns).mutex);
        msq = msg_lock(ns, msqid);
        err = -EINVAL;
        if (msq == NULL)
                goto out_up;

        err = -EIDRM;
        if (msg_checkid(ns, msq, msqid))
                goto out_unlock_up;
        ipcp = &msq->q_perm;

        err = audit_ipc_obj(ipcp);
        if (err)
                goto out_unlock_up;
        if (cmd == IPC_SET) {
                err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid,
                                         setbuf.gid, setbuf.mode);
                if (err)
                        goto out_unlock_up;
        }

        err = -EPERM;
        if (current->euid != ipcp->cuid &&
            current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
                /* We _could_ check for CAP_CHOWN above, but we don't */
                goto out_unlock_up;

        err = security_msg_queue_msgctl(msq, cmd);
        if (err)
                goto out_unlock_up;

        switch (cmd) {
        case IPC_SET:
        {
                err = -EPERM;
                if (setbuf.qbytes > ns->msg_ctlmnb &&
                    !capable(CAP_SYS_RESOURCE))
                        goto out_unlock_up;

                msq->q_qbytes = setbuf.qbytes;

                ipcp->uid = setbuf.uid;
                ipcp->gid = setbuf.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
                             (S_IRWXUGO & setbuf.mode);
                msq->q_ctime = get_seconds();
                /* sleeping receivers might be excluded by
                 * stricter permissions.
                 */
                expunge_all(msq, -EAGAIN);
                /* sleeping senders might be able to send
                 * due to a larger queue size.
                 */
                ss_wakeup(&msq->q_senders, 0);
                msg_unlock(msq);
                break;
        }
        case IPC_RMID:
                freeque(ns, msq, msqid);
                break;
        }
        err = 0;
out_up:
        mutex_unlock(&msg_ids(ns).mutex);
        return err;
out_unlock_up:
        msg_unlock(msq);
        goto out_up;
out_unlock:
        msg_unlock(msq);
        return err;
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
        struct msg_queue *msq;
        int err, version;
        struct ipc_namespace *ns;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        {
                struct msginfo msginfo;
                int max_id;

                if (!buf)
                        return -EFAULT;
                /*
                 * We must not return kernel stack data.
                 * Due to padding, it's not enough
                 * to set all member fields.
                 */
                err = security_msg_queue_msgctl(NULL, cmd);
                if (err)
                        return err;

                memset(&msginfo, 0, sizeof(msginfo));
                msginfo.msgmni = ns->msg_ctlmni;
                msginfo.msgmax = ns->msg_ctlmax;
                msginfo.msgmnb = ns->msg_ctlmnb;
                msginfo.msgssz = MSGSSZ;
                msginfo.msgseg = MSGSEG;
                down_read(&msg_ids(ns).rw_mutex);
                if (cmd == MSG_INFO) {
                        msginfo.msgpool = msg_ids(ns).in_use;
                        msginfo.msgmap = atomic_read(&msg_hdrs);
                        msginfo.msgtql = atomic_read(&msg_bytes);
                } else {
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                }
                max_id = ipc_get_maxid(&msg_ids(ns));
                up_read(&msg_ids(ns).rw_mutex);
                if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case MSG_STAT:  /* msqid is an index rather than a msg queue id */
        case IPC_STAT:
        {
                struct msqid64_ds tbuf;
                int success_return;

                if (!buf)
                        return -EFAULT;

                if (cmd == MSG_STAT) {
                        msq = msg_lock(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = msq->q_perm.id;
                } else {
                        msq = msg_lock_check(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = 0;
                }
                err = -EACCES;
                if (ipcperms(&msq->q_perm, S_IRUGO))
                        goto out_unlock;

                err = security_msg_queue_msgctl(msq, cmd);
                if (err)
                        goto out_unlock;

                memset(&tbuf, 0, sizeof(tbuf));

                kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                tbuf.msg_stime = msq->q_stime;
                tbuf.msg_rtime = msq->q_rtime;
                tbuf.msg_ctime = msq->q_ctime;
                tbuf.msg_cbytes = msq->q_cbytes;
                tbuf.msg_qnum = msq->q_qnum;
                tbuf.msg_qbytes = msq->q_qbytes;
                tbuf.msg_lspid = msq->q_lspid;
                tbuf.msg_lrpid = msq->q_lrpid;
                msg_unlock(msq);
                if (copy_msqid_to_user(buf, &tbuf, version))
                        return -EFAULT;
                return success_return;
        }
        case IPC_SET:
        case IPC_RMID:
                err = msgctl_down(ns, msqid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        msg_unlock(msq);
        return err;
}
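/*
 * A minimal (hypothetical) userspace counterpart to the msgctl
 * implementations above: stat a queue through IPC_STAT, then change
 * msg_qbytes through IPC_SET. Shrinking the limit needs no capability;
 * only raising it past the system maximum requires CAP_SYS_RESOURCE,
 * as the kernel code checks. Values here are illustrative only.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
        int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
        if (id < 0) {
                perror("msgget");
                return 1;
        }

        struct msqid_ds ds;
        if (msgctl(id, IPC_STAT, &ds) == 0)     /* IPC_STAT arm */
                printf("qbytes: %lu\n", (unsigned long) ds.msg_qbytes);

        ds.msg_qbytes /= 2;                     /* lowering needs no capability */
        if (msgctl(id, IPC_SET, &ds) < 0)       /* IPC_SET arm */
                perror("msgctl IPC_SET");

        msgctl(id, IPC_RMID, NULL);
        return 0;
}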
asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
        int error = -EINVAL;
        struct sem_array *sma;
        struct sembuf fast_sops[SEMOPM_FAST];
        struct sembuf *sops = fast_sops, *sop;
        struct sem_undo *un;
        int undos = 0, decrease = 0, alter = 0;
        struct sem_queue queue;

        if (nsops < 1 || semid < 0)
                return -EINVAL;
        if (nsops > sc_semopm)
                return -E2BIG;
        if (nsops > SEMOPM_FAST) {
                sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
                if (sops == NULL)
                        return -ENOMEM;
        }
        if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
                error = -EFAULT;
                goto out_free;
        }
        sma = sem_lock(semid);
        error = -EINVAL;
        if (sma == NULL)
                goto out_free;
        error = -EIDRM;
        if (sem_checkid(sma, semid))
                goto out_unlock_free;
        error = -EFBIG;
        for (sop = sops; sop < sops + nsops; sop++) {
                if (sop->sem_num >= sma->sem_nsems)
                        goto out_unlock_free;
                if (sop->sem_flg & SEM_UNDO)
                        undos++;
                if (sop->sem_op < 0)
                        decrease = 1;
                if (sop->sem_op > 0)
                        alter = 1;
        }
        alter |= decrease;

        error = -EACCES;
        if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
                goto out_unlock_free;
        if (undos) {
                /* Make sure we have an undo structure
                 * for this process and this semaphore set.
                 */
                un = current->semundo;
                while (un != NULL) {
                        if (un->semid == semid)
                                break;
                        if (un->semid == -1)
                                un = freeundos(sma, un);
                        else
                                un = un->proc_next;
                }
                if (!un) {
                        error = alloc_undo(sma, &un, semid, alter);
                        if (error)
                                goto out_free;
                }
        } else
                un = NULL;

        error = try_atomic_semop(sma, sops, nsops, un, current->pid, 0);
        if (error <= 0)
                goto update;

        /* We need to sleep on this operation, so we put the current
         * task into the pending queue and go to sleep.
         */
        queue.sma = sma;
        queue.sops = sops;
        queue.nsops = nsops;
        queue.undo = un;
        queue.pid = current->pid;
        queue.alter = decrease;
        queue.id = semid;
        if (alter)
                append_to_queue(sma, &queue);
        else
                prepend_to_queue(sma, &queue);
        current->semsleeping = &queue;

        for (;;) {
                struct sem_array *tmp;

                queue.status = -EINTR;
                queue.sleeper = current;
                current->state = TASK_INTERRUPTIBLE;
                sem_unlock(semid);

                schedule();

                tmp = sem_lock(semid);
                if (tmp == NULL) {
                        if (queue.prev != NULL)
                                BUG();
                        current->semsleeping = NULL;
                        error = -EIDRM;
                        goto out_free;
                }
                /*
                 * If queue.status == 1 we were woken up and
                 * have to retry, else we simply return.
                 * If an interrupt occurred we have to clean up the
                 * queue.
                 */
                if (queue.status == 1) {
                        error = try_atomic_semop(sma, sops, nsops, un,
                                                 current->pid, 0);
                        if (error <= 0)
                                break;
                } else {
                        error = queue.status;
                        if (queue.prev) /* got interrupted */
                                break;
                        /* Everything done by update_queue */
                        current->semsleeping = NULL;
                        goto out_unlock_free;
                }
        }
        current->semsleeping = NULL;
        remove_from_queue(sma, &queue);
update:
        if (alter)
                update_queue(sma);
out_unlock_free:
        sem_unlock(semid);
out_free:
        if (sops != fast_sops)
                kfree(sops);
        return error;
}
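/*
 * A minimal (hypothetical) userspace use of the semop path above: a
 * one-element semaphore set acting as a binary lock. SEM_UNDO is what
 * makes the kernel allocate the undo structures handled above; the
 * decrement may put the caller to sleep in sys_semop, and the
 * increment wakes queued waiters via update_queue. The union semun
 * definition is the caller's responsibility on Linux.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
        int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        if (id < 0) {
                perror("semget");
                return 1;
        }

        /* Initialise semaphore 0 to 1 (unlocked). */
        union semun { int val; } arg = { .val = 1 };
        semctl(id, 0, SETVAL, arg);

        struct sembuf lock   = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
        struct sembuf unlock = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };

        if (semop(id, &lock, 1) < 0)    /* P(): may sleep in sys_semop */
                perror("semop lock");
        /* ... critical section ... */
        if (semop(id, &unlock, 1) < 0)  /* V(): wakes queued waiters */
                perror("semop unlock");

        semctl(id, 0, IPC_RMID);
        return 0;
}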
static int semctl_main(int semid, int semnum, int cmd, int version,
                union semun arg)
{
        struct sem_array *sma;
        struct sem *curr;
        int err;
        ushort fast_sem_io[SEMMSL_FAST];
        ushort *sem_io = fast_sem_io;
        int nsems;

        sma = sem_lock(semid);
        if (sma == NULL)
                return -EINVAL;

        nsems = sma->sem_nsems;

        err = -EIDRM;
        if (sem_checkid(sma, semid))
                goto out_unlock;

        err = -EACCES;
        if (ipcperms(&sma->sem_perm,
                     (cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
                goto out_unlock;

        switch (cmd) {
        case GETALL:
        {
                ushort *array = arg.array;
                int i;

                if (nsems > SEMMSL_FAST) {
                        sem_unlock(semid);
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if (sem_io == NULL)
                                return -ENOMEM;
                        err = sem_revalidate(semid, sma, nsems, S_IRUGO);
                        if (err)
                                goto out_free;
                }

                for (i = 0; i < sma->sem_nsems; i++)
                        sem_io[i] = sma->sem_base[i].semval;
                sem_unlock(semid);
                err = 0;
                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
                        err = -EFAULT;
                goto out_free;
        }
        case SETALL:
        {
                int i;
                struct sem_undo *un;

                sem_unlock(semid);

                if (nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if (sem_io == NULL)
                                return -ENOMEM;
                }

                if (copy_from_user(sem_io, arg.array, nsems*sizeof(ushort))) {
                        err = -EFAULT;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
                                err = -ERANGE;
                                goto out_free;
                        }
                }
                err = sem_revalidate(semid, sma, nsems, S_IWUGO);
                if (err)
                        goto out_free;

                for (i = 0; i < nsems; i++)
                        sma->sem_base[i].semval = sem_io[i];
                for (un = sma->undo; un; un = un->id_next)
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
                sma->sem_ctime = CURRENT_TIME;
                /* maybe some queued-up processes were waiting for this */
                update_queue(sma);
                err = 0;
                goto out_unlock;
        }
        case IPC_STAT:
        {
                struct semid64_ds tbuf;

                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime = sma->sem_otime;
                tbuf.sem_ctime = sma->sem_ctime;
                tbuf.sem_nsems = sma->sem_nsems;
                sem_unlock(semid);
                if (copy_semid_to_user(arg.buf, &tbuf, version))
                        return -EFAULT;
                return 0;
        }
        /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
        }

        err = -EINVAL;
        if (semnum < 0 || semnum >= nsems)
                goto out_unlock;

        curr = &sma->sem_base[semnum];

        switch (cmd) {
        case GETVAL:
                err = curr->semval;
                goto out_unlock;
        case GETPID:
                err = curr->sempid & 0xffff;
                goto out_unlock;
        case GETNCNT:
                err = count_semncnt(sma, semnum);
                goto out_unlock;
        case GETZCNT:
                err = count_semzcnt(sma, semnum);
                goto out_unlock;
        case SETVAL:
        {
                int val = arg.val;
                struct sem_undo *un;

                err = -ERANGE;
                if (val > SEMVMX || val < 0)
                        goto out_unlock;

                for (un = sma->undo; un; un = un->id_next)
                        un->semadj[semnum] = 0;
                curr->semval = val;
                curr->sempid = current->pid;
                sma->sem_ctime = CURRENT_TIME;
                /* maybe some queued-up processes were waiting for this */
                update_queue(sma);
                err = 0;
                goto out_unlock;
        }
        }
out_unlock:
        sem_unlock(semid);
out_free:
        if (sem_io != fast_sem_io)
                ipc_free(sem_io, sizeof(ushort)*nsems);
        return err;
}
static int semctl_nolock(int semid, int semnum, int cmd, int version,
                union semun arg)
{
        int err = -EINVAL;

        switch (cmd) {
        case IPC_INFO:
        case SEM_INFO:
        {
                struct seminfo seminfo;
                int max_id;

                memset(&seminfo, 0, sizeof(seminfo));
                seminfo.semmni = sc_semmni;
                seminfo.semmns = sc_semmns;
                seminfo.semmsl = sc_semmsl;
                seminfo.semopm = sc_semopm;
                seminfo.semvmx = SEMVMX;
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
                down(&sem_ids.sem);
                if (cmd == SEM_INFO) {
                        seminfo.semusz = sem_ids.in_use;
                        seminfo.semaem = used_sems;
                } else {
                        seminfo.semusz = SEMUSZ;
                        seminfo.semaem = SEMAEM;
                }
                max_id = sem_ids.max_id;
                up(&sem_ids.sem);
                if (copy_to_user(arg.__buf, &seminfo, sizeof(struct seminfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case SEM_STAT:
        {
                struct sem_array *sma;
                struct semid64_ds tbuf;
                int id;

                if (semid >= sem_ids.size)
                        return -EINVAL;

                memset(&tbuf, 0, sizeof(tbuf));

                sma = sem_lock(semid);
                if (sma == NULL)
                        return -EINVAL;

                err = -EACCES;
                if (ipcperms(&sma->sem_perm, S_IRUGO))
                        goto out_unlock;

                id = sem_buildid(semid, sma->sem_perm.seq);

                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime = sma->sem_otime;
                tbuf.sem_ctime = sma->sem_ctime;
                tbuf.sem_nsems = sma->sem_nsems;
                sem_unlock(semid);
                if (copy_semid_to_user(arg.buf, &tbuf, version))
                        return -EFAULT;
                return id;
        }
        default:
                return -EINVAL;
        }
        return err;
out_unlock:
        sem_unlock(semid);
        return err;
}
asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
                long msgtyp, int msgflg)
{
        struct msg_queue *msq;
        struct msg_receiver msr_d;
        struct list_head *tmp;
        struct msg_msg *msg, *found_msg;
        int err;
        int mode;

        if (msqid < 0 || (long) msgsz < 0)
                return -EINVAL;
        mode = convert_mode(&msgtyp, msgflg);

        msq = msg_lock(msqid);
        if (msq == NULL)
                return -EINVAL;
retry:
        err = -EIDRM;
        if (msg_checkid(msq, msqid))
                goto out_unlock;

        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IRUGO))
                goto out_unlock;

        tmp = msq->q_messages.next;
        found_msg = NULL;
        while (tmp != &msq->q_messages) {
                msg = list_entry(tmp, struct msg_msg, m_list);
                if (testmsg(msg, msgtyp, mode) &&
                    !security_msg_queue_msgrcv(msq, msg, current,
                                               msgtyp, mode)) {
                        found_msg = msg;
                        if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                                found_msg = msg;
                                msgtyp = msg->m_type - 1;
                        } else {
                                found_msg = msg;
                                break;
                        }
                }
                tmp = tmp->next;
        }
        if (found_msg) {
                msg = found_msg;
                if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                        err = -E2BIG;
                        goto out_unlock;
                }
                list_del(&msg->m_list);
                msq->q_qnum--;
                msq->q_rtime = get_seconds();
                msq->q_lrpid = current->tgid;
                msq->q_cbytes -= msg->m_ts;
                atomic_sub(msg->m_ts, &msg_bytes);
                atomic_dec(&msg_hdrs);
                ss_wakeup(&msq->q_senders, 0);
                msg_unlock(msq);
out_success:
                msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
                if (put_user(msg->m_type, &msgp->mtype) ||
                    store_msg(msgp->mtext, msg, msgsz)) {
                        msgsz = -EFAULT;
                }
                free_msg(msg);
                return msgsz;
        } else {
                /* no message waiting. Prepare for pipelined
                 * receive.
                 */
                if (msgflg & IPC_NOWAIT) {
                        err = -ENOMSG;
                        goto out_unlock;
                }
                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;
                msr_d.r_mode = mode;
                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = msgsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                current->state = TASK_INTERRUPTIBLE;
                msg_unlock(msq);

                schedule();

                /*
                 * The below optimisation is buggy. A sleeping thread that is
                 * woken up checks if it got a message and if so, copies it to
                 * userspace and just returns without taking any locks.
                 * But this return to user space can be faster than the message
                 * send, and if the receiver immediately exits the
                 * wake_up_process performed by the sender will oops.
                 */
#if 0
                msg = (struct msg_msg *) msr_d.r_msg;
                if (!IS_ERR(msg))
                        goto out_success;
#endif

                msq = msg_lock(msqid);
                msg = (struct msg_msg *) msr_d.r_msg;
                if (!IS_ERR(msg)) {
                        /* our message arrived while we waited for
                         * the spinlock. Process it.
                         */
                        if (msq)
                                msg_unlock(msq);
                        goto out_success;
                }
                err = PTR_ERR(msg);
                if (err == -EAGAIN) {
                        if (!msq)
                                BUG();
                        list_del(&msr_d.r_list);
                        if (signal_pending(current))
                                err = -EINTR;
                        else
                                goto retry;
                }
        }
out_unlock:
        if (msq)
                msg_unlock(msq);
        return err;
}
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz,
                int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        long mtype;
        int err;

        if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        if (mtype < 1)
                return -EINVAL;

        msg = load_msg(msgp->mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->m_type = mtype;
        msg->m_ts = msgsz;

        msq = msg_lock(msqid);
        err = -EINVAL;
        if (msq == NULL)
                goto out_free;
retry:
        err = -EIDRM;
        if (msg_checkid(msq, msqid))
                goto out_unlock_free;

        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IWUGO))
                goto out_unlock_free;

        err = security_msg_queue_msgsnd(msq, msg, msgflg);
        if (err)
                goto out_unlock_free;

        if (msgsz + msq->q_cbytes > msq->q_qbytes ||
            1 + msq->q_qnum > msq->q_qbytes) {
                struct msg_sender s;

                if (msgflg & IPC_NOWAIT) {
                        err = -EAGAIN;
                        goto out_unlock_free;
                }
                ss_add(msq, &s);
                msg_unlock(msq);
                schedule();
                current->state = TASK_RUNNING;

                msq = msg_lock(msqid);
                err = -EIDRM;
                if (msq == NULL)
                        goto out_free;
                ss_del(&s);

                if (signal_pending(current)) {
                        err = -EINTR;
                        goto out_unlock_free;
                }
                goto retry;
        }

        msq->q_lspid = current->tgid;
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                msq->q_qnum++;
                atomic_add(msgsz, &msg_bytes);
                atomic_inc(&msg_hdrs);
        }

        err = 0;
        msg = NULL;

out_unlock_free:
        msg_unlock(msq);
out_free:
        if (msg != NULL)
                free_msg(msg);
        return err;
}