long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {
				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1) {
					/* keep scanning for a lower type */
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message. */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, the queue has not yet been
		 * destroyed. rcu_read_lock() prevents preemption between
		 * reading r_msg and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat the test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
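/*
 * Not part of the kernel source: a minimal userspace sketch of the receive
 * semantics do_msgrcv() implements, via the standard libc wrappers (msgget,
 * msgsnd, msgrcv, msgctl). The queue key and message layout are illustrative
 * assumptions, not anything defined above.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {		/* hypothetical message layout */
	long mtype;
	char mtext[64];
};

int main(void)
{
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct my_msg m = { .mtype = 3 };

	strcpy(m.mtext, "hello");
	msgsnd(id, &m, sizeof(m.mtext), 0);

	/* msgtyp = -5 maps to SEARCH_LESSEQUAL in convert_mode(): the
	 * scan above looks for the lowest type <= 5, here our type-3
	 * message. */
	if (msgrcv(id, &m, sizeof(m.mtext), -5, 0) >= 0)
		printf("got type %ld: %s\n", m.mtype, m.mtext);

	/* Queue now empty: IPC_NOWAIT makes the blocking path above
	 * surface as ENOMSG instead of sleeping. */
	if (msgrcv(id, &m, sizeof(m.mtext), 0, IPC_NOWAIT) < 0 &&
	    errno == ENOMSG)
		printf("queue empty, would have blocked\n");

	msgctl(id, IPC_RMID, NULL);
	return 0;
}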
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it is not enough to
		 * set all member fields.
		 */
		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down(&msg_ids.sem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		up(&msg_ids.sem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;
		if (cmd == MSG_STAT && msqid >= msg_ids.size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq, msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msqid);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	down(&msg_ids.sem);
	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;
	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = CURRENT_TIME;
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msqid);
		break;
	}
	case IPC_RMID:
		freeque(msqid);
		break;
	}
	err = 0;
out_up:
	up(&msg_ids.sem);
	return err;
out_unlock_up:
	msg_unlock(msqid);
	goto out_up;
out_unlock:
	msg_unlock(msqid);
	return err;
}
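/*
 * Not part of the kernel source: a minimal userspace sketch of the three
 * msgctl() paths handled above (IPC_STAT, IPC_SET, IPC_RMID). Error
 * handling is trimmed for brevity; the queue itself is an assumption.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct msqid_ds ds;

	/* IPC_STAT: the kernel copies q_qbytes and friends out to us. */
	if (msgctl(id, IPC_STAT, &ds) == 0)
		printf("qbytes=%lu qnum=%lu\n",
		       (unsigned long)ds.msg_qbytes,
		       (unsigned long)ds.msg_qnum);

	/* IPC_SET: only uid/gid/mode/qbytes are taken from the buffer;
	 * raising qbytes above msg_ctlmnb needs CAP_SYS_RESOURCE. */
	ds.msg_perm.mode = 0644;
	msgctl(id, IPC_SET, &ds);

	/* IPC_RMID: destroys the queue and wakes all sleepers. */
	msgctl(id, IPC_RMID, NULL);
	return 0;
}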
asmlinkage long sys_msgrcv(int msqid, struct msgbuf *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_receiver msr_d;
	struct list_head *tmp;
	struct msg_msg *msg, *found_msg;
	int err;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock(msqid);
	if (msq == NULL)
		return -EINVAL;
retry:
	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock;
	err = -EACCES;
	if (ipcperms(&msq->q_perm, S_IRUGO))
		goto out_unlock;

	tmp = msq->q_messages.next;
	found_msg = NULL;
	while (tmp != &msq->q_messages) {
		msg = list_entry(tmp, struct msg_msg, m_list);
		if (testmsg(msg, msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       msgtyp, mode)) {
			found_msg = msg;
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				/* keep scanning for a lower type */
				msgtyp = msg->m_type - 1;
			} else {
				break;
			}
		}
		tmp = tmp->next;
	}
	if (found_msg) {
		msg = found_msg;
		if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
			err = -E2BIG;
			goto out_unlock;
		}
		list_del(&msg->m_list);
		msq->q_qnum--;
		msq->q_rtime = get_seconds();
		msq->q_lrpid = current->tgid;
		msq->q_cbytes -= msg->m_ts;
		atomic_sub(msg->m_ts, &msg_bytes);
		atomic_dec(&msg_hdrs);
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
out_success:
		msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
		if (put_user(msg->m_type, &msgp->mtype) ||
		    store_msg(msgp->mtext, msg, msgsz)) {
			msgsz = -EFAULT;
		}
		free_msg(msg);
		return msgsz;
	} else {
		/* No message waiting. Prepare for a pipelined receive. */
		if (msgflg & IPC_NOWAIT) {
			err = -ENOMSG;
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/*
		 * The optimisation below is buggy. A sleeping thread that is
		 * woken up checks if it got a message and, if so, copies it
		 * to userspace and returns without taking any locks.
		 * But this return to user space can be faster than the
		 * message send, and if the receiver immediately exits, the
		 * wake_up_process() performed by the sender will oops.
		 */
#if 0
		msg = (struct msg_msg *)msr_d.r_msg;
		if (!IS_ERR(msg))
			goto out_success;
#endif

		msq = msg_lock(msqid);
		msg = (struct msg_msg *)msr_d.r_msg;
		if (!IS_ERR(msg)) {
			/*
			 * Our message arrived while we waited for
			 * the spinlock. Process it.
			 */
			if (msq)
				msg_unlock(msq);
			goto out_success;
		}
		err = PTR_ERR(msg);
		if (err == -EAGAIN) {
			if (!msq)
				BUG();
			list_del(&msr_d.r_list);
			if (signal_pending(current))
				err = -EINTR;
			else
				goto retry;
		}
	}
out_unlock:
	if (msq)
		msg_unlock(msq);
	return err;
}
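/*
 * Not part of the kernel source: a small userspace sketch of the
 * signal_pending() path above. A blocking msgrcv() on an empty queue is
 * interrupted by SIGALRM and fails with EINTR instead of looping back to
 * "retry". The one-second timeout is an arbitrary choice for the demo.
 */
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/msg.h>

static void on_alarm(int sig)
{
	(void)sig;	/* just interrupt the sleeping receiver */
}

int main(void)
{
	struct { long mtype; char mtext[16]; } m;
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct sigaction sa = { .sa_handler = on_alarm };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);	/* no SA_RESTART: let EINTR through */
	alarm(1);

	if (msgrcv(id, &m, sizeof(m.mtext), 0, 0) < 0 && errno == EINTR)
		printf("receive interrupted by signal\n");

	msgctl(id, IPC_RMID, NULL);
	return 0;
}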