asmlinkage long sys_msgget (key_t key, int msgflg)
{
    int id, ret = -EPERM;
    struct msg_queue *msq;

    down(&msg_ids.sem);
    if (key == IPC_PRIVATE)
        ret = newque(key, msgflg);
    else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
        if (!(msgflg & IPC_CREAT))
            ret = -ENOENT;
        else
            ret = newque(key, msgflg);
    } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
        ret = -EEXIST;
    } else {
        msq = msg_lock(id);
        if (msq == NULL)
            BUG();
        if (ipcperms(&msq->q_perm, msgflg))
            ret = -EACCES;
        else
            ret = msg_buildid(id, msq->q_perm.seq);
        msg_unlock(id);
    }
    up(&msg_ids.sem);
    TRACE_IPC(TRACE_EV_IPC_MSG_CREATE, ret, msgflg);
    return ret;
}
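For context, the lookup-or-create logic above is what user space reaches through the msgget(2) wrapper. The short sketch below is illustrative only: the key value and permission bits are arbitrary assumptions, not values drawn from the kernel source.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
    /* Arbitrary example key; IPC_CREAT | IPC_EXCL maps onto the
     * -EEXIST branch above when the key is already in use. */
    key_t key = 0x1234;
    int msqid = msgget(key, IPC_CREAT | IPC_EXCL | 0666);

    if (msqid == -1) {
        perror("msgget");   /* EEXIST, ENOENT, EACCES, ... */
        return 1;
    }
    printf("queue id: %d\n", msqid);
    return 0;
}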
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset,
                                 int length, int *eof, void *data)
{
    off_t pos = 0;
    off_t begin = 0;
    int i, len = 0;

    down(&msg_ids.sem);
    len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");

    for (i = 0; i <= msg_ids.max_id; i++) {
        struct msg_queue *msq;

        msq = msg_lock(i);
        if (msq != NULL) {
            len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                msq->q_perm.key,
                msg_buildid(i, msq->q_perm.seq),
                msq->q_perm.mode,
                msq->q_cbytes,
                msq->q_qnum,
                msq->q_lspid,
                msq->q_lrpid,
                msq->q_perm.uid,
                msq->q_perm.gid,
                msq->q_perm.cuid,
                msq->q_perm.cgid,
                msq->q_stime,
                msq->q_rtime,
                msq->q_ctime);
            msg_unlock(i);

            pos += len;
            if (pos < offset) {
                len = 0;
                begin = pos;
            }
            if (pos > offset + length)
                goto done;
        }
    }
    *eof = 1;
done:
    up(&msg_ids.sem);
    *start = buffer + (offset - begin);
    len -= (offset - begin);
    if (len > length)
        len = length;
    if (len < 0)
        len = 0;
    return len;
}
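The handler above backs /proc/sysvipc/msg. A minimal user-space reader (a hypothetical example, not part of the kernel sources) just streams the file and sees the header plus one formatted line per queue:

#include <stdio.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/sysvipc/msg", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    /* First line is the header emitted by sysvipc_msg_read_proc(). */
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}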
void msg_exit_ns(struct ipc_namespace *ns)
{
    int i;
    struct msg_queue *msq;

    mutex_lock(&msg_ids(ns).mutex);
    for (i = 0; i <= msg_ids(ns).max_id; i++) {
        msq = msg_lock(ns, i);
        if (msq == NULL)
            continue;

        freeque(ns, msq, i);
    }
    mutex_unlock(&msg_ids(ns).mutex);

    ipc_fini_ids(ns->ids[IPC_MSG_IDS]);
    kfree(ns->ids[IPC_MSG_IDS]);
    ns->ids[IPC_MSG_IDS] = NULL;
}
asmlinkage long sys_msgget(key_t key, int msgflg)
{
    struct msg_queue *msq;
    int id, ret = -EPERM;
    struct ipc_namespace *ns;

    ns = current->nsproxy->ipc_ns;

    mutex_lock(&msg_ids(ns).mutex);
    if (key == IPC_PRIVATE)
        ret = newque(ns, key, msgflg);
    else if ((id = ipc_findkey(&msg_ids(ns), key)) == -1) { /* key not used */
        if (!(msgflg & IPC_CREAT))
            ret = -ENOENT;
        else
            ret = newque(ns, key, msgflg);
    } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
        ret = -EEXIST;
    } else {
        msq = msg_lock(ns, id);
        BUG_ON(msq == NULL);
        if (ipcperms(&msq->q_perm, msgflg))
            ret = -EACCES;
        else {
            int qid = msg_buildid(ns, id, msq->q_perm.seq);

            ret = security_msg_queue_associate(msq, msgflg);
            if (!ret)
                ret = qid;
        }
        msg_unlock(msq);
    }
    mutex_unlock(&msg_ids(ns).mutex);

    return ret;
}
int msg_callback(int mode, struct kern_ipc_perm *ipc, struct ipc_namespace *ns)
{
    switch (mode) {
    case 0:
        printf("key msqid owner perms used-bytes messages\n");
        break;
    case 1: {
        struct msg_queue *msg = msg_lock(ns, ipc->id);

        printf("0x%08x %-10d %-10d %-10o %-12ld %-12ld\n",
               ipc->key, ipc->id, ipc->uid, ipc->mode & 0777,
               msg->q_cbytes, msg->q_qnum);
        /* release the queue lock taken by msg_lock() above */
        msg_unlock(msg);
    }
        break;
    case 2:
        break;
    }
    return 1;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
    struct msg_queue *msq;
    int err, version;
    struct ipc_namespace *ns;

    if (msqid < 0 || cmd < 0)
        return -EINVAL;

    version = ipc_parse_version(&cmd);
    ns = current->nsproxy->ipc_ns;

    switch (cmd) {
    case IPC_INFO:
    case MSG_INFO:
    {
        struct msginfo msginfo;
        int max_id;

        if (!buf)
            return -EFAULT;
        /*
         * We must not return kernel stack data.
         * due to padding, it's not enough
         * to set all member fields.
         */
        err = security_msg_queue_msgctl(NULL, cmd);
        if (err)
            return err;

        memset(&msginfo, 0, sizeof(msginfo));
        msginfo.msgmni = ns->msg_ctlmni;
        msginfo.msgmax = ns->msg_ctlmax;
        msginfo.msgmnb = ns->msg_ctlmnb;
        msginfo.msgssz = MSGSSZ;
        msginfo.msgseg = MSGSEG;
        down_read(&msg_ids(ns).rw_mutex);
        if (cmd == MSG_INFO) {
            msginfo.msgpool = msg_ids(ns).in_use;
            msginfo.msgmap = atomic_read(&ns->msg_hdrs);
            msginfo.msgtql = atomic_read(&ns->msg_bytes);
        } else {
            msginfo.msgmap = MSGMAP;
            msginfo.msgpool = MSGPOOL;
            msginfo.msgtql = MSGTQL;
        }
        max_id = ipc_get_maxid(&msg_ids(ns));
        up_read(&msg_ids(ns).rw_mutex);
        if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
            return -EFAULT;
        return (max_id < 0) ? 0 : max_id;
    }
    case MSG_STAT:    /* msqid is an index rather than a msg queue id */
    case IPC_STAT:
    {
        struct msqid64_ds tbuf;
        int success_return;

        if (!buf)
            return -EFAULT;

        if (cmd == MSG_STAT) {
            msq = msg_lock(ns, msqid);
            if (IS_ERR(msq))
                return PTR_ERR(msq);
            success_return = msq->q_perm.id;
        } else {
            msq = msg_lock_check(ns, msqid);
            if (IS_ERR(msq))
                return PTR_ERR(msq);
            success_return = 0;
        }
        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IRUGO))
            goto out_unlock;

        err = security_msg_queue_msgctl(msq, cmd);
        if (err)
            goto out_unlock;

        memset(&tbuf, 0, sizeof(tbuf));

        kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
        tbuf.msg_stime  = msq->q_stime;
        tbuf.msg_rtime  = msq->q_rtime;
        tbuf.msg_ctime  = msq->q_ctime;
        tbuf.msg_cbytes = msq->q_cbytes;
        tbuf.msg_qnum   = msq->q_qnum;
        tbuf.msg_qbytes = msq->q_qbytes;
        tbuf.msg_lspid  = msq->q_lspid;
        tbuf.msg_lrpid  = msq->q_lrpid;
        msg_unlock(msq);
        if (copy_msqid_to_user(buf, &tbuf, version))
            return -EFAULT;
        return success_return;
    }
    case IPC_SET:
    case IPC_RMID:
        err = msgctl_down(ns, msqid, cmd, buf, version);
        return err;
    default:
        return -EINVAL;
    }

out_unlock:
    msg_unlock(msq);
    return err;
}
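As a rough user-space counterpart to the IPC_STAT and IPC_SET branches above, the following sketch queries a queue and then raises its byte limit. It is a minimal illustration: the helper name and the assumption that msqid came from an earlier msgget() call are mine, not part of the kernel source.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

/* Query a queue's status, then double its byte limit; msqid is a
 * placeholder obtained earlier from msgget(). */
static int show_and_resize(int msqid)
{
    struct msqid_ds ds;

    if (msgctl(msqid, IPC_STAT, &ds) == -1) {
        perror("msgctl(IPC_STAT)");
        return -1;
    }
    printf("qnum=%lu cbytes=%lu qbytes=%lu\n",
           (unsigned long)ds.msg_qnum,
           (unsigned long)ds.msg_cbytes,
           (unsigned long)ds.msg_qbytes);

    ds.msg_qbytes *= 2;    /* may require CAP_SYS_RESOURCE, as in the IPC_SET path */
    if (msgctl(msqid, IPC_SET, &ds) == -1) {
        perror("msgctl(IPC_SET)");
        return -1;
    }
    return 0;
}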
asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
                            long msgtyp, int msgflg)
{
    struct msg_queue *msq;
    struct msg_receiver msr_d;
    struct list_head* tmp;
    struct msg_msg* msg, *found_msg;
    int err;
    int mode;

    if (msqid < 0 || (long) msgsz < 0)
        return -EINVAL;
    mode = convert_mode(&msgtyp,msgflg);

    msq = msg_lock(msqid);
    if(msq==NULL)
        return -EINVAL;
retry:
    err = -EIDRM;
    if (msg_checkid(msq,msqid))
        goto out_unlock;

    err=-EACCES;
    if (ipcperms (&msq->q_perm, S_IRUGO))
        goto out_unlock;

    tmp = msq->q_messages.next;
    found_msg=NULL;
    while (tmp != &msq->q_messages) {
        msg = list_entry(tmp,struct msg_msg,m_list);
        if(testmsg(msg,msgtyp,mode)) {
            found_msg = msg;
            if(mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                found_msg=msg;
                msgtyp=msg->m_type-1;
            } else {
                found_msg=msg;
                break;
            }
        }
        tmp = tmp->next;
    }
    if(found_msg) {
        msg=found_msg;
        if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
            err=-E2BIG;
            goto out_unlock;
        }
        list_del(&msg->m_list);
        msq->q_qnum--;
        msq->q_rtime = CURRENT_TIME;
        msq->q_lrpid = current->pid;
        msq->q_cbytes -= msg->m_ts;
        atomic_sub(msg->m_ts,&msg_bytes);
        atomic_dec(&msg_hdrs);
        ss_wakeup(&msq->q_senders,0);
        msg_unlock(msqid);
out_success:
        msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
        if (put_user (msg->m_type, &msgp->mtype) ||
            store_msg(msgp->mtext, msg, msgsz)) {
                msgsz = -EFAULT;
        }
        free_msg(msg);
        return msgsz;
    } else {
        struct msg_queue *t;
        /* no message waiting. Prepare for pipelined
         * receive.
         */
        if (msgflg & IPC_NOWAIT) {
            err=-ENOMSG;
            goto out_unlock;
        }
        list_add_tail(&msr_d.r_list,&msq->q_receivers);
        msr_d.r_tsk = current;
        msr_d.r_msgtype = msgtyp;
        msr_d.r_mode = mode;
        if(msgflg & MSG_NOERROR)
            msr_d.r_maxsize = INT_MAX;
        else
            msr_d.r_maxsize = msgsz;
        msr_d.r_msg = ERR_PTR(-EAGAIN);
        current->state = TASK_INTERRUPTIBLE;
        msg_unlock(msqid);

        schedule();
        current->state = TASK_RUNNING;

        /* This introduces a race so we must always take the slow path
        msg = (struct msg_msg*) msr_d.r_msg;
        if(!IS_ERR(msg))
            goto out_success;
        */

        t = msg_lock(msqid);
        if(t==NULL)
            msqid=-1;
        msg = (struct msg_msg*)msr_d.r_msg;
        if(!IS_ERR(msg)) {
            /* our message arrived while we waited for
             * the spinlock. Process it.
             */
            if(msqid!=-1)
                msg_unlock(msqid);
            goto out_success;
        }
        err = PTR_ERR(msg);
        if(err == -EAGAIN) {
            if(msqid==-1)
                BUG();
            list_del(&msr_d.r_list);
            if (signal_pending(current))
                err=-EINTR;
            else
                goto retry;
        }
    }
out_unlock:
    if(msqid!=-1)
        msg_unlock(msqid);
    return err;
}
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    long mtype;
    int err;

    if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
        return -EINVAL;
    if (get_user(mtype, &msgp->mtype))
        return -EFAULT;
    if (mtype < 1)
        return -EINVAL;

    msg = load_msg(msgp->mtext, msgsz);
    if(IS_ERR(msg))
        return PTR_ERR(msg);

    msg->m_type = mtype;
    msg->m_ts = msgsz;

    msq = msg_lock(msqid);
    err=-EINVAL;
    if(msq==NULL)
        goto out_free;
retry:
    err= -EIDRM;
    if (msg_checkid(msq,msqid))
        goto out_unlock_free;

    err=-EACCES;
    if (ipcperms(&msq->q_perm, S_IWUGO))
        goto out_unlock_free;

    if(msgsz + msq->q_cbytes > msq->q_qbytes ||
            1 + msq->q_qnum > msq->q_qbytes) {
        struct msg_sender s;

        if(msgflg&IPC_NOWAIT) {
            err=-EAGAIN;
            goto out_unlock_free;
        }
        ss_add(msq, &s);
        msg_unlock(msqid);
        schedule();
        current->state= TASK_RUNNING;

        msq = msg_lock(msqid);
        err = -EIDRM;
        if(msq==NULL)
            goto out_free;
        ss_del(&s);

        if (signal_pending(current)) {
            err=-EINTR;
            goto out_unlock_free;
        }
        goto retry;
    }

    msq->q_lspid = current->pid;
    msq->q_stime = CURRENT_TIME;

    if(!pipelined_send(msq,msg)) {
        /* no one is waiting for this message, enqueue it */
        list_add_tail(&msg->m_list,&msq->q_messages);
        msq->q_cbytes += msgsz;
        msq->q_qnum++;
        atomic_add(msgsz,&msg_bytes);
        atomic_inc(&msg_hdrs);
    }

    err = 0;
    msg = NULL;
out_unlock_free:
    msg_unlock(msqid);
out_free:
    if(msg!=NULL)
        free_msg(msg);
    return err;
}
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
    int err, version;
    struct msg_queue *msq;
    struct msq_setbuf setbuf;
    struct kern_ipc_perm *ipcp;

    if (msqid < 0 || cmd < 0)
        return -EINVAL;

    version = ipc_parse_version(&cmd);

    switch (cmd) {
    case IPC_INFO:
    case MSG_INFO:
    {
        struct msginfo msginfo;
        int max_id;

        if (!buf)
            return -EFAULT;
        /* We must not return kernel stack data.
         * due to padding, it's not enough
         * to set all member fields.
         */
        memset(&msginfo,0,sizeof(msginfo));
        msginfo.msgmni = msg_ctlmni;
        msginfo.msgmax = msg_ctlmax;
        msginfo.msgmnb = msg_ctlmnb;
        msginfo.msgssz = MSGSSZ;
        msginfo.msgseg = MSGSEG;
        down(&msg_ids.sem);
        if (cmd == MSG_INFO) {
            msginfo.msgpool = msg_ids.in_use;
            msginfo.msgmap = atomic_read(&msg_hdrs);
            msginfo.msgtql = atomic_read(&msg_bytes);
        } else {
            msginfo.msgmap = MSGMAP;
            msginfo.msgpool = MSGPOOL;
            msginfo.msgtql = MSGTQL;
        }
        max_id = msg_ids.max_id;
        up(&msg_ids.sem);
        if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
            return -EFAULT;
        return (max_id < 0) ? 0 : max_id;
    }
    case MSG_STAT:
    case IPC_STAT:
    {
        struct msqid64_ds tbuf;
        int success_return;

        if (!buf)
            return -EFAULT;
        if(cmd == MSG_STAT && msqid >= msg_ids.size)
            return -EINVAL;

        memset(&tbuf,0,sizeof(tbuf));

        msq = msg_lock(msqid);
        if (msq == NULL)
            return -EINVAL;

        if(cmd == MSG_STAT) {
            success_return = msg_buildid(msqid, msq->q_perm.seq);
        } else {
            err = -EIDRM;
            if (msg_checkid(msq,msqid))
                goto out_unlock;
            success_return = 0;
        }
        err = -EACCES;
        if (ipcperms (&msq->q_perm, S_IRUGO))
            goto out_unlock;

        kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
        tbuf.msg_stime  = msq->q_stime;
        tbuf.msg_rtime  = msq->q_rtime;
        tbuf.msg_ctime  = msq->q_ctime;
        tbuf.msg_cbytes = msq->q_cbytes;
        tbuf.msg_qnum   = msq->q_qnum;
        tbuf.msg_qbytes = msq->q_qbytes;
        tbuf.msg_lspid  = msq->q_lspid;
        tbuf.msg_lrpid  = msq->q_lrpid;
        msg_unlock(msqid);
        if (copy_msqid_to_user(buf, &tbuf, version))
            return -EFAULT;
        return success_return;
    }
    case IPC_SET:
        if (!buf)
            return -EFAULT;
        if (copy_msqid_from_user (&setbuf, buf, version))
            return -EFAULT;
        break;
    case IPC_RMID:
        break;
    default:
        return -EINVAL;
    }

    down(&msg_ids.sem);
    msq = msg_lock(msqid);
    err=-EINVAL;
    if (msq == NULL)
        goto out_up;

    err = -EIDRM;
    if (msg_checkid(msq,msqid))
        goto out_unlock_up;
    ipcp = &msq->q_perm;
    err = -EPERM;
    if (current->euid != ipcp->cuid &&
        current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
        /* We _could_ check for CAP_CHOWN above, but we don't */
        goto out_unlock_up;

    switch (cmd) {
    case IPC_SET:
    {
        if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
            goto out_unlock_up;
        msq->q_qbytes = setbuf.qbytes;

        ipcp->uid = setbuf.uid;
        ipcp->gid = setbuf.gid;
        ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
            (S_IRWXUGO & setbuf.mode);
        msq->q_ctime = CURRENT_TIME;
        /* sleeping receivers might be excluded by
         * stricter permissions.
         */
        expunge_all(msq,-EAGAIN);
        /* sleeping senders might be able to send
         * due to a larger queue size.
         */
        ss_wakeup(&msq->q_senders,0);
        msg_unlock(msqid);
        break;
    }
    case IPC_RMID:
        freeque (msqid);
        break;
    }
    err = 0;
out_up:
    up(&msg_ids.sem);
    return err;
out_unlock_up:
    msg_unlock(msqid);
    goto out_up;
out_unlock:
    msg_unlock(msqid);
    return err;
}
asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
                            long msgtyp, int msgflg)
{
    struct msg_queue *msq;
    struct msg_receiver msr_d;
    struct list_head* tmp;
    struct msg_msg* msg, *found_msg;
    int err;
    int mode;

    if (msqid < 0 || (long) msgsz < 0)
        return -EINVAL;
    mode = convert_mode(&msgtyp,msgflg);

    msq = msg_lock(msqid);
    if(msq==NULL)
        return -EINVAL;
retry:
    err = -EIDRM;
    if (msg_checkid(msq,msqid))
        goto out_unlock;

    err=-EACCES;
    if (ipcperms (&msq->q_perm, S_IRUGO))
        goto out_unlock;

    tmp = msq->q_messages.next;
    found_msg=NULL;
    while (tmp != &msq->q_messages) {
        msg = list_entry(tmp,struct msg_msg,m_list);
        if(testmsg(msg,msgtyp,mode) &&
           !security_msg_queue_msgrcv(msq, msg, current, msgtyp, mode)) {
            found_msg = msg;
            if(mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                found_msg=msg;
                msgtyp=msg->m_type-1;
            } else {
                found_msg=msg;
                break;
            }
        }
        tmp = tmp->next;
    }
    if(found_msg) {
        msg=found_msg;
        if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
            err=-E2BIG;
            goto out_unlock;
        }
        list_del(&msg->m_list);
        msq->q_qnum--;
        msq->q_rtime = get_seconds();
        msq->q_lrpid = current->tgid;
        msq->q_cbytes -= msg->m_ts;
        atomic_sub(msg->m_ts,&msg_bytes);
        atomic_dec(&msg_hdrs);
        ss_wakeup(&msq->q_senders,0);
        msg_unlock(msq);
out_success:
        msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
        if (put_user (msg->m_type, &msgp->mtype) ||
            store_msg(msgp->mtext, msg, msgsz)) {
                msgsz = -EFAULT;
        }
        free_msg(msg);
        return msgsz;
    } else {
        /* no message waiting. Prepare for pipelined
         * receive.
         */
        if (msgflg & IPC_NOWAIT) {
            err=-ENOMSG;
            goto out_unlock;
        }
        list_add_tail(&msr_d.r_list,&msq->q_receivers);
        msr_d.r_tsk = current;
        msr_d.r_msgtype = msgtyp;
        msr_d.r_mode = mode;
        if(msgflg & MSG_NOERROR)
            msr_d.r_maxsize = INT_MAX;
        else
            msr_d.r_maxsize = msgsz;
        msr_d.r_msg = ERR_PTR(-EAGAIN);
        current->state = TASK_INTERRUPTIBLE;
        msg_unlock(msq);

        schedule();

        /*
         * The below optimisation is buggy. A sleeping thread that is
         * woken up checks if it got a message and if so, copies it to
         * userspace and just returns without taking any locks.
         * But this return to user space can be faster than the message
         * send, and if the receiver immediately exits the
         * wake_up_process performed by the sender will oops.
         */
#if 0
        msg = (struct msg_msg*) msr_d.r_msg;
        if(!IS_ERR(msg))
            goto out_success;
#endif

        msq = msg_lock(msqid);
        msg = (struct msg_msg*)msr_d.r_msg;
        if(!IS_ERR(msg)) {
            /* our message arrived while we waited for
             * the spinlock. Process it.
             */
            if(msq)
                msg_unlock(msq);
            goto out_success;
        }
        err = PTR_ERR(msg);
        if(err == -EAGAIN) {
            if(!msq)
                BUG();
            list_del(&msr_d.r_list);
            if (signal_pending(current))
                err=-EINTR;
            else
                goto retry;
        }
    }
out_unlock:
    if(msq)
        msg_unlock(msq);
    return err;
}
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
               size_t msgsz, long msgtyp, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    int mode;
    struct ipc_namespace *ns;

    if (msqid < 0 || (long) msgsz < 0)
        return -EINVAL;
    mode = convert_mode(&msgtyp, msgflg);
    ns = current->nsproxy->ipc_ns;

    msq = msg_lock(ns, msqid);
    if (msq == NULL)
        return -EINVAL;

    msg = ERR_PTR(-EIDRM);
    if (msg_checkid(ns, msq, msqid))
        goto out_unlock;

    for (;;) {
        struct msg_receiver msr_d;
        struct list_head *tmp;

        msg = ERR_PTR(-EACCES);
        if (ipcperms(&msq->q_perm, S_IRUGO))
            goto out_unlock;

        msg = ERR_PTR(-EAGAIN);
        tmp = msq->q_messages.next;
        while (tmp != &msq->q_messages) {
            struct msg_msg *walk_msg;

            walk_msg = list_entry(tmp, struct msg_msg, m_list);
            if (testmsg(walk_msg, msgtyp, mode) &&
                !security_msg_queue_msgrcv(msq, walk_msg, current,
                                           msgtyp, mode)) {

                msg = walk_msg;
                if (mode == SEARCH_LESSEQUAL &&
                        walk_msg->m_type != 1) {
                    msg = walk_msg;
                    msgtyp = walk_msg->m_type - 1;
                } else {
                    msg = walk_msg;
                    break;
                }
            }
            tmp = tmp->next;
        }
        if (!IS_ERR(msg)) {
            /*
             * Found a suitable message.
             * Unlink it from the queue.
             */
            if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                msg = ERR_PTR(-E2BIG);
                goto out_unlock;
            }
            list_del(&msg->m_list);
            msq->q_qnum--;
            msq->q_rtime = get_seconds();
            msq->q_lrpid = current->tgid;
            msq->q_cbytes -= msg->m_ts;
            atomic_sub(msg->m_ts, &msg_bytes);
            atomic_dec(&msg_hdrs);
            ss_wakeup(&msq->q_senders, 0);
            msg_unlock(msq);
            break;
        }

        /* No message waiting. Wait for a message */
        if (msgflg & IPC_NOWAIT) {
            msg = ERR_PTR(-ENOMSG);
            goto out_unlock;
        }
        list_add_tail(&msr_d.r_list, &msq->q_receivers);
        msr_d.r_tsk = current;
        msr_d.r_msgtype = msgtyp;
        msr_d.r_mode = mode;
        if (msgflg & MSG_NOERROR)
            msr_d.r_maxsize = INT_MAX;
        else
            msr_d.r_maxsize = msgsz;
        msr_d.r_msg = ERR_PTR(-EAGAIN);
        current->state = TASK_INTERRUPTIBLE;
        msg_unlock(msq);

        schedule();

        /* Lockless receive, part 1:
         * Disable preemption. We don't hold a reference to the queue
         * and getting a reference would defeat the idea of a lockless
         * operation, thus the code relies on rcu to guarantee the
         * existence of msq:
         * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
         * Thus if r_msg is -EAGAIN, then the queue has not yet been
         * destroyed.
         * rcu_read_lock() prevents preemption between reading r_msg
         * and the spin_lock() inside ipc_lock_by_ptr().
         */
        rcu_read_lock();

        /* Lockless receive, part 2:
         * Wait until pipelined_send or expunge_all are outside of
         * wake_up_process(). There is a race with exit(), see
         * ipc/mqueue.c for the details.
         */
        msg = (struct msg_msg*)msr_d.r_msg;
        while (msg == NULL) {
            cpu_relax();
            msg = (struct msg_msg *)msr_d.r_msg;
        }

        /* Lockless receive, part 3:
         * If there is a message or an error then accept it without
         * locking.
         */
        if (msg != ERR_PTR(-EAGAIN)) {
            rcu_read_unlock();
            break;
        }

        /* Lockless receive, part 3:
         * Acquire the queue spinlock.
         */
        ipc_lock_by_ptr(&msq->q_perm);
        rcu_read_unlock();

        /* Lockless receive, part 4:
         * Repeat test after acquiring the spinlock.
         */
        msg = (struct msg_msg*)msr_d.r_msg;
        if (msg != ERR_PTR(-EAGAIN))
            goto out_unlock;

        list_del(&msr_d.r_list);
        if (signal_pending(current)) {
            msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
            msg_unlock(msq);
            break;
        }
    }
    if (IS_ERR(msg))
        return PTR_ERR(msg);

    msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
    *pmtype = msg->m_type;
    if (store_msg(mtext, msg, msgsz))
        msgsz = -EFAULT;

    free_msg(msg);

    return msgsz;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
               size_t msgsz, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    int err;
    struct ipc_namespace *ns;

    ns = current->nsproxy->ipc_ns;

    if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
        return -EINVAL;
    if (mtype < 1)
        return -EINVAL;

    msg = load_msg(mtext, msgsz);
    if (IS_ERR(msg))
        return PTR_ERR(msg);

    msg->m_type = mtype;
    msg->m_ts = msgsz;

    msq = msg_lock(ns, msqid);
    err = -EINVAL;
    if (msq == NULL)
        goto out_free;

    err = -EIDRM;
    if (msg_checkid(ns, msq, msqid))
        goto out_unlock_free;

    for (;;) {
        struct msg_sender s;

        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IWUGO))
            goto out_unlock_free;

        err = security_msg_queue_msgsnd(msq, msg, msgflg);
        if (err)
            goto out_unlock_free;

        if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                1 + msq->q_qnum <= msq->q_qbytes) {
            break;
        }

        /* queue full, wait: */
        if (msgflg & IPC_NOWAIT) {
            err = -EAGAIN;
            goto out_unlock_free;
        }
        ss_add(msq, &s);
        ipc_rcu_getref(msq);
        msg_unlock(msq);
        schedule();

        ipc_lock_by_ptr(&msq->q_perm);
        ipc_rcu_putref(msq);
        if (msq->q_perm.deleted) {
            err = -EIDRM;
            goto out_unlock_free;
        }
        ss_del(&s);

        if (signal_pending(current)) {
            err = -ERESTARTNOHAND;
            goto out_unlock_free;
        }
    }

    msq->q_lspid = current->tgid;
    msq->q_stime = get_seconds();

    if (!pipelined_send(msq, msg)) {
        /* no one is waiting for this message, enqueue it */
        list_add_tail(&msg->m_list, &msq->q_messages);
        msq->q_cbytes += msgsz;
        msq->q_qnum++;
        atomic_add(msgsz, &msg_bytes);
        atomic_inc(&msg_hdrs);
    }

    err = 0;
    msg = NULL;

out_unlock_free:
    msg_unlock(msq);
out_free:
    if (msg != NULL)
        free_msg(msg);
    return err;
}
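Finally, a minimal user-space round trip ties the send and receive paths together. This is an illustrative sketch under the assumption of a private queue and an arbitrary message type; it is not taken from the kernel sources.

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {
    long mtype;
    char mtext[64];
};

int main(void)
{
    struct my_msg m = { .mtype = 1 };   /* arbitrary example type */
    int msqid = msgget(IPC_PRIVATE, 0600);

    if (msqid == -1) {
        perror("msgget");
        return 1;
    }
    strcpy(m.mtext, "hello");

    /* Enqueue: ends up in do_msgsnd()/sys_msgsnd() above. */
    if (msgsnd(msqid, &m, strlen(m.mtext) + 1, 0) == -1) {
        perror("msgsnd");
        return 1;
    }
    /* Dequeue the first message of any type (msgtyp == 0). */
    if (msgrcv(msqid, &m, sizeof(m.mtext), 0, 0) == -1) {
        perror("msgrcv");
        return 1;
    }
    printf("received: %s\n", m.mtext);

    msgctl(msqid, IPC_RMID, NULL);  /* clean up the private queue */
    return 0;
}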