static int __sc_pend(struct task_struct *curr, struct pt_regs *regs)
{
        char **mboxp, *msg;
        long timeout;
        int err;

        /* The mailbox storage must be writable by the caller, even
         * though we do not actually write to it here. */
        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(msg)))
                return -EFAULT;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(msg)))
                return -EFAULT;

        mboxp = (char **)__xn_reg_arg1(regs);
        timeout = __xn_reg_arg2(regs);

        msg = sc_pend(mboxp, timeout, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &msg, sizeof(msg));

        return err;
}
static int __sc_qinquiry(struct task_struct *curr, struct pt_regs *regs)
{
        int qid, count, err;
        char *msg;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(count)))
                return -EFAULT;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(msg)))
                return -EFAULT;

        qid = __xn_reg_arg1(regs);
        msg = sc_qinquiry(qid, &count, &err);

        if (!err) {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  &count, sizeof(count));
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &msg, sizeof(msg));
        }

        return err;
}
static int __wind_task_setmode(struct task_struct *curr, struct pt_regs *regs)
{
        int setmask, clrmask, mode_r;

        clrmask = __xn_reg_arg1(regs);
        setmask = __xn_reg_arg2(regs);

        /* Primary mode required: the current thread must be a valid
         * Xenomai thread. */
        mode_r = xnpod_set_thread_mode(xnpod_current_thread(),
                                       clrmask, setmask);

        if (__xn_reg_arg3(regs))
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &mode_r, sizeof(mode_r));

        return 0;
}
static int __sc_tinquiry(struct task_struct *curr, struct pt_regs *regs)
{
        int err, tid, pinfo[3];
        TCB *tcb;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(pinfo)))
                return -EFAULT;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(*tcb)))
                return -EFAULT;

        tid = __xn_reg_arg3(regs);
        tcb = sc_tinquiry(pinfo, tid, &err);

        if (!err) {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
                                  pinfo, sizeof(pinfo));
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  tcb, sizeof(*tcb));
        }

        return err;
}
static int __ui_twai_flg(struct pt_regs *regs)
{
        UINT flgptn, waiptn, wfmode;
        TMO tmout;
        ID flgid;
        ER err;

        flgid = __xn_reg_arg2(regs);
        waiptn = __xn_reg_arg3(regs);
        wfmode = __xn_reg_arg4(regs);
        tmout = __xn_reg_arg5(regs);

        err = twai_flg(&flgptn, flgid, waiptn, wfmode, tmout);

        if (err == E_OK) {
                if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
                                           &flgptn, sizeof(flgptn)))
                        return -EFAULT;
        } else if (err == E_RLWAI) {
                /* E_RLWAI means the wait was forcibly released; if the
                 * uITRON_TASK_RLWAIT bit is not set, the release came
                 * from a Linux signal rather than rel_wai(), so report
                 * -EINTR instead. */
                uitask_t *task = ui_current_task();
                if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
                        err = -EINTR;
        }

        return err;
}
static int __sc_fclear(struct task_struct *curr, struct pt_regs *regs)
{
        int fid, mask, mask_r, err;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(mask_r)))
                return -EFAULT;

        fid = __xn_reg_arg1(regs);
        mask = __xn_reg_arg2(regs);
        mask_r = sc_fclear(fid, mask, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &mask_r, sizeof(mask_r));

        return err;
}
static int __sc_screate(struct task_struct *curr, struct pt_regs *regs)
{
        int semid, opt, err;
        unsigned initval;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(semid)))
                return -EFAULT;

        initval = __xn_reg_arg1(regs);
        opt = __xn_reg_arg2(regs);
        semid = sc_screate(initval, opt, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &semid, sizeof(semid));

        return err;
}
static int __sc_hcreate(struct task_struct *curr, struct pt_regs *regs)
{
        unsigned log2psize;
        vrtx_hdesc_t hdesc;
        vrtxheap_t *heap;
        u_long heapsize;
        int err, hid;
        spl_t s;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(hdesc)))
                return -EFAULT;

        /* Size of heap space. */
        heapsize = __xn_reg_arg1(regs);
        /* Page size. */
        log2psize = (int)__xn_reg_arg2(regs);

        hid = sc_hcreate(NULL, heapsize, log2psize, &err);
        if (err)
                return err;

        xnlock_get_irqsave(&nklock, s);

        heap = xnmap_fetch(vrtx_heap_idmap, hid);
        if (heap) {             /* Paranoid. */
                heap->mm = curr->mm;
                hdesc.hid = hid;
                hdesc.hcb = &heap->sysheap;
                hdesc.hsize = xnheap_extentsize(&heap->sysheap);

                xnlock_put_irqrestore(&nklock, s);

                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &hdesc, sizeof(hdesc));
        } else {
                xnlock_put_irqrestore(&nklock, s);
                err = ER_ID;
        }

        return err;
}
static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
        xncompletion_t __user *u_completion;
        struct vrtx_arg_bulk bulk;
        int prio, mode, tid, err;
        vrtxtask_t *task;

        if (!__xn_access_ok
            (curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
                return -EFAULT;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(tid)))
                return -EFAULT;

        __xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
                            sizeof(bulk));

        /* Suggested task id. */
        tid = bulk.a1;
        /* Task priority. */
        prio = bulk.a2;
        /* Task mode. */
        mode = bulk.a3 | 0x100;

        /* Completion descriptor our parent thread is pending on. */
        u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

        task = xnmalloc(sizeof(*task));
        if (!task) {
                err = ER_TCB;
                goto done;
        }

        xnthread_clear_state(&task->threadbase, XNZOMBIE);

        tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0, &err);
        if (tid < 0) {
                if (u_completion)
                        xnshadow_signal_completion(u_completion, err);
        } else {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  &tid, sizeof(tid));
                err = xnshadow_map(&task->threadbase, u_completion);
        }

        if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
                xnfree(task);

  done:
        return err;
}
static int __sc_halloc(struct task_struct *curr, struct pt_regs *regs)
{
        vrtxheap_t *heap;
        char *buf = NULL;
        u_long bsize;
        int err, hid;
        spl_t s;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(buf)))
                return -EFAULT;

        hid = __xn_reg_arg1(regs);
        bsize = (u_long)__xn_reg_arg2(regs);

        xnlock_get_irqsave(&nklock, s);

        heap = xnmap_fetch(vrtx_heap_idmap, hid);
        if (!heap || heap->mm != curr->mm) {
                /* Allocation requests must be issued from the same
                 * process which created the heap. */
                err = ER_ID;
                goto unlock_and_exit;
        }

        buf = sc_halloc(hid, bsize, &err);

        /* Convert the allocated buffer's kernel-based address into the
         * equivalent area in the caller's address space. */
        if (!err)
                buf = heap->mapbase +
                        xnheap_mapped_offset(&heap->sysheap, buf);

  unlock_and_exit:
        xnlock_put_irqrestore(&nklock, s);

        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                          &buf, sizeof(buf));

        return err;
}
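/*
 * For reference, the release path has to apply the inverse translation
 * before handing the buffer back to the nucleus: the pointer the caller
 * passes in lives in its mapped view of the heap and must be rebased
 * onto the kernel extent first.  The following is a minimal sketch
 * only, assuming the same mapbase/offset convention as __sc_halloc()
 * above, the xnheap_mapped_address() helper and a VRTX-style
 * sc_hfree(hid, block, &err) service; it is not a verbatim copy of the
 * original free wrapper.
 */
static int __sc_hfree_sketch(struct task_struct *curr, struct pt_regs *regs)
{
        vrtxheap_t *heap;
        char *buf;
        int err, hid;
        spl_t s;

        hid = __xn_reg_arg1(regs);
        buf = (char *)__xn_reg_arg2(regs);

        xnlock_get_irqsave(&nklock, s);

        heap = xnmap_fetch(vrtx_heap_idmap, hid);
        if (!heap || heap->mm != curr->mm) {
                /* Same same-process rule as in __sc_halloc(). */
                err = ER_ID;
                goto unlock_and_exit;
        }

        /* Rebase the caller's mapped pointer onto the kernel extent. */
        buf = xnheap_mapped_address(&heap->sysheap,
                                    (caddr_t)buf - heap->mapbase);
        sc_hfree(hid, buf, &err);

  unlock_and_exit:
        xnlock_put_irqrestore(&nklock, s);

        return err;
}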
static int __sc_qpend(struct task_struct *curr, struct pt_regs *regs)
{
        long timeout;
        int qid, err;
        char *msg;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(msg)))
                return -EFAULT;

        qid = __xn_reg_arg1(regs);
        timeout = __xn_reg_arg2(regs);
        msg = sc_qpend(qid, timeout, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                                  &msg, sizeof(msg));

        return err;
}
static int __ui_cre_tsk(struct pt_regs *regs)
{
        xncompletion_t __user *u_completion;
        struct task_struct *p = current;
        unsigned long __user *u_mode_offset;
        uitask_t *task;
        T_CTSK pk_ctsk;
        ID tskid;
        spl_t s;
        ER err;

        tskid = __xn_reg_arg1(regs);

        if (__xn_safe_copy_from_user(&pk_ctsk,
                                     (void __user *)__xn_reg_arg2(regs),
                                     sizeof(pk_ctsk)))
                return -EFAULT;

        pk_ctsk.tskatr |= TA_SHADOW;

        /* Completion descriptor our parent thread is pending on. */
        u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);
        u_mode_offset = (unsigned long __user *)__xn_reg_arg4(regs);

        err = cre_tsk(tskid, &pk_ctsk);

        if (likely(err == E_OK)) {
                xnlock_get_irqsave(&nklock, s);
                task = xnmap_fetch(ui_task_idmap, tskid);
                if (!task) {
                        xnlock_put_irqrestore(&nklock, s);
                        err = E_OBJ;
                        goto fail;
                }
                strncpy(p->comm, xnthread_name(&task->threadbase),
                        sizeof(p->comm));
                p->comm[sizeof(p->comm) - 1] = '\0';
                xnlock_put_irqrestore(&nklock, s);
                /* Since we may not hold the superlock across a call
                 * to xnshadow_map(), we do have a small race window
                 * here, if the created task is killed then its TCB
                 * recycled before we could map it; however, the risk
                 * is mitigated by consistency checks performed in
                 * xnshadow_map(). */
                return xnshadow_map(&task->threadbase, u_completion,
                                    u_mode_offset);
        }

  fail:
        /* Unblock and pass back the error code. */
        if (u_completion)
                xnshadow_signal_completion(u_completion, err);

        return err;
}
static int __wind_sem_ccreate(struct task_struct *curr, struct pt_regs *regs)
{
        int flags, count;
        wind_sem_t *sem;
        SEM_ID sem_id;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(sem_id)))
                return -EFAULT;

        flags = __xn_reg_arg1(regs);
        count = __xn_reg_arg2(regs);

        sem = (wind_sem_t *)semCCreate(flags, count);
        if (!sem)
                return wind_errnoget();

        sem_id = sem->handle;
        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
                          &sem_id, sizeof(sem_id));

        return 0;
}
static int __wind_msgq_receive(struct task_struct *curr, struct pt_regs *regs)
{
        xnhandle_t handle = __xn_reg_arg1(regs);
        char tmp_buf[128], *msgbuf;
        wind_msgq_t *msgq;
        int timeout, err;
        unsigned nbytes;

        nbytes = __xn_reg_arg3(regs);
        timeout = __xn_reg_arg4(regs);

        if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), nbytes))
                return -EFAULT;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg5(regs), sizeof(nbytes)))
                return -EFAULT;

        msgq = (wind_msgq_t *)xnregistry_fetch(handle);
        if (!msgq)
                return S_objLib_OBJ_ID_ERROR;

        if (nbytes <= sizeof(tmp_buf))
                msgbuf = tmp_buf;
        else {
                msgbuf = (char *)xnmalloc(nbytes);
                if (!msgbuf)
                        return S_memLib_NOT_ENOUGH_MEMORY;
        }

        /* This is sub-optimal since we end up copying the data twice. */
        err = msgQReceive((MSG_Q_ID)msgq, msgbuf, nbytes, timeout);
        if (err != ERROR) {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  msgbuf, err);
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
                                  &err, sizeof(err));
                err = 0;
        } else
                err = wind_errnoget();

        if (msgbuf != tmp_buf)
                xnfree(msgbuf);

        return err;
}
static int __wind_wd_start(struct task_struct *curr, struct pt_regs *regs)
{
        wind_rholder_t *rh;
        long start_server;
        xnhandle_t handle;
        wind_wd_t *wd;
        int timeout;
        spl_t s;

        if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
                            sizeof(start_server)))
                return -EFAULT;

        handle = __xn_reg_arg1(regs);

        wd = (wind_wd_t *)xnregistry_fetch(handle);
        if (!wd)
                return S_objLib_OBJ_ID_ERROR;

        rh = wind_get_rholder();
        if (wd->rh != rh)
                /*
                 * User may not fiddle with watchdogs created from
                 * other processes.
                 */
                return S_objLib_OBJ_UNAVAILABLE;

        timeout = __xn_reg_arg2(regs);

        xnlock_get_irqsave(&nklock, s);

        if (wdStart((WDOG_ID)wd, timeout,
                    (wind_timer_t)&__wind_wd_handler, (long)wd) == ERROR) {
                xnlock_put_irqrestore(&nklock, s);
                return wind_errnoget();
        }

        wd->wdt.handler = (wind_timer_t)__xn_reg_arg3(regs);
        wd->wdt.arg = (long)__xn_reg_arg4(regs);
        start_server = rh->wdcount++ == 0;

        xnlock_put_irqrestore(&nklock, s);

        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
                          &start_server, sizeof(start_server));

        return 0;
}
static int __wind_msgq_send(struct pt_regs *regs)
{
        xnhandle_t handle = __xn_reg_arg1(regs);
        char tmp_buf[128], *msgbuf;
        wind_msgq_t *msgq;
        int timeout, prio;
        unsigned nbytes;
        STATUS err;

        nbytes = __xn_reg_arg3(regs);
        timeout = __xn_reg_arg4(regs);
        prio = __xn_reg_arg5(regs);

        if (timeout != NO_WAIT && !xnpod_primary_p())
                return -EPERM;

        msgq = (wind_msgq_t *)xnregistry_fetch(handle);
        if (!msgq)
                return S_objLib_OBJ_ID_ERROR;

        if (nbytes > msgq->msg_length)
                return S_msgQLib_INVALID_MSG_LENGTH;

        if (nbytes <= sizeof(tmp_buf))
                msgbuf = tmp_buf;
        else {
                msgbuf = (char *)xnmalloc(nbytes);
                if (!msgbuf)
                        return S_memLib_NOT_ENOUGH_MEMORY;
        }

        /* This is sub-optimal since we end up copying the data twice. */
        if (__xn_safe_copy_from_user(msgbuf, (void __user *)__xn_reg_arg2(regs),
                                     nbytes))
                err = -EFAULT;
        else {
                if (msgQSend((MSG_Q_ID)msgq, msgbuf, nbytes, timeout, prio)
                    == ERROR)
                        err = wind_errnoget();
                else
                        err = 0;
        }

        if (msgbuf != tmp_buf)
                xnfree(msgbuf);

        return err;
}
static int sys_rtdm_sendmsg(struct pt_regs *regs)
{
        struct task_struct *p = current;
        struct msghdr krnl_msg;

        if (unlikely(!access_rok(__xn_reg_arg2(regs), sizeof(krnl_msg)) ||
                     __xn_copy_from_user(&krnl_msg,
                                         (void __user *)__xn_reg_arg2(regs),
                                         sizeof(krnl_msg))))
                return -EFAULT;

        return __rt_dev_sendmsg(p, __xn_reg_arg1(regs), &krnl_msg,
                                __xn_reg_arg3(regs));
}
static int __rt_shm_heap_open(struct task_struct *curr, struct pt_regs *regs)
{
        unsigned long name;
        int size;
        int suprt, in_kheap;
        unsigned long off;
        unsigned long opaque;
        void *ret;
        extern void *_shm_alloc(unsigned long name, int size, int suprt,
                                int in_kheap, unsigned long *opaque);

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(size))
            || !__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
                               sizeof(off)))
                return 0;

        name = (unsigned long)__xn_reg_arg1(regs);
        /* Size of heap space. */
        __xn_copy_from_user(curr, &size, (void __user *)__xn_reg_arg2(regs),
                            sizeof(size));
        /* Creation mode. */
        suprt = (int)__xn_reg_arg3(regs);
        in_kheap = (int)__xn_reg_arg4(regs);

        ret = _shm_alloc(name, size, suprt, in_kheap, &opaque);
        if (!ret)
                goto free_and_fail;

        off = xnheap_mapped_offset((xnheap_t *)opaque, ret);
        size = (int)((xnheap_t *)opaque)->extentsize;

        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                          &size, sizeof(size));
        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
                          &off, sizeof(off));

        return (int)opaque;

  free_and_fail:
        return 0;
}
static int __sc_qecreate(struct task_struct *curr, struct pt_regs *regs)
{
        int qid, qsize, opt, err;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg4(regs), sizeof(qid)))
                return -EFAULT;

        qid = __xn_reg_arg1(regs);
        qsize = __xn_reg_arg2(regs);
        opt = __xn_reg_arg3(regs);
        qid = sc_qecreate(qid, qsize, opt, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
                                  &qid, sizeof(qid));

        return err;
}
static int __wind_sem_ccreate(struct pt_regs *regs)
{
        int flags, count;
        wind_sem_t *sem;
        SEM_ID sem_id;

        flags = __xn_reg_arg1(regs);
        count = __xn_reg_arg2(regs);

        sem = (wind_sem_t *)semCCreate(flags, count);
        if (!sem)
                return wind_errnoget();

        sem_id = sem->handle;

        return __xn_safe_copy_to_user((void __user *)__xn_reg_arg3(regs),
                                      &sem_id, sizeof(sem_id));
}
static int __wind_sem_bcreate(struct pt_regs *regs)
{
        SEM_B_STATE state;
        wind_sem_t *sem;
        SEM_ID sem_id;
        int flags;

        flags = __xn_reg_arg1(regs);
        state = __xn_reg_arg2(regs);

        sem = (wind_sem_t *)semBCreate(flags, state);
        if (!sem)
                return wind_errnoget();

        sem_id = sem->handle;

        return __xn_safe_copy_to_user((void __user *)__xn_reg_arg3(regs),
                                      &sem_id, sizeof(sem_id));
}
static int __ui_pol_flg(struct pt_regs *regs)
{
        UINT flgptn, waiptn, wfmode;
        ID flgid;
        ER err;

        flgid = __xn_reg_arg2(regs);
        waiptn = __xn_reg_arg3(regs);
        wfmode = __xn_reg_arg4(regs);

        err = pol_flg(&flgptn, flgid, waiptn, wfmode);

        if (err == E_OK &&
            __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
                                   &flgptn, sizeof(flgptn)))
                return -EFAULT;

        return err;
}
static int __wind_msgq_create(struct pt_regs *regs)
{
        int nb_msgs, length, flags;
        wind_msgq_t *msgq;
        MSG_Q_ID qid;

        nb_msgs = __xn_reg_arg1(regs);
        length = __xn_reg_arg2(regs);
        flags = __xn_reg_arg3(regs);

        msgq = (wind_msgq_t *)msgQCreate(nb_msgs, length, flags);
        if (!msgq)
                return wind_errnoget();

        qid = msgq->handle;

        return __xn_safe_copy_to_user((void __user *)__xn_reg_arg4(regs),
                                      &qid, sizeof(qid));
}
static int __sc_fpend(struct task_struct *curr, struct pt_regs *regs)
{
        int fid, mask, mask_r, opt, err;
        long timeout;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg5(regs), sizeof(mask_r)))
                return -EFAULT;

        fid = __xn_reg_arg1(regs);
        timeout = __xn_reg_arg2(regs);
        mask = __xn_reg_arg3(regs);
        opt = __xn_reg_arg4(regs);
        mask_r = sc_fpend(fid, timeout, mask, opt, &err);

        if (!err)
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
                                  &mask_r, sizeof(mask_r));

        return err;
}
static int __ui_trcv_msg(struct pt_regs *regs)
{
        ID mbxid = __xn_reg_arg2(regs);
        TMO tmout = __xn_reg_arg3(regs);
        T_MSG *pk_msg;
        ER err;

        err = trcv_msg(&pk_msg, mbxid, tmout);

        if (err == E_OK) {
                if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
                                           &pk_msg, sizeof(pk_msg)))
                        return -EFAULT;
        } else if (err == E_RLWAI) {
                /* As in __ui_twai_flg(): a forced release not caused
                 * by rel_wai() comes from a Linux signal, so return
                 * -EINTR in that case. */
                uitask_t *task = ui_current_task();
                if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
                        err = -EINTR;
        }

        return err;
}
static int sys_rtdm_recvmsg(struct pt_regs *regs)
{
        struct task_struct *p = current;
        struct user_msghdr krnl_msg;
        int ret;

        if (unlikely(!access_wok(__xn_reg_arg2(regs), sizeof(krnl_msg)) ||
                     __xn_copy_from_user(&krnl_msg,
                                         (void __user *)__xn_reg_arg2(regs),
                                         sizeof(krnl_msg))))
                return -EFAULT;

        ret = __rt_dev_recvmsg(p, __xn_reg_arg1(regs), &krnl_msg,
                               __xn_reg_arg3(regs));
        if (unlikely(ret < 0))
                return ret;

        if (unlikely(__xn_copy_to_user((void __user *)__xn_reg_arg2(regs),
                                       &krnl_msg, sizeof(krnl_msg))))
                return -EFAULT;

        return ret;
}
static int __wind_msgq_create(struct task_struct *curr, struct pt_regs *regs)
{
        int nb_msgs, length, flags;
        wind_msgq_t *msgq;
        MSG_Q_ID qid;

        if (!__xn_access_ok
            (curr, VERIFY_WRITE, __xn_reg_arg4(regs), sizeof(qid)))
                return -EFAULT;

        nb_msgs = __xn_reg_arg1(regs);
        length = __xn_reg_arg2(regs);
        flags = __xn_reg_arg3(regs);

        msgq = (wind_msgq_t *)msgQCreate(nb_msgs, length, flags);
        if (!msgq)
                return wind_errnoget();

        qid = msgq->handle;
        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
                          &qid, sizeof(qid));

        return 0;
}
static int sys_rtdm_write(struct pt_regs *regs)
{
        return __rt_dev_write(current,
                              __xn_reg_arg1(regs),
                              (const void *)__xn_reg_arg2(regs),
                              __xn_reg_arg3(regs));
}

static int sys_rtdm_read(struct pt_regs *regs)
{
        return __rt_dev_read(current,
                             __xn_reg_arg1(regs),
                             (void *)__xn_reg_arg2(regs),
                             __xn_reg_arg3(regs));
}

static int sys_rtdm_socket(struct pt_regs *regs)
{
        return __rt_dev_socket(current,
                               __xn_reg_arg1(regs),
                               __xn_reg_arg2(regs),
                               __xn_reg_arg3(regs));
}
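/*
 * For illustration only: the thin RTDM wrappers above are the kernel
 * end of the documented rt_dev_*() user API from <rtdm/rtdm.h>.  The
 * sketch below shows the caller side under the assumption that an RTDM
 * driver exposing a device named "rtser0" is loaded; the device name
 * and the open flag are hypothetical, and rt_dev_open()/rt_dev_close()
 * go through wrappers not shown in this excerpt.
 */
#include <rtdm/rtdm.h>

static int rtdm_user_side_example(void)
{
        char buf[32];
        ssize_t n;
        int fd;

        fd = rt_dev_open("rtser0", 0);
        if (fd < 0)
                return fd;

        n = rt_dev_read(fd, buf, sizeof(buf));  /* sys_rtdm_read() path */
        if (n >= 0)
                n = rt_dev_write(fd, buf, n);   /* sys_rtdm_write() path */

        rt_dev_close(fd);

        return n < 0 ? (int)n : 0;
}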