static int __sc_qinquiry(struct task_struct *curr, struct pt_regs *regs) { int qid, count, err; char *msg; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(count))) return -EFAULT; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(msg))) return -EFAULT; qid = __xn_reg_arg1(regs); msg = sc_qinquiry(qid, &count, &err); if (!err) { __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &count, sizeof(count)); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &msg, sizeof(msg)); } return err; }
static int __sc_gclock(struct task_struct *curr, struct pt_regs *regs) { struct timespec time; unsigned long ns; int err; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(time))) return -EFAULT; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ns))) return -EFAULT; sc_gclock(&time, &ns, &err); if (!err) { __xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs), &time, sizeof(time)); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &ns, sizeof(ns)); } return err; }
static int __sc_tinquiry(struct task_struct *curr, struct pt_regs *regs) { int err, tid, pinfo[3]; TCB *tcb; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(pinfo))) return -EFAULT; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(*tcb))) return -EFAULT; tid = __xn_reg_arg3(regs); tcb = sc_tinquiry(pinfo, tid, &err); if (!err) { __xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs), pinfo, sizeof(pinfo)); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), tcb, sizeof(*tcb)); } return err; }
/*
 * Syscall shim for msgQReceive().
 * arg1 = queue handle, arg2 = user payload buffer (out),
 * arg3 = buffer size, arg4 = timeout, arg5 = &bytes-read (out).
 * Returns 0 on success, a VxWorks S_* status or -EFAULT on error.
 */
static int __wind_msgq_receive(struct task_struct *curr, struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	char tmp_buf[128], *msgbuf;
	wind_msgq_t *msgq;
	int timeout, err;
	unsigned nbytes;

	nbytes = __xn_reg_arg3(regs);
	timeout = __xn_reg_arg4(regs);

	/* arg2 must be writable for up to nbytes of payload. */
	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), nbytes))
		return -EFAULT;

	/* arg5 receives the actual byte count. */
	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg5(regs), sizeof(nbytes)))
		return -EFAULT;

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);

	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	/* Small messages bounce through the on-stack buffer; larger
	   ones need a dynamic allocation. */
	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;
	else {
		msgbuf = (char *)xnmalloc(nbytes);

		if (!msgbuf)
			return S_memLib_NOT_ENOUGH_MEMORY;
	}

	/* This is sub-optimal since we end up copying the data twice. */

	err = msgQReceive((MSG_Q_ID)msgq, msgbuf, nbytes, timeout);

	if (err != ERROR) {
		/* On success, err holds the received byte count:
		   copy the payload, then the count itself. */
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  msgbuf, err);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
				  &err, sizeof(err));
		err = 0;
	} else
		err = wind_errnoget();

	if (msgbuf != tmp_buf)
		xnfree(msgbuf);

	return err;
}
static int __wind_taskinfo_name(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); const char *name; WIND_TCB *pTcb; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), XNOBJECT_NAME_LEN)) return -EFAULT; pTcb = (WIND_TCB *)xnregistry_fetch(handle); if (!pTcb) return S_objLib_OBJ_ID_ERROR; name = taskName((TASK_ID) pTcb); if (!name) return S_objLib_OBJ_ID_ERROR; /* We assume that a VxWorks task name fits in XNOBJECT_NAME_LEN bytes, including the trailing \0. */ __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), name, strlen(name) + 1); return 0; }
static int __wind_taskinfo_status(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); unsigned long status; WIND_TCB *pTcb; spl_t s; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(status))) return -EFAULT; xnlock_get_irqsave(&nklock, s); pTcb = (WIND_TCB *)xnregistry_fetch(handle); if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) { xnlock_put_irqrestore(&nklock, s); return S_objLib_OBJ_ID_ERROR; } status = xnthread_state_flags(&pTcb->threadbase); xnlock_put_irqrestore(&nklock, s); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &status, sizeof(status)); return 0; }
/* * int __wind_taskinfo_get(TASK_ID task_id, TASK_DESC *desc) */ static int __wind_taskinfo_get(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); TASK_DESC desc; WIND_TCB *pTcb; int err; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(desc))) return -EFAULT; pTcb = (WIND_TCB *)xnregistry_fetch(handle); if (!pTcb) return S_objLib_OBJ_ID_ERROR; err = taskInfoGet((TASK_ID)pTcb, &desc); if (!err) { /* Replace the kernel-based pointer by the userland handle. */ desc.td_tid = handle; __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &desc, sizeof(desc)); } return err; }
static int __sc_pend(struct task_struct *curr, struct pt_regs *regs) { char **mboxp, *msg; long timeout; int err; /* We should be able to write to a mailbox storage, even if we * actually don't. */ if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(msg))) return -EFAULT; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(msg))) return -EFAULT; mboxp = (char **)__xn_reg_arg1(regs); timeout = __xn_reg_arg2(regs); msg = sc_pend(mboxp, timeout, &err); if (!err) __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &msg, sizeof(msg)); return err; }
static int __wind_task_priorityget(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); WIND_TCB *pTcb; int prio; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(prio))) return -EFAULT; if (handle) pTcb = (WIND_TCB *)xnregistry_fetch(handle); else pTcb = __wind_task_current(curr); if (!pTcb) return S_objLib_OBJ_ID_ERROR; if (taskPriorityGet((TASK_ID) pTcb, &prio) == ERROR) return wind_errnoget(); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &prio, sizeof(prio)); return 0; }
static int __wind_msgq_nummsgs(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); wind_msgq_t *msgq; int nummsgs; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(nummsgs))) return -EFAULT; msgq = (wind_msgq_t *)xnregistry_fetch(handle); if (!msgq) return S_objLib_OBJ_ID_ERROR; nummsgs = msgQNumMsgs((MSG_Q_ID)msgq); if (nummsgs == ERROR) return wind_errnoget(); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &nummsgs, sizeof(nummsgs)); return 0; }
static int __wind_task_nametoid(struct task_struct *curr, struct pt_regs *regs) { char name[XNOBJECT_NAME_LEN]; WIND_TCB_PLACEHOLDER ph; xnhandle_t handle; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ph))) return -EFAULT; if (!__xn_reg_arg1(regs)) return S_taskLib_NAME_NOT_FOUND; if (!__xn_access_ok (curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(name))) return -EFAULT; __xn_strncpy_from_user(curr, name, (const char __user *)__xn_reg_arg1(regs), sizeof(name) - 1); name[sizeof(name) - 1] = '\0'; handle = taskNameToHandle(name); if (handle == XN_NO_HANDLE) return wind_errnoget(); ph.handle = handle; /* Copy back the task handle. */ __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &ph, sizeof(ph)); return 0; }
static int __wind_errno_taskget(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); WIND_TCB *pTcb; int errcode; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(errcode))) return -EFAULT; if (!handle) errcode = wind_errnoget(); else { pTcb = (WIND_TCB *)xnregistry_fetch(handle); if (!pTcb) return S_objLib_OBJ_ID_ERROR; errcode = errnoOfTaskGet((TASK_ID) pTcb); if (errcode == ERROR) return wind_errnoget(); } __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &errcode, sizeof(errcode)); return 0; }
/*
 * Block the calling watchdog server task until a watchdog owned by
 * this process expires, then copy the expired watchdog's user-side
 * descriptor (handler + argument) to the user buffer at arg1.
 * Returns 0 on delivery, -EINTR if unblocked by a signal, -EIDRM if
 * the watchdog synch object was destroyed, -EFAULT on bad buffer.
 */
static int __wind_wd_wait(struct task_struct *curr, struct pt_regs *regs)
{
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(wd->wdt)))
		return -EFAULT;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(curr);

	if (xnthread_base_priority(&pTcb->threadbase) != XNCORE_IRQ_PRIO)
		/* Renice the waiter above all regular tasks if needed. */
		xnpod_renice_thread(&pTcb->threadbase, XNCORE_IRQ_PRIO);

	/* Fast path: an expiry is already queued, no need to sleep. */
	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}

	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

      pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		/* Copy the expiry descriptor outside of the lock; the
		   buffer was verified on entry. */
		__xn_copy_to_user(curr,
				  (void __user *)__xn_reg_arg1(regs),
				  &wd->wdt, sizeof(wd->wdt));
		return 0;
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Open/allocate a named shared-memory heap segment.
 * arg1 = name, arg2 = &size (in: requested, out: actual extent size),
 * arg3 = creation mode, arg4 = kernel-heap flag, arg5 = &offset (out:
 * offset of the block within the mapped heap).
 *
 * NOTE(review): this syscall returns an opaque heap identifier cast
 * to int, with 0 meaning failure — including the bad-user-pointer
 * case, which deliberately does not return -EFAULT like its siblings.
 */
static int __rt_shm_heap_open(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned long name;
	int size;
	int suprt, in_kheap;
	unsigned long off;
	unsigned long opaque;
	void *ret;
	extern void *_shm_alloc(unsigned long name, int size, int suprt,
				int in_kheap, unsigned long *opaque);

	/* Both out-parameters must be writable before doing any work. */
	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(size))
	    || !__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			       sizeof(off)))
		return 0;

	name = (unsigned long)__xn_reg_arg1(regs);
	/* Size of heap space. */
	__xn_copy_from_user(curr, &size, (void __user *)__xn_reg_arg2(regs),
			    sizeof(size));
	/* Creation mode. */
	suprt = (int)__xn_reg_arg3(regs);
	in_kheap = (int)__xn_reg_arg4(regs);

	ret = _shm_alloc(name, size, suprt, in_kheap, &opaque);

	if (!ret)
		goto free_and_fail;

	/* Translate the kernel address into a mappable heap offset. */
	off = xnheap_mapped_offset((xnheap_t *)opaque, ret);

	/* Report the full extent size back to the caller. */
	size = (int)((xnheap_t *)opaque)->extentsize;
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &size,
			  sizeof(size));
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs), &off,
			  sizeof(off));

	return (int)opaque;

      free_and_fail:

	return 0;
}
/*
 * Syscall shim for sc_tecreate(): create a VRTX task shadowing the
 * calling user-space thread.
 * arg1 = &vrtx_arg_bulk{tid, prio, mode}, arg2 = &tid (out),
 * arg3 = completion descriptor the parent thread pends on (may be 0).
 */
static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct vrtx_arg_bulk bulk;
	int prio, mode, tid, err;
	vrtxtask_t *task;

	if (!__xn_access_ok
	    (curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
		return -EFAULT;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(tid)))
		return -EFAULT;

	__xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
			    sizeof(bulk));

	/* Suggested task id. */
	tid = bulk.a1;
	/* Task priority. */
	prio = bulk.a2;
	/* Task mode. */
	mode = bulk.a3 | 0x100;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = xnmalloc(sizeof(*task));
	if (!task) {
		err = ER_TCB;
		goto done;
	}

	/* Clear XNZOMBIE up front so the free-on-error test below can
	   tell whether the nucleus took ownership of the TCB. */
	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0, &err);

	if (tid < 0) {
		/* Creation failed: wake the pending parent, if any. */
		if (u_completion)
			xnshadow_signal_completion(u_completion, err);
	} else {
		/* Publish the new task id, then map the caller onto the
		   shadow thread (this signals the completion itself). */
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &tid, sizeof(tid));
		err = xnshadow_map(&task->threadbase, u_completion);
	}

	/* Only free the TCB if the thread never reached zombie state,
	   i.e. the nucleus did not schedule its own cleanup. */
	if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);

      done:
	return err;
}
/*
 * Syscall shim for wdStart(): arm watchdog arg1 for arg2 ticks with
 * the user-space handler arg3 / argument arg4; arg5 receives a flag
 * telling the caller whether it must spawn the watchdog server task
 * (set on the first watchdog started by this process).
 */
static int __wind_wd_start(struct task_struct *curr, struct pt_regs *regs)
{
	wind_rholder_t *rh;
	long start_server;
	xnhandle_t handle;
	wind_wd_t *wd;
	int timeout;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			    sizeof(start_server)))
		return -EFAULT;

	handle = __xn_reg_arg1(regs);

	wd = (wind_wd_t *)xnregistry_fetch(handle);

	if (!wd)
		return S_objLib_OBJ_ID_ERROR;

	rh = wind_get_rholder();

	if (wd->rh != rh)
		/*
		 * User may not fiddle with watchdogs created from
		 * other processes.
		 */
		return S_objLib_OBJ_UNAVAILABLE;

	timeout = __xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	/* The kernel-side timer always fires __wind_wd_handler, which
	   relays the expiry to the user-space handler stored below. */
	if (wdStart
	    ((WDOG_ID)wd, timeout, (wind_timer_t) & __wind_wd_handler,
	     (long)wd) == ERROR) {
		xnlock_put_irqrestore(&nklock, s);
		return wind_errnoget();
	}

	/* Stash the user handler/arg while still holding the lock. */
	wd->wdt.handler = (wind_timer_t) __xn_reg_arg3(regs);
	wd->wdt.arg = (long)__xn_reg_arg4(regs);

	/* First watchdog of this process: the caller must start the
	   server task that will dispatch expiries. */
	start_server = rh->wdcount++ == 0;

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
			  &start_server, sizeof(start_server));

	return 0;
}
/*
 * Syscall shim for sysClkRateGet(): write the system clock rate (in
 * ticks per second) into the user word at arg1.
 */
static int __wind_sys_clkrateget(struct task_struct *curr, struct pt_regs *regs)
{
	int rate;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs),
			    sizeof(rate)))
		return -EFAULT;

	rate = sysClkRateGet();
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
			  &rate, sizeof(rate));

	return 0;
}
/*
 * Syscall shim for sc_gtime(): write the current tick count into the
 * user word at arg1.
 */
static int __sc_gtime(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned long now;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs),
			    sizeof(now)))
		return -EFAULT;

	now = sc_gtime();
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
			  &now, sizeof(now));

	return 0;
}
/*
 * Syscall shim for tickGet(): write the current tick counter into
 * the user word at arg1.
 */
static int __wind_tick_get(struct task_struct *curr, struct pt_regs *regs)
{
	ULONG now;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs),
			    sizeof(now)))
		return -EFAULT;

	now = tickGet();
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
			  &now, sizeof(now));

	return 0;
}
/*
 * Syscall shim for sc_fcreate(): create an event-flag group and
 * write its id into the user word at arg1.
 */
static int __sc_fcreate(struct task_struct *curr, struct pt_regs *regs)
{
	int flag_id, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs),
			    sizeof(flag_id)))
		return -EFAULT;

	flag_id = sc_fcreate(&err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
				  &flag_id, sizeof(flag_id));

	return err;
}
static int __wind_taskinfo_iddfl(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); TASK_ID ret_id; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ret_id))) return -EFAULT; ret_id = taskIdDefault(handle); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &ret_id, sizeof(ret_id)); return 0; }
/*
 * Syscall shim for taskSetMode(): clear arg1 / set arg2 mode bits on
 * the current thread; if arg3 is non-zero, copy the previous mode
 * word back to it.
 */
static int __wind_task_setmode(struct task_struct *curr, struct pt_regs *regs)
{
	int clear_bits = __xn_reg_arg1(regs);
	int set_bits = __xn_reg_arg2(regs);
	int old_mode;

	/* Primary required: current thread must be valid. */
	old_mode = xnpod_set_thread_mode(xnpod_current_thread(),
					 clear_bits, set_bits);

	if (__xn_reg_arg3(regs))
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &old_mode, sizeof(old_mode));

	return 0;
}
static RTAI_SYSCALL_MODE int sys_rtdm_recvmsg(long fd, struct msghdr *msg, long flags) { struct msghdr krnl_msg; struct task_struct *curr = current; int ret; if (unlikely(!__xn_access_ok(curr, VERIFY_WRITE, msg, sizeof(krnl_msg)))) { return -EFAULT; } __xn_copy_from_user(curr, &krnl_msg, msg, sizeof(krnl_msg)); if ((ret = __rt_dev_recvmsg(curr, fd, &krnl_msg, flags)) >= 0) { __xn_copy_to_user(curr, msg, &krnl_msg, sizeof(krnl_msg)); } return ret; }
/*
 * Syscall shim for sc_minquiry(): write the status of mutex arg1
 * into the user word at arg2.
 */
static int __sc_minquiry(struct task_struct *curr, struct pt_regs *regs)
{
	int mutex_id, state, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(state)))
		return -EFAULT;

	mutex_id = __xn_reg_arg1(regs);
	state = sc_minquiry(mutex_id, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &state, sizeof(state));

	return err;
}
/*
 * Syscall shim for sc_sinquiry(): write the count of semaphore arg1
 * into the user word at arg2.
 */
static int __sc_sinquiry(struct task_struct *curr, struct pt_regs *regs)
{
	int sem_id, value, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(value)))
		return -EFAULT;

	sem_id = __xn_reg_arg1(regs);
	value = sc_sinquiry(sem_id, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &value, sizeof(value));

	return err;
}
/*
 * Syscall shim for sc_hinquiry(): return the 3-word info array for
 * heap <hid> to user space. arg1 = &pinfo[3] (out), arg2 = hid.
 *
 * BUGFIX: this wrapper previously forwarded to sc_tinquiry() (the
 * *task* inquiry service) while passing a heap id, a copy/paste slip
 * from the neighboring task wrapper; heap inquiries must go through
 * sc_hinquiry().
 */
static int __sc_hinquiry(struct task_struct *curr, struct pt_regs *regs)
{
	int err, hid, pinfo[3];

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(pinfo)))
		return -EFAULT;

	hid = __xn_reg_arg2(regs);

	sc_hinquiry(pinfo, hid, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
				  pinfo, sizeof(pinfo));

	return err;
}
static int __sc_fclear(struct task_struct *curr, struct pt_regs *regs) { int fid, mask, mask_r, err; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(mask_r))) return -EFAULT; fid = __xn_reg_arg1(regs); mask = __xn_reg_arg2(regs); mask_r = sc_fclear(fid, mask, &err); if (!err) __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &mask_r, sizeof(mask_r)); return err; }
static int __sc_qecreate(struct task_struct *curr, struct pt_regs *regs) { int qid, qsize, opt, err; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg4(regs), sizeof(qid))) return -EFAULT; qid = __xn_reg_arg1(regs); qsize = __xn_reg_arg2(regs); opt = __xn_reg_arg3(regs); qid = sc_qecreate(qid, qsize, opt, &err); if (!err) __xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs), &qid, sizeof(qid)); return err; }
static int __sc_screate(struct task_struct *curr, struct pt_regs *regs) { int semid, opt, err; unsigned initval; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(semid))) return -EFAULT; initval = __xn_reg_arg1(regs); opt = __xn_reg_arg2(regs); semid = sc_screate(initval, opt, &err); if (!err) __xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &semid, sizeof(semid)); return err; }
/*
 * Syscall shim for sc_hcreate(): create a heap of arg1 bytes with
 * page size log2 arg2, then fill in the user descriptor at arg3 with
 * the heap id, kernel heap control block address and extent size
 * (needed by user space to mmap the heap).
 */
static int __sc_hcreate(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned log2psize;
	vrtx_hdesc_t hdesc;
	vrtxheap_t *heap;
	u_long heapsize;
	int err, hid;
	spl_t s;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg3(regs), sizeof(hdesc)))
		return -EFAULT;

	/* Size of heap space. */
	heapsize = __xn_reg_arg1(regs);
	/* Page size. */
	log2psize = (int)__xn_reg_arg2(regs);

	hid = sc_hcreate(NULL, heapsize, log2psize, &err);

	if (err)
		return err;

	/* Re-fetch under lock: the heap could be deleted concurrently
	   between creation and descriptor fill-in. */
	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);

	if (heap) {		/* Paranoid. */
		/* Remember the creating address space for mapping. */
		heap->mm = curr->mm;
		hdesc.hid = hid;
		hdesc.hcb = &heap->sysheap;
		hdesc.hsize = xnheap_extentsize(&heap->sysheap);

		xnlock_put_irqrestore(&nklock, s);

		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &hdesc, sizeof(hdesc));
	} else {
		xnlock_put_irqrestore(&nklock, s);
		err = ER_ID;
	}

	return err;
}