static int __wind_sem_mcreate(struct pt_regs *regs)
{
	wind_sem_t *sem;
	SEM_ID sem_id;
	int flags;

	flags = __xn_reg_arg1(regs);

	sem = (wind_sem_t *)semMCreate(flags);
	if (!sem)
		return wind_errnoget();

	sem_id = sem->handle;

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &sem_id, sizeof(sem_id));
}
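/*
 * For context, a minimal sketch of the user-space stub that pairs with
 * __wind_sem_mcreate() above, assuming the usual Xenomai 2.x skin-call
 * convention (XENOMAI_SKINCALL2 marshals the two arguments decoded from
 * the register frame).  The __vxworks_muxid and __vxworks_sem_mcreate
 * identifiers, the header path, and the _sketch suffix are illustrative
 * assumptions, not taken from this file.
 */
#include <errno.h>
#include <vxworks/vxworks.h>	/* Assumed userland skin header. */

SEM_ID semMCreate_sketch(int options)
{
	SEM_ID sem_id;
	int err;

	/* arg1 = creation flags, arg2 = address receiving the handle,
	   mirroring the kernel-side decoding above. */
	err = XENOMAI_SKINCALL2(__vxworks_muxid, __vxworks_sem_mcreate,
				options, &sem_id);
	if (err) {
		errno = err;
		return 0;	/* VxWorks returns a null SEM_ID on error. */
	}

	return sem_id;
}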
static int __sc_qaccept(struct task_struct *curr, struct pt_regs *regs)
{
	int qid, err;
	char *msg;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(msg)))
		return -EFAULT;

	qid = __xn_reg_arg1(regs);

	msg = sc_qaccept(qid, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &msg, sizeof(msg));

	return err;
}
static int __sc_fclear(struct task_struct *curr, struct pt_regs *regs)
{
	int fid, mask, mask_r, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg3(regs),
			    sizeof(mask_r)))
		return -EFAULT;

	fid = __xn_reg_arg1(regs);
	mask = __xn_reg_arg2(regs);

	mask_r = sc_fclear(fid, mask, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &mask_r, sizeof(mask_r));

	return err;
}
static int __rt_shm_heap_open(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned long name;
	int size;
	int suprt, in_kheap;
	unsigned long off;
	unsigned long opaque;
	void *ret;
	extern void *_shm_alloc(unsigned long name, int size, int suprt,
				int in_kheap, unsigned long *opaque);

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(size))
	    || !__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			       sizeof(off)))
		return 0;

	name = (unsigned long)__xn_reg_arg1(regs);

	/* Size of heap space. */
	__xn_copy_from_user(curr, &size, (void __user *)__xn_reg_arg2(regs),
			    sizeof(size));
	/* Creation mode. */
	suprt = (int)__xn_reg_arg3(regs);
	in_kheap = (int)__xn_reg_arg4(regs);

	ret = _shm_alloc(name, size, suprt, in_kheap, &opaque);
	if (!ret)
		return 0;

	off = xnheap_mapped_offset((xnheap_t *)opaque, ret);
	size = (int)((xnheap_t *)opaque)->extentsize;

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &size,
			  sizeof(size));
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs), &off,
			  sizeof(off));

	return (int)opaque;
}
static int __wind_wd_start(struct pt_regs *regs)
{
	wind_rholder_t *rh;
	long start_server;
	xnhandle_t handle;
	wind_wd_t *wd;
	int timeout;
	spl_t s;

	handle = __xn_reg_arg1(regs);

	wd = (wind_wd_t *)xnregistry_fetch(handle);
	if (!wd)
		return S_objLib_OBJ_ID_ERROR;

	rh = wind_get_rholder();

	if (wd->rh != rh)
		/*
		 * User may not fiddle with watchdogs created from
		 * other processes.
		 */
		return S_objLib_OBJ_UNAVAILABLE;

	timeout = __xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	if (wdStart((WDOG_ID)wd, timeout, (wind_timer_t)&__wind_wd_handler,
		    (long)wd) == ERROR) {
		xnlock_put_irqrestore(&nklock, s);
		return wind_errnoget();
	}

	wd->wdt.handler = (wind_timer_t)__xn_reg_arg3(regs);
	wd->wdt.arg = (long)__xn_reg_arg4(regs);

	/* Tell the caller to spawn its watchdog server when the first
	   watchdog of this resource holder is armed. */
	start_server = rh->wdcount++ == 0;

	xnlock_put_irqrestore(&nklock, s);

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg5(regs),
				      &start_server, sizeof(start_server));
}
static int __wind_msgq_receive(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	char tmp_buf[128], *msgbuf;
	wind_msgq_t *msgq;
	int timeout, err;
	unsigned nbytes;

	nbytes = __xn_reg_arg3(regs);
	timeout = __xn_reg_arg4(regs);

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);
	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	/* Bounce buffer: use the on-stack area for short messages, a
	   dynamically allocated one otherwise. */
	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;
	else {
		msgbuf = (char *)xnmalloc(nbytes);
		if (!msgbuf)
			return S_memLib_NOT_ENOUGH_MEMORY;
	}

	/* This is sub-optimal since we end up copying the data twice. */
	err = msgQReceive((MSG_Q_ID)msgq, msgbuf, nbytes, timeout);
	if (err != ERROR) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
					   msgbuf, err) ||
		    __xn_safe_copy_to_user((void __user *)__xn_reg_arg5(regs),
					   &err, sizeof(err)))
			err = -EFAULT;
		else
			err = 0;
	} else
		err = wind_errnoget();

	if (msgbuf != tmp_buf)
		xnfree(msgbuf);

	return err;
}
static int __wind_task_delete(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	WIND_TCB *pTcb;

	if (handle)
		pTcb = __wind_lookup_task(handle);
	else
		pTcb = __wind_task_current(current);

	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	if (taskDelete((TASK_ID)pTcb) == ERROR)
		return wind_errnoget();

	return 0;
}
static int __wind_task_suspend(struct task_struct *curr, struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	WIND_TCB *pTcb;

	if (handle)
		pTcb = (WIND_TCB *)xnregistry_fetch(handle);
	else
		pTcb = __wind_task_current(curr);

	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	if (taskSuspend((TASK_ID)pTcb) == ERROR)
		return wind_errnoget();

	return 0;
}
static int __sc_screate(struct task_struct *curr, struct pt_regs *regs)
{
	int semid, opt, err;
	unsigned initval;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg3(regs),
			    sizeof(semid)))
		return -EFAULT;

	initval = __xn_reg_arg1(regs);
	opt = __xn_reg_arg2(regs);

	semid = sc_screate(initval, opt, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &semid, sizeof(semid));

	return err;
}
static int __sc_qecreate(struct task_struct *curr, struct pt_regs *regs)
{
	int qid, qsize, opt, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg4(regs),
			    sizeof(qid)))
		return -EFAULT;

	qid = __xn_reg_arg1(regs);
	qsize = __xn_reg_arg2(regs);
	opt = __xn_reg_arg3(regs);

	qid = sc_qecreate(qid, qsize, opt, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
				  &qid, sizeof(qid));

	return err;
}
static int __sc_hcreate(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned log2psize;
	vrtx_hdesc_t hdesc;
	vrtxheap_t *heap;
	u_long heapsize;
	int err, hid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg3(regs),
			    sizeof(hdesc)))
		return -EFAULT;

	/* Size of heap space. */
	heapsize = __xn_reg_arg1(regs);
	/* Page size. */
	log2psize = (int)__xn_reg_arg2(regs);

	hid = sc_hcreate(NULL, heapsize, log2psize, &err);
	if (err)
		return err;

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);
	if (heap) {		/* Paranoid. */
		heap->mm = curr->mm;
		hdesc.hid = hid;
		hdesc.hcb = &heap->sysheap;
		hdesc.hsize = xnheap_extentsize(&heap->sysheap);
		xnlock_put_irqrestore(&nklock, s);

		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &hdesc, sizeof(hdesc));
	} else {
		xnlock_put_irqrestore(&nklock, s);
		err = ER_ID;
	}

	return err;
}
static int __sc_halloc(struct task_struct *curr, struct pt_regs *regs)
{
	vrtxheap_t *heap;
	char *buf = NULL;
	u_long bsize;
	int err, hid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg3(regs),
			    sizeof(buf)))
		return -EFAULT;

	hid = __xn_reg_arg1(regs);
	bsize = (u_long)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);
	if (!heap || heap->mm != curr->mm) {
		/* Allocation requests must be issued from the same
		   process which created the heap. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	buf = sc_halloc(hid, bsize, &err);

	/* Convert the allocated buffer kernel-based address to the
	   equivalent area into the caller's address space. */
	if (!err)
		buf = heap->mapbase +
			xnheap_mapped_offset(&heap->sysheap, buf);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &buf,
			  sizeof(buf));

	return err;
}
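/*
 * Note on the pointer translation performed by __sc_halloc() above and
 * by __sc_gblock()/__sc_rblock() below: the heap (or partition) is
 * mmapped contiguously into the creating process, so a kernel pointer
 * and its user-space alias differ by a constant displacement.  Using
 * the names from those functions, the round-trip invariant is:
 *
 *   user_ptr = heap->mapbase + xnheap_mapped_offset(&heap->sysheap, kbuf);
 *   kbuf     = xnheap_mapped_address(&heap->sysheap,
 *                                    (caddr_t)user_ptr - heap->mapbase);
 *
 * This is also why the heap->mm check matters: mapbase is only
 * meaningful within the process which created and bound the object.
 */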
static int __wind_msgq_nummsgs(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	wind_msgq_t *msgq;
	int nummsgs;

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);
	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	nummsgs = msgQNumMsgs((MSG_Q_ID)msgq);
	if (nummsgs == ERROR)
		return wind_errnoget();

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &nummsgs, sizeof(nummsgs));
}
static int __wind_msgq_create(struct pt_regs *regs)
{
	int nb_msgs, length, flags;
	wind_msgq_t *msgq;
	MSG_Q_ID qid;

	nb_msgs = __xn_reg_arg1(regs);
	length = __xn_reg_arg2(regs);
	flags = __xn_reg_arg3(regs);

	msgq = (wind_msgq_t *)msgQCreate(nb_msgs, length, flags);
	if (!msgq)
		return wind_errnoget();

	qid = msgq->handle;

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg4(regs),
				      &qid, sizeof(qid));
}
static int __ui_pol_flg(struct pt_regs *regs)
{
	UINT flgptn, waiptn, wfmode;
	ID flgid;
	ER err;

	flgid = __xn_reg_arg2(regs);
	waiptn = __xn_reg_arg3(regs);
	wfmode = __xn_reg_arg4(regs);

	err = pol_flg(&flgptn, flgid, waiptn, wfmode);

	if (err == E_OK &&
	    __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
				   &flgptn, sizeof(flgptn)))
		return -EFAULT;

	return err;
}
static int __sc_pbind(struct task_struct *curr, struct pt_regs *regs)
{
	caddr_t mapbase = (caddr_t)__xn_reg_arg2(regs);
	int pid = __xn_reg_arg1(regs), err = 0;
	vrtxpt_t *pt;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);
	if (pt && pt->mm == curr->mm)
		pt->mapbase = mapbase;
	else
		err = ER_PID;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __sc_fpend(struct task_struct *curr, struct pt_regs *regs)
{
	int fid, mask, mask_r, opt, err;
	long timeout;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			    sizeof(mask_r)))
		return -EFAULT;

	fid = __xn_reg_arg1(regs);
	timeout = __xn_reg_arg2(regs);
	mask = __xn_reg_arg3(regs);
	opt = __xn_reg_arg4(regs);

	mask_r = sc_fpend(fid, timeout, mask, opt, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
				  &mask_r, sizeof(mask_r));

	return err;
}
static int __ui_rcv_msg(struct pt_regs *regs)
{
	ID mbxid = __xn_reg_arg2(regs);
	T_MSG *pk_msg;
	ER err;

	err = rcv_msg(&pk_msg, mbxid);
	if (err == E_OK) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					   &pk_msg, sizeof(pk_msg)))
			return -EFAULT;
	} else if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		/* If the wait was not broken by rel_wai(), the task was
		   unblocked by a Linux signal: report -EINTR instead. */
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
static int __wind_errno_taskset(struct task_struct *curr, struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	int errcode = __xn_reg_arg2(regs);
	WIND_TCB *pTcb;

	if (!handle) {
		wind_errnoset(errcode);
		return 0;
	}

	pTcb = (WIND_TCB *)xnregistry_fetch(handle);
	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	if (errnoOfTaskSet((TASK_ID)pTcb, errcode) == ERROR)
		return wind_errnoget();

	return 0;
}
static int __sc_hbind(struct task_struct *curr, struct pt_regs *regs)
{
	caddr_t mapbase = (caddr_t)__xn_reg_arg2(regs);
	int hid = __xn_reg_arg1(regs), err = 0;
	vrtxheap_t *heap;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);
	if (heap && heap->mm == curr->mm)
		heap->mapbase = mapbase;
	else
		err = ER_ID;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __wind_task_priorityget(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	WIND_TCB *pTcb;
	int prio;

	if (handle)
		pTcb = __wind_lookup_task(handle);
	else
		pTcb = __wind_task_current(current);

	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	if (taskPriorityGet((TASK_ID)pTcb, &prio) == ERROR)
		return wind_errnoget();

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &prio, sizeof(prio));
}
static int __sc_gblock(struct task_struct *curr, struct pt_regs *regs)
{
	char *buf = NULL;
	vrtxpt_t *pt;
	int err, pid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(buf)))
		return -EFAULT;

	pid = __xn_reg_arg1(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);
	if (!pt || pt->mm != curr->mm) {
		/* Allocation requests must be issued from the same
		   process which created the partition. */
		err = ER_PID;
		goto unlock_and_exit;
	}

	buf = sc_gblock(pid, &err);

	/* Convert the allocated buffer kernel-based address to the
	   equivalent area into the caller's address space. */
	if (!err)
		buf = pt->mapbase + xnheap_mapped_offset(pt->sysheap, buf);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &buf,
			  sizeof(buf));

	return err;
}
static int __wind_errno_taskget(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	WIND_TCB *pTcb;
	int errcode;

	if (!handle)
		errcode = wind_errnoget();
	else {
		pTcb = __wind_lookup_task(handle);
		if (!pTcb)
			return S_objLib_OBJ_ID_ERROR;

		errcode = errnoOfTaskGet((TASK_ID)pTcb);
		if (errcode == ERROR)
			return wind_errnoget();
	}

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &errcode, sizeof(errcode));
}
static int __wind_taskinfo_name(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	const char *name;
	WIND_TCB *pTcb;

	pTcb = __wind_lookup_task(handle);
	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	name = taskName((TASK_ID)pTcb);
	if (!name)
		return S_objLib_OBJ_ID_ERROR;

	/* We assume that a VxWorks task name fits in XNOBJECT_NAME_LEN
	   bytes, including the trailing \0. */
	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      name, strlen(name) + 1);
}
/*
 * int __wind_taskinfo_get(TASK_ID task_id, TASK_DESC *desc)
 */
static int __wind_taskinfo_get(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	TASK_DESC desc;
	WIND_TCB *pTcb;
	int err;

	pTcb = __wind_lookup_task(handle);
	if (!pTcb)
		return S_objLib_OBJ_ID_ERROR;

	err = taskInfoGet((TASK_ID)pTcb, &desc);
	if (err)
		return err;

	/* Replace the kernel-based pointer by the userland handle. */
	desc.td_tid = handle;

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &desc, sizeof(desc));
}
static int __sc_rblock(struct task_struct *curr, struct pt_regs *regs)
{
	char __user *buf;
	vrtxpt_t *pt;
	int pid, err;
	spl_t s;

	pid = __xn_reg_arg1(regs);
	buf = (char __user *)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);
	if (!pt || pt->mm != curr->mm) {
		/* Deallocation requests must be issued from the same
		   process which created the partition. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	/* Convert the caller-based address of buf to the equivalent
	   area into the kernel address space. */
	if (buf) {
		buf = xnheap_mapped_address(pt->sysheap,
					    (caddr_t)buf - pt->mapbase);
		sc_rblock(pid, buf, &err);
	} else
		err = ER_NMB;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __wind_taskinfo_status(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	unsigned long status;
	WIND_TCB *pTcb;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_lookup_task(handle);
	if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) {
		xnlock_put_irqrestore(&nklock, s);
		return S_objLib_OBJ_ID_ERROR;
	}

	status = xnthread_state_flags(&pTcb->threadbase);

	xnlock_put_irqrestore(&nklock, s);

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs),
				      &status, sizeof(status));
}
static int __ui_wai_flg(struct pt_regs *regs)
{
	UINT flgptn, waiptn, wfmode;
	ID flgid;
	ER err;

	flgid = __xn_reg_arg2(regs);
	waiptn = __xn_reg_arg3(regs);
	wfmode = __xn_reg_arg4(regs);

	err = wai_flg(&flgptn, flgid, waiptn, wfmode);
	if (err == E_OK) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					   &flgptn, sizeof(flgptn)))
			return -EFAULT;
	} else if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
static int sys_rtdm_recvmsg(struct pt_regs *regs)
{
	struct task_struct *p = current;
	struct msghdr krnl_msg;
	int ret;

	if (unlikely(!access_wok(__xn_reg_arg2(regs), sizeof(krnl_msg)) ||
		     __xn_copy_from_user(&krnl_msg,
					 (void __user *)__xn_reg_arg2(regs),
					 sizeof(krnl_msg))))
		return -EFAULT;

	ret = __rt_dev_recvmsg(p, __xn_reg_arg1(regs), &krnl_msg,
			       __xn_reg_arg3(regs));
	if (unlikely(ret < 0))
		return ret;

	/* Copy the message header, which the driver may have updated,
	   back to the caller. */
	if (unlikely(__xn_copy_to_user((void __user *)__xn_reg_arg2(regs),
				       &krnl_msg, sizeof(krnl_msg))))
		return -EFAULT;

	return ret;
}
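/*
 * A minimal sketch of how user space could drive sys_rtdm_recvmsg()
 * above through the documented RTDM userland API (rt_dev_recvmsg()).
 * The recv_one() helper, its descriptor, and the message setup are
 * illustrative assumptions, not part of this file.
 */
#include <string.h>
#include <sys/uio.h>		/* struct iovec */
#include <sys/socket.h>		/* struct msghdr */
#include <rtdm/rtdm.h>		/* rt_dev_recvmsg() */

static ssize_t recv_one(int fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* The msghdr is copied into the kernel, updated by the driver,
	   then copied back out by the syscall wrapper above. */
	return rt_dev_recvmsg(fd, &msg, 0);
}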