static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
    xncompletion_t __user *u_completion;
    struct vrtx_arg_bulk bulk;
    int prio, mode, tid, err;
    vrtxtask_t *task;

    if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
        return -EFAULT;

    if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(tid)))
        return -EFAULT;

    __xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
                        sizeof(bulk));

    /* Suggested task id. */
    tid = bulk.a1;
    /* Task priority. */
    prio = bulk.a2;
    /* Task mode. */
    mode = bulk.a3 | 0x100;

    /* Completion descriptor our parent thread is pending on. */
    u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

    task = xnmalloc(sizeof(*task));
    if (!task) {
        err = ER_TCB;
        goto done;
    }

    xnthread_clear_state(&task->threadbase, XNZOMBIE);

    tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0, &err);

    if (tid < 0) {
        if (u_completion)
            xnshadow_signal_completion(u_completion, err);
    } else {
        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &tid,
                          sizeof(tid));
        err = xnshadow_map(&task->threadbase, u_completion);
    }

    if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
        xnfree(task);

done:
    return err;
}
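The handler expects its three scalar arguments pre-packed into a vrtx_arg_bulk so one copy fetches them all. Below is a minimal sketch of what the matching user-space stub could look like, assuming the Xenomai 2.x XENOMAI_SKINCALL3() convention; the stub name and mux identifiers are illustrative, and only the bulk packing mirrors the handler above.

/* Hypothetical user-side stub: pack tid, priority and mode into one
 * vrtx_arg_bulk so the kernel handler fetches them with a single copy. */
struct vrtx_arg_bulk {
    unsigned long a1;   /* suggested task id */
    unsigned long a2;   /* task priority */
    unsigned long a3;   /* task mode; the handler ORs in 0x100 itself */
};

static int sc_tecreate_stub(int tid, int prio, int mode, int *tid_r,
                            xncompletion_t *u_completion)
{
    struct vrtx_arg_bulk bulk = {
        .a1 = (unsigned long)tid,
        .a2 = (unsigned long)prio,
        .a3 = (unsigned long)mode,
    };

    /* arg1 = bulk, arg2 = where the kernel writes the final tid,
       arg3 = completion descriptor the parent pends on. */
    return XENOMAI_SKINCALL3(__vrtx_muxid, __vrtx_tecreate,
                             &bulk, tid_r, u_completion);
}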
static RTAI_SYSCALL_MODE int sys_rtdm_sendmsg(long fd, const struct msghdr *msg,
                                              long flags)
{
    struct msghdr krnl_msg;
    struct task_struct *curr = current;

    if (unlikely(!__xn_access_ok(curr, VERIFY_READ, msg, sizeof(krnl_msg))))
        return -EFAULT;

    __xn_copy_from_user(curr, &krnl_msg, msg, sizeof(krnl_msg));

    return __rt_dev_sendmsg(curr, fd, &krnl_msg, flags);
}
static RTAI_SYSCALL_MODE int sys_rtdm_open(const char *path, long oflag)
{
    struct task_struct *curr = current;
    char krnl_path[RTDM_MAX_DEVNAME_LEN + 1];

    if (unlikely(!__xn_access_ok(curr, VERIFY_READ, path, sizeof(krnl_path))))
        return -EFAULT;

    __xn_copy_from_user(curr, krnl_path, path, sizeof(krnl_path) - 1);
    krnl_path[sizeof(krnl_path) - 1] = '\0';

    return __rt_dev_open(curr, (const char *)krnl_path, oflag);
}
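One caveat in this open wrapper: the access check and the copy both touch a full sizeof(krnl_path) - 1 bytes even when the user string is much shorter, so a short path placed near the end of a mapped region can fail spuriously. The sketch below uses the stock Linux strncpy_from_user(), which stops at the terminating NUL; whether that helper is appropriate here is an assumption, since the wrappers above deliberately go through the __xn_* accessors.

/* Variant sketch: strncpy_from_user() returns the copied length on success
 * (or -EFAULT) and never reads past the NUL terminator. */
#include <linux/uaccess.h>

static int sys_rtdm_open_alt(const char __user *path, long oflag)
{
    char krnl_path[RTDM_MAX_DEVNAME_LEN + 1];
    long len;

    len = strncpy_from_user(krnl_path, path, sizeof(krnl_path) - 1);
    if (len < 0)
        return -EFAULT;            /* source string not readable */
    krnl_path[len] = '\0';         /* always NUL-terminate */

    return __rt_dev_open(current, krnl_path, oflag);
}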
static int sys_rtdm_sendmsg(struct pt_regs *regs)
{
    struct task_struct *p = current;
    struct msghdr krnl_msg;

    if (unlikely(!access_rok(__xn_reg_arg2(regs), sizeof(krnl_msg)) ||
                 __xn_copy_from_user(&krnl_msg,
                                     (void __user *)__xn_reg_arg2(regs),
                                     sizeof(krnl_msg))))
        return -EFAULT;

    return __rt_dev_sendmsg(p, __xn_reg_arg1(regs), &krnl_msg,
                            __xn_reg_arg3(regs));
}
static RTAI_SYSCALL_MODE int sys_rtdm_recvmsg(long fd, struct msghdr *msg,
                                              long flags)
{
    struct msghdr krnl_msg;
    struct task_struct *curr = current;
    int ret;

    if (unlikely(!__xn_access_ok(curr, VERIFY_WRITE, msg, sizeof(krnl_msg))))
        return -EFAULT;

    __xn_copy_from_user(curr, &krnl_msg, msg, sizeof(krnl_msg));

    ret = __rt_dev_recvmsg(curr, fd, &krnl_msg, flags);
    if (ret >= 0) {
        /* Publish the updated msghdr (lengths, flags) back to user space. */
        __xn_copy_to_user(curr, msg, &krnl_msg, sizeof(krnl_msg));
    }

    return ret;
}
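Both recvmsg wrappers in this section share the same copy-in/work/copy-out shape. Here is a condensed sketch of that pattern using the stock Linux accessors in place of the skins' __xn_* equivalents; the worker callback is illustrative.

/* Generic copy-in/work/copy-out pattern: snapshot a user struct, let the
 * worker update the kernel copy, publish the result back on success. */
#include <linux/socket.h>
#include <linux/uaccess.h>

static long with_user_msghdr(struct msghdr __user *umsg,
                             long (*worker)(struct msghdr *))
{
    struct msghdr kmsg;
    long ret;

    if (copy_from_user(&kmsg, umsg, sizeof(kmsg)))
        return -EFAULT;

    ret = worker(&kmsg);
    if (ret < 0)
        return ret;

    /* recvmsg-style calls update msg_namelen, msg_controllen and
       msg_flags, so the caller must see the modified header. */
    if (copy_to_user(umsg, &kmsg, sizeof(kmsg)))
        return -EFAULT;

    return ret;
}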
static int __wind_msgq_send(struct task_struct *curr, struct pt_regs *regs)
{
    xnhandle_t handle = __xn_reg_arg1(regs);
    char tmp_buf[128], *msgbuf;
    wind_msgq_t *msgq;
    int timeout, prio;
    unsigned nbytes;
    STATUS err;

    nbytes = __xn_reg_arg3(regs);
    timeout = __xn_reg_arg4(regs);
    prio = __xn_reg_arg5(regs);

    if (timeout != NO_WAIT && !xnpod_primary_p())
        return -EPERM;

    msgq = (wind_msgq_t *)xnregistry_fetch(handle);
    if (!msgq)
        return S_objLib_OBJ_ID_ERROR;

    if (nbytes > msgq->msg_length)
        return S_msgQLib_INVALID_MSG_LENGTH;

    if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg2(regs), nbytes))
        return -EFAULT;

    if (nbytes <= sizeof(tmp_buf))
        msgbuf = tmp_buf;
    else {
        msgbuf = (char *)xnmalloc(nbytes);
        if (!msgbuf)
            return S_memLib_NOT_ENOUGH_MEMORY;
    }

    /* This is sub-optimal since we end up copying the data twice. */
    __xn_copy_from_user(curr, msgbuf, (void __user *)__xn_reg_arg2(regs),
                        nbytes);

    err = msgQSend((MSG_Q_ID)msgq, msgbuf, nbytes, timeout, prio);

    if (msgbuf != tmp_buf)
        xnfree(msgbuf);

    return err == ERROR ? wind_errnoget() : 0;
}
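The send path above bounces the payload through a 128-byte stack buffer for small messages and falls back to a nucleus heap allocation for larger ones. Below is a condensed sketch of that bounce-buffer idiom with the stock kmalloc()/kfree() standing in for xnmalloc()/xnfree(); the send callback is illustrative.

/* Bounce-buffer idiom: skip the heap round-trip for small payloads while
 * keeping the kernel stack footprint bounded. */
#include <linux/slab.h>
#include <linux/uaccess.h>

static long send_bounced(const void __user *ubuf, size_t nbytes,
                         long (*send)(const void *, size_t))
{
    char stack_buf[128];
    void *buf = stack_buf;
    long ret;

    if (nbytes > sizeof(stack_buf)) {
        buf = kmalloc(nbytes, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
    }

    if (copy_from_user(buf, ubuf, nbytes))
        ret = -EFAULT;
    else
        ret = send(buf, nbytes);

    if (buf != stack_buf)
        kfree(buf);

    return ret;
}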
static int __sc_adelay(struct task_struct *curr, struct pt_regs *regs)
{
    struct timespec time;
    int err;

    if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(time)))
        return -EFAULT;

    __xn_copy_from_user(curr, &time, (void __user *)__xn_reg_arg1(regs),
                        sizeof(time));

    sc_adelay(time, &err);

    return err;
}
static int __rt_shm_heap_open(struct task_struct *curr, struct pt_regs *regs)
{
    unsigned long name;
    int size;
    int suprt, in_kheap;
    unsigned long off;
    unsigned long opaque;
    void *ret;
    extern void *_shm_alloc(unsigned long name, int size, int suprt,
                            int in_kheap, unsigned long *opaque);

    if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(size)) ||
        !__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs), sizeof(off)))
        return 0;

    name = (unsigned long)__xn_reg_arg1(regs);

    /* Size of heap space. */
    __xn_copy_from_user(curr, &size, (void __user *)__xn_reg_arg2(regs),
                        sizeof(size));

    /* Creation mode. */
    suprt = (int)__xn_reg_arg3(regs);
    in_kheap = (int)__xn_reg_arg4(regs);

    ret = _shm_alloc(name, size, suprt, in_kheap, &opaque);
    if (!ret)
        goto free_and_fail;

    off = xnheap_mapped_offset((xnheap_t *)opaque, ret);
    size = (int)((xnheap_t *)opaque)->extentsize;

    __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &size,
                      sizeof(size));
    __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs), &off,
                      sizeof(off));

    return (int)opaque;

free_and_fail:
    return 0;
}
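On success the handler returns the heap descriptor as an opaque handle and copies back two values: the heap extent size and the offset of the allocated block inside the mapped extent. Below is a hedged user-side sketch of how a consumer would typically use those two values, assuming a device file whose mmap exposes the whole heap extent; the device and helper are hypothetical.

/* Hypothetical consumer: map the heap extent, then locate the block at the
 * offset the kernel handed back. */
#include <stddef.h>
#include <sys/mman.h>

static void *attach_shm_block(int fd, int extent_size, unsigned long off)
{
    void *base = mmap(NULL, extent_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);

    if (base == MAP_FAILED)
        return NULL;

    return (char *)base + off;   /* block address = heap base + offset */
}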
static int __sc_sclock(struct task_struct *curr, struct pt_regs *regs)
{
    struct timespec time;
    unsigned long ns;
    int err;

    if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(time)))
        return -EFAULT;

    __xn_copy_from_user(curr, &time, (void __user *)__xn_reg_arg1(regs),
                        sizeof(time));

    /* The nanoseconds value travels by value in the second argument
       register; the first register holds the timespec pointer. */
    ns = __xn_reg_arg2(regs);

    sc_sclock(time, ns, &err);

    return err;
}
static int sys_rtdm_recvmsg(struct pt_regs *regs)
{
    struct task_struct *p = current;
    struct user_msghdr krnl_msg;
    int ret;

    if (unlikely(!access_wok(__xn_reg_arg2(regs), sizeof(krnl_msg)) ||
                 __xn_copy_from_user(&krnl_msg,
                                     (void __user *)__xn_reg_arg2(regs),
                                     sizeof(krnl_msg))))
        return -EFAULT;

    ret = __rt_dev_recvmsg(p, __xn_reg_arg1(regs), &krnl_msg,
                           __xn_reg_arg3(regs));
    if (unlikely(ret < 0))
        return ret;

    if (unlikely(__xn_copy_to_user((void __user *)__xn_reg_arg2(regs),
                                   &krnl_msg, sizeof(krnl_msg))))
        return -EFAULT;

    return ret;
}
static int __wind_task_init(struct task_struct *curr, struct pt_regs *regs)
{
    xncompletion_t __user *u_completion;
    char name[XNOBJECT_NAME_LEN];
    struct wind_arg_bulk bulk;
    int err = 0, prio, flags;
    WIND_TCB_PLACEHOLDER ph;
    WIND_TCB *task;

    if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
        return -EFAULT;

    if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
                        sizeof(bulk));

    if (bulk.a1) {
        if (!__xn_access_ok(curr, VERIFY_READ, bulk.a1, sizeof(name)))
            return -EFAULT;

        __xn_strncpy_from_user(curr, name, (const char __user *)bulk.a1,
                               sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';
        strncpy(curr->comm, name, sizeof(curr->comm));
        curr->comm[sizeof(curr->comm) - 1] = '\0';
    } else
        *name = '\0';

    /* Task priority. */
    prio = bulk.a2;
    /* Task flags. */
    flags = bulk.a3 | VX_SHADOW;

    /* Completion descriptor our parent thread is pending on. */
    u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

    task = (WIND_TCB *)xnmalloc(sizeof(*task));
    if (!task) {
        if (u_completion)
            xnshadow_signal_completion(u_completion, -ENOMEM);
        return -ENOMEM;
    }

    xnthread_clear_state(&task->threadbase, XNZOMBIE);

    /* Force FPU support in user-space. This will lead to a no-op if
       the platform does not support it. */
    if (taskInit(task, name, prio, flags, NULL, 0, NULL,
                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == OK) {
        /* Let the skin discard the TCB memory upon exit. */
        task->auto_delete = 1;
        task->ptid = bulk.a4;
        /* Copy back the registry handle to the ph struct. */
        ph.handle = xnthread_handle(&task->threadbase);
        __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &ph,
                          sizeof(ph));
        err = xnshadow_map(&task->threadbase, u_completion);
    } else {
        /* Unblock the parent and pass the error code back. */
        err = wind_errnoget();
        if (u_completion)
            xnshadow_signal_completion(u_completion, err);
    }

    if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
        xnfree(task);

    return err;
}
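As in the VRTX case, the scalar arguments travel in a bulk structure. A minimal sketch of the matching user-space stub, again assuming the Xenomai 2.x XENOMAI_SKINCALL3() convention; the stub and mux identifiers are illustrative, and only the wind_arg_bulk packing mirrors the handler above.

/* Hypothetical user-side stub: a1 = name pointer (may be NULL), a2 =
 * priority, a3 = flags (the handler ORs in VX_SHADOW itself), a4 = the
 * shadow's pthread id. */
#include <pthread.h>

struct wind_arg_bulk {
    unsigned long a1, a2, a3, a4;
};

static int wind_task_init_stub(const char *name, int prio, int flags,
                               WIND_TCB_PLACEHOLDER *ph,
                               xncompletion_t *u_completion)
{
    struct wind_arg_bulk bulk = {
        .a1 = (unsigned long)name,
        .a2 = (unsigned long)prio,
        .a3 = (unsigned long)flags,
        .a4 = (unsigned long)pthread_self(),
    };

    /* arg2 receives the registry handle (the ph placeholder) on success. */
    return XENOMAI_SKINCALL3(__vxworks_muxid, __vxworks_task_init,
                             &bulk, ph, u_completion);
}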