Example #1: uITRON skin, syscall wrapper for the twai_flg() event-flag wait service
static int __ui_twai_flg(struct pt_regs *regs)
{
	UINT flgptn, waiptn, wfmode;
	TMO tmout;
	ID flgid;
	ER err;

	flgid = __xn_reg_arg2(regs);
	waiptn = __xn_reg_arg3(regs);
	wfmode = __xn_reg_arg4(regs);
	tmout = __xn_reg_arg5(regs);

	err = twai_flg(&flgptn, flgid, waiptn, wfmode, tmout);

	if (err == E_OK) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &flgptn,
					   sizeof(flgptn)))
			return -EFAULT;
	} else if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
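
All of these wrappers follow the same shape: fetch the call arguments from the register frame with the __xn_reg_argN() accessors, invoke the native skin service, then copy any output parameters back to user space, folding copy faults into -EFAULT. Below is a minimal user-space sketch of that copy-out discipline; the helper names are illustrative, and memcpy() plus a NULL check stand in for __xn_safe_copy_to_user(), which only exists kernel-side.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for __xn_safe_copy_to_user(): returns 0 on
 * success and nonzero on a (simulated) fault, like the real primitive. */
static int fake_safe_copy_to_user(void *u_dst, const void *src, size_t len)
{
	if (u_dst == NULL)
		return -EFAULT;	/* pretend the user page is unmapped */
	memcpy(u_dst, src, len);
	return 0;
}

/* Mirrors the tail of __ui_twai_flg(): copy the output parameter back
 * only when the service succeeded, and fold a copy fault into -EFAULT. */
static int wrapper(unsigned int *u_flgptn)
{
	unsigned int flgptn = 0x5a;	/* pretend twai_flg() returned this */

	if (fake_safe_copy_to_user(u_flgptn, &flgptn, sizeof(flgptn)))
		return -EFAULT;

	return 0;	/* E_OK */
}

int main(void)
{
	unsigned int out;

	printf("%d\n", wrapper(&out));	/* 0, and out == 0x5a */
	printf("%d\n", wrapper(NULL));	/* -EFAULT */
	return 0;
}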
Example #2: VRTX skin, syscall wrapper for the sc_qecreate() queue-creation service
static int __sc_qecreate(struct task_struct *curr, struct pt_regs *regs)
{
	int qid, qsize, opt, err;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg4(regs),
			    sizeof(qid)))
		return -EFAULT;

	qid = __xn_reg_arg1(regs);
	qsize = __xn_reg_arg2(regs);
	opt = __xn_reg_arg3(regs);
	qid = sc_qecreate(qid, qsize, opt, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
				  &qid, sizeof(qid));
	return err;
}
Example #3: uITRON skin, syscall wrapper for the cre_tsk() task-creation service
static int __ui_cre_tsk(struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct task_struct *p = current;
	unsigned long __user *u_mode_offset;
	uitask_t *task;
	T_CTSK pk_ctsk;
	ID tskid;
	spl_t s;
	ER err;

	tskid = __xn_reg_arg1(regs);

	if (__xn_safe_copy_from_user(&pk_ctsk, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_ctsk)))
		return -EFAULT;

	pk_ctsk.tskatr |= TA_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	u_mode_offset = (unsigned long __user *)__xn_reg_arg4(regs);

	err = cre_tsk(tskid, &pk_ctsk);

	if (likely(err == E_OK)) {
		xnlock_get_irqsave(&nklock, s);
		task = xnmap_fetch(ui_task_idmap, tskid);
		if (!task) {
			xnlock_put_irqrestore(&nklock, s);
			err = E_OBJ;
			goto fail;
		}
		strncpy(p->comm, xnthread_name(&task->threadbase),
			sizeof(p->comm));
		p->comm[sizeof(p->comm) - 1] = '\0';
		xnlock_put_irqrestore(&nklock, s);
		/* Since we may not hold the superlock across a call
		 * to xnshadow_map(), we do have a small race window
		 * here, if the created task is killed then its TCB
		 * recycled before we could map it; however, the risk
		 * is mitigated by consistency checks performed in
		 * xnshadow_map(). */
		return xnshadow_map(&task->threadbase,
				    u_completion, u_mode_offset);
	}

fail:
	/* Unblock and pass back the error code. */

	if (u_completion)
		xnshadow_signal_completion(u_completion, err);

	return err;
}
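
One detail worth keeping when reusing the code above: strncpy() does not NUL-terminate the destination when the source fills it, which is why __ui_cre_tsk() writes the terminator explicitly after copying the thread name into p->comm. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char comm[16];	/* same width as task_struct.comm (TASK_COMM_LEN) */
	const char *name = "a-rather-long-thread-name";

	strncpy(comm, name, sizeof(comm));
	comm[sizeof(comm) - 1] = '\0';	/* strncpy() left no terminator */

	printf("%s\n", comm);	/* truncated to 15 characters plus '\0' */
	return 0;
}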
Example #4: VxWorks (wind) skin, syscall wrapper for msgQReceive()
static int __wind_msgq_receive(struct task_struct *curr, struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	char tmp_buf[128], *msgbuf;
	wind_msgq_t *msgq;
	int timeout, err;
	unsigned nbytes;

	nbytes = __xn_reg_arg3(regs);
	timeout = __xn_reg_arg4(regs);

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), nbytes))
		return -EFAULT;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			    sizeof(nbytes)))
		return -EFAULT;

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);

	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;
	else {
		msgbuf = (char *)xnmalloc(nbytes);

		if (!msgbuf)
			return S_memLib_NOT_ENOUGH_MEMORY;
	}

	/* This is sub-optimal since we end up copying the data twice. */

	err = msgQReceive((MSG_Q_ID)msgq, msgbuf, nbytes, timeout);

	if (err != ERROR) {
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  msgbuf, err);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
				  &err, sizeof(err));
		err = 0;
	} else
		err = wind_errnoget();

	if (msgbuf != tmp_buf)
		xnfree(msgbuf);

	return err;
}
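
The tmp_buf handling here is a common fast-path idiom: messages up to 128 bytes are staged on the stack, larger ones fall back to the allocator, and the epilogue frees the buffer only if it was heap-allocated. A minimal sketch of the same pattern, with the standard allocator standing in for xnmalloc()/xnfree() and an illustrative helper name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int stage_message(const char *msg, size_t nbytes)
{
	char tmp_buf[128], *msgbuf;

	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;	/* small message: no allocation */
	else {
		msgbuf = malloc(nbytes);	/* xnmalloc() in the skin */
		if (msgbuf == NULL)
			return -1;	/* S_memLib_NOT_ENOUGH_MEMORY */
	}

	memcpy(msgbuf, msg, nbytes);	/* the user<->kernel copy goes here */
	/* ... hand msgbuf to the native queue service ... */

	if (msgbuf != tmp_buf)
		free(msgbuf);		/* free only what was allocated */
	return 0;
}

int main(void)
{
	char big[4096] = "large";

	printf("%d\n", stage_message("hello", 6));	 /* stack path */
	printf("%d\n", stage_message(big, sizeof(big))); /* heap path */
	return 0;
}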
Example #5: VxWorks (wind) skin, syscall wrapper for wdStart()
static int __wind_wd_start(struct task_struct *curr, struct pt_regs *regs)
{
	wind_rholder_t *rh;
	long start_server;
	xnhandle_t handle;
	wind_wd_t *wd;
	int timeout;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs), sizeof(start_server)))
		return -EFAULT;

	handle = __xn_reg_arg1(regs);

	wd = (wind_wd_t *)xnregistry_fetch(handle);

	if (!wd)
		return S_objLib_OBJ_ID_ERROR;

	rh = wind_get_rholder();

	if (wd->rh != rh)
		/*
		 * User may not fiddle with watchdogs created from
		 * other processes.
		 */
		return S_objLib_OBJ_UNAVAILABLE;

	timeout = __xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	if (wdStart((WDOG_ID)wd, timeout,
		    (wind_timer_t)&__wind_wd_handler, (long)wd) == ERROR) {
		xnlock_put_irqrestore(&nklock, s);
		return wind_errnoget();
	}

	wd->wdt.handler = (wind_timer_t) __xn_reg_arg3(regs);
	wd->wdt.arg = (long)__xn_reg_arg4(regs);
	start_server = rh->wdcount++ == 0;

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs), &start_server,
			  sizeof(start_server));

	return 0;
}
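
The start_server flag computed under nklock is a "first user starts the service" idiom: the post-incremented counter reports the 0 -> 1 transition exactly once, so only the first registered watchdog asks user space to spawn the server. Sketched below without the lock (names are illustrative):

#include <stdio.h>

static int wdcount;	/* rh->wdcount in the skin, protected by nklock */

static int watchdog_register(void)
{
	/* In the real code this increment happens under nklock, so
	 * exactly one caller can ever observe the 0 -> 1 transition. */
	return wdcount++ == 0;
}

int main(void)
{
	printf("%d %d %d\n", watchdog_register(), watchdog_register(),
	       watchdog_register());	/* prints "1 0 0" */
	return 0;
}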
Example #6: VxWorks (wind) skin, syscall wrapper for msgQSend()
static int __wind_msgq_send(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	char tmp_buf[128], *msgbuf;
	wind_msgq_t *msgq;
	int timeout, prio;
	unsigned nbytes;
	STATUS err;

	nbytes = __xn_reg_arg3(regs);
	timeout = __xn_reg_arg4(regs);
	prio = __xn_reg_arg5(regs);

	if (timeout != NO_WAIT && !xnpod_primary_p())
		return -EPERM;

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);

	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	if (nbytes > msgq->msg_length)
		return S_msgQLib_INVALID_MSG_LENGTH;

	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;
	else {
		msgbuf = (char *)xnmalloc(nbytes);

		if (!msgbuf)
			return S_memLib_NOT_ENOUGH_MEMORY;
	}

	/* This is sub-optimal since we end up copying the data twice. */

	if (__xn_safe_copy_from_user(msgbuf, (void __user *)__xn_reg_arg2(regs), nbytes))
		err = -EFAULT;
	else {
		if (msgQSend((MSG_Q_ID)msgq, msgbuf, nbytes, timeout, prio) == ERROR)
			err = wind_errnoget();
		else
			err = 0;
	}

	if (msgbuf != tmp_buf)
		xnfree(msgbuf);

	return err;
}
Example #7: VxWorks (wind) skin, syscall wrapper for msgQCreate(), older calling convention
static int __wind_msgq_create(struct task_struct *curr, struct pt_regs *regs)
{
	int nb_msgs, length, flags;
	wind_msgq_t *msgq;
	MSG_Q_ID qid;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg4(regs),
			    sizeof(qid)))
		return -EFAULT;

	nb_msgs = __xn_reg_arg1(regs);
	length = __xn_reg_arg2(regs);
	flags = __xn_reg_arg3(regs);
	msgq = (wind_msgq_t *)msgQCreate(nb_msgs, length, flags);

	if (!msgq)
		return wind_errnoget();

	qid = msgq->handle;
	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs), &qid,
			  sizeof(qid));

	return 0;
}
Example #8: syscall wrapper for the _shm_alloc() shared-heap open service
static int __rt_shm_heap_open(struct task_struct *curr, struct pt_regs *regs)
{
    unsigned long name;
    int size;
    int suprt, in_kheap;

    unsigned long off;
    unsigned long opaque;
    void *ret;
    extern void *_shm_alloc(unsigned long name, int size, int suprt,
                            int in_kheap, unsigned long *opaque);

    /* This service returns an opaque handle, so failure is reported as
     * 0 rather than as a negative errno. */
    if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
                        sizeof(size)) ||
        !__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
                        sizeof(off)))
        return 0;

    name = (unsigned long)__xn_reg_arg1(regs);
    /* Size of heap space. */
    __xn_copy_from_user(curr, &size, (void __user *)__xn_reg_arg2(regs),
                        sizeof(size));
    /* Creation mode. */
    suprt = (int)__xn_reg_arg3(regs);
    in_kheap = (int)__xn_reg_arg4(regs);

    ret = _shm_alloc(name, size, suprt, in_kheap, &opaque);

    if (!ret)
        goto free_and_fail;

    off = xnheap_mapped_offset((xnheap_t *)opaque, ret);

    size = (int)((xnheap_t *)opaque)->extentsize;
    __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &size,
                      sizeof(size));
    __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs), &off,
                      sizeof(off));

    return (int)opaque;

free_and_fail:

    /* _shm_alloc() failed before anything was allocated on our side;
     * report failure with a null handle. */
    return 0;
}
Example #9: uITRON skin, syscall wrapper for the pol_flg() event-flag poll service
static int __ui_pol_flg(struct pt_regs *regs)
{
	UINT flgptn, waiptn, wfmode;
	ID flgid;
	ER err;

	flgid = __xn_reg_arg2(regs);
	waiptn = __xn_reg_arg3(regs);
	wfmode = __xn_reg_arg4(regs);

	err = pol_flg(&flgptn, flgid, waiptn, wfmode);

	if (err == E_OK &&
		__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &flgptn,
				       sizeof(flgptn)))
		return -EFAULT;

	return err;
}
Example #10: VxWorks (wind) skin, syscall wrapper for msgQCreate(), newer calling convention
static int __wind_msgq_create(struct pt_regs *regs)
{
	int nb_msgs, length, flags;
	wind_msgq_t *msgq;
	MSG_Q_ID qid;

	nb_msgs = __xn_reg_arg1(regs);
	length = __xn_reg_arg2(regs);
	flags = __xn_reg_arg3(regs);
	msgq = (wind_msgq_t *)msgQCreate(nb_msgs, length, flags);

	if (!msgq)
		return wind_errnoget();

	qid = msgq->handle;

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg4(regs), &qid,
				      sizeof(qid));
}
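
Examples #7 and #10 show the two generations of this wrapper side by side: the older one takes the caller's task_struct, checks the destination with __xn_access_ok() up front and then uses the unchecked __xn_copy_to_user(), while the newer one drops both the task argument and the pre-check and simply returns whatever __xn_safe_copy_to_user() yields, since that primitive already reports -EFAULT on a bad destination.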
Example #11: VRTX skin, syscall wrapper for the sc_fpend() event-flag pend service
static int __sc_fpend(struct task_struct *curr, struct pt_regs *regs)
{
	int fid, mask, mask_r, opt, err;
	long timeout;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
			    sizeof(mask_r)))
		return -EFAULT;

	fid = __xn_reg_arg1(regs);
	timeout = __xn_reg_arg2(regs);
	mask = __xn_reg_arg3(regs);
	opt = __xn_reg_arg4(regs);
	mask_r = sc_fpend(fid, timeout, mask, opt, &err);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
				  &mask_r, sizeof(mask_r));
	return err;
}
Example #12: VRTX skin, syscall wrapper for the sc_pcreate() partition-creation service
static int __sc_pcreate(struct task_struct *curr, struct pt_regs *regs)
{
	u_long ptsize, bsize;
	vrtx_pdesc_t pdesc;
	xnheap_t *ptheap;
	vrtxpt_t *pt;
	int err, pid;
	char *ptaddr;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg4(regs),
			    sizeof(pdesc)))
		return -EFAULT;

	ptheap = (xnheap_t *)xnmalloc(sizeof(*ptheap));

	if (!ptheap)
		return ER_MEM;

	/* Suggested partition ID. */
	pid = __xn_reg_arg1(regs);
	/* Size of partition space -- account for the heap mgmt overhead. */
	ptsize = __xn_reg_arg2(regs);
	/* Shared heaps use the natural page size (PAGE_SIZE) */
	ptsize = xnheap_rounded_size(ptsize, PAGE_SIZE);
	/* Block size. */
	bsize = __xn_reg_arg3(regs);

	err = xnheap_init_mapped(ptheap, ptsize, 0);

	if (err)
		goto free_heap;

	/* Allocate the partition space as a single shared heap block. */
	ptaddr = xnheap_alloc(ptheap, ptsize);
	pid = sc_pcreate(pid, ptaddr, ptsize, bsize, &err);

	if (err)
		goto unmap_pt;

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (pt) {		/* Paranoid. */
		pt->mm = curr->mm;
		pt->sysheap = ptheap;
		pdesc.pid = pid;
		pdesc.ptcb = ptheap;
		pdesc.ptsize = xnheap_extentsize(ptheap);

		xnlock_put_irqrestore(&nklock, s);

		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
				  &pdesc, sizeof(pdesc));
		return 0;
	}

	xnlock_put_irqrestore(&nklock, s);

	err = ER_PID;

unmap_pt:

	xnheap_destroy_mapped(ptheap, NULL, NULL);

free_heap:

	xnfree(ptheap);

	return err;
}
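
The unmap_pt/free_heap tail of this function is the classic goto-based unwinding idiom: each failure jumps to the label that releases exactly the resources acquired so far, in reverse order of acquisition. A compilable miniature of the same shape, with malloc()/free() standing in for the heap and mapping calls and an illustrative function name:

#include <stdio.h>
#include <stdlib.h>

static int pcreate_like(int fail_at)
{
	void *heap, *part;
	int err;

	heap = malloc(512);	/* xnmalloc() + xnheap_init_mapped() */
	if (heap == NULL)
		return -1;

	part = malloc(128);	/* the partition block */
	if (part == NULL) {
		err = -2;
		goto free_heap;
	}

	if (fail_at) {		/* e.g. sc_pcreate() reporting ER_PID */
		err = -3;
		goto free_part;
	}

	/* Success: ownership passes to the caller in the real code;
	 * freed here only to keep the sketch leak-free. */
	free(part);
	free(heap);
	return 0;

free_part:
	free(part);		/* undo in reverse order of acquisition */
free_heap:
	free(heap);
	return err;
}

int main(void)
{
	printf("%d %d\n", pcreate_like(0), pcreate_like(1));
	return 0;
}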