Example #1
static int __ui_cre_tsk(struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct task_struct *p = current;
	unsigned long __user *u_mode_offset;
	uitask_t *task;
	T_CTSK pk_ctsk;
	ID tskid;
	spl_t s;
	ER err;

	tskid = __xn_reg_arg1(regs);

	if (__xn_safe_copy_from_user(&pk_ctsk, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_ctsk)))
		return -EFAULT;

	pk_ctsk.tskatr |= TA_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	u_mode_offset = (unsigned long __user *)__xn_reg_arg4(regs);

	err = cre_tsk(tskid, &pk_ctsk);

	if (likely(err == E_OK)) {
		xnlock_get_irqsave(&nklock, s);
		task = xnmap_fetch(ui_task_idmap, tskid);
		if (!task) {
			xnlock_put_irqrestore(&nklock, s);
			err = E_OBJ;
			goto fail;
		}
		strncpy(p->comm, xnthread_name(&task->threadbase),
			sizeof(p->comm));
		p->comm[sizeof(p->comm) - 1] = '\0';
		xnlock_put_irqrestore(&nklock, s);
		/* Since we may not hold the superlock across a call
		 * to xnshadow_map(), there is a small race window
		 * here: the created task may be killed and its TCB
		 * recycled before we get to map it. The risk is
		 * mitigated by consistency checks performed in
		 * xnshadow_map(). See the sketch after this function. */
		return xnshadow_map(&task->threadbase,
				    u_completion, u_mode_offset);
	}

fail:
	/* Unblock and pass back the error code. */

	if (u_completion)
		xnshadow_signal_completion(u_completion, err);

	return err;
}
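
The race called out in the comment above sits between xnlock_put_irqrestore() and xnshadow_map(): once nklock is dropped, the freshly created task can be deleted and its TCB recycled before the mapping happens. The consistency check the comment attributes to xnshadow_map() presumably has roughly the following shape; this is a hedged sketch only (__ui_map_checked is an invented name, not nucleus code), and re-checking under nklock narrows the window rather than closing it:

/* Hypothetical sketch of a consistency check like the one the
 * comment attributes to xnshadow_map(); illustration only. */
static int __ui_map_checked(xnthread_t *thread,
			    xncompletion_t __user *u_completion,
			    unsigned long __user *u_mode_offset)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	/* Refuse to map a thread whose TCB is already being recycled. */
	if (xnthread_test_state(thread, XNZOMBIE)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EIDRM;
	}
	xnlock_put_irqrestore(&nklock, s);

	return xnshadow_map(thread, u_completion, u_mode_offset);
}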
Example #2
static int __ui_cre_mbx(struct pt_regs *regs)
{
	ID mbxid = __xn_reg_arg1(regs);
	T_CMBX pk_cmbx;

	if (__xn_safe_copy_from_user(&pk_cmbx, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_cmbx)))
		return -EFAULT;

	return cre_mbx(mbxid, &pk_cmbx);
}
Example #3
static int __ui_cre_flg(struct pt_regs *regs)
{
	ID flgid = __xn_reg_arg1(regs);
	T_CFLG pk_cflg;

	if (__xn_safe_copy_from_user(&pk_cflg, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_cflg)))
		return -EFAULT;

	return cre_flg(flgid, &pk_cflg);
}
Example #4
static int __ui_cre_sem(struct pt_regs *regs)
{
	ID semid = __xn_reg_arg1(regs);
	T_CSEM pk_csem;

	if (__xn_safe_copy_from_user(&pk_csem, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_csem)))
		return -EFAULT;

	return cre_sem(semid, &pk_csem);
}
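
Examples #2, #3 and #4 are the same two-step marshaling pattern: the object ID arrives in the first syscall argument register, the creation packet (T_CMBX, T_CFLG, T_CSEM) is snapshotted out of user memory with __xn_safe_copy_from_user(), and the native in-kernel service does the actual work. On the user-space side, such a handler is typically reached through a skin mux call; a minimal sketch of the matching stub, assuming the mux id and request code are named __uitron_muxid and __uitron_cre_sem (both names hypothetical, not taken from this listing):

/* Hypothetical user-space stub; __uitron_muxid and __uitron_cre_sem
 * are assumed names for the skin mux id and request code. */
ER cre_sem(ID semid, const T_CSEM *pk_csem)
{
	return XENOMAI_SKINCALL2(__uitron_muxid, __uitron_cre_sem,
				 semid, pk_csem);
}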
Example #5
int cobalt_mutexattr_destroy(pthread_mutexattr_t __user *u_attr)
{
	pthread_mutexattr_t attr;
	int err;

	if (__xn_safe_copy_from_user(&attr, u_attr, sizeof(attr)))
		return -EFAULT;

	err = pthread_mutexattr_destroy(&attr);
	if (err)
		return -err;

	return __xn_safe_copy_to_user(u_attr, &attr, sizeof(*u_attr));
}
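
All the mutexattr wrappers in this set (#5, #7, #8 and #9) follow the same copy-in, operate, copy-out round-trip: the kernel never dereferences the user pointer directly, the native service runs on a private kernel copy, and its positive errno return is negated into the kernel convention; the getters (#7, #9) merely copy the result out through a separate user pointer instead of writing the attribute back. Condensed into one hypothetical helper (attr_roundtrip illustrates the shared shape and is not part of the Cobalt API):

/* Hypothetical helper showing the shared round-trip shape. */
static int attr_roundtrip(pthread_mutexattr_t __user *u_attr,
			  int (*svc)(pthread_mutexattr_t *))
{
	pthread_mutexattr_t attr;
	int err;

	/* Snapshot the user object; never trust the raw pointer. */
	if (__xn_safe_copy_from_user(&attr, u_attr, sizeof(attr)))
		return -EFAULT;

	err = svc(&attr);
	if (err)
		return -err;	/* POSIX services return positive errnos. */

	/* Publish the (possibly updated) copy back to user space. */
	return __xn_safe_copy_to_user(u_attr, &attr, sizeof(attr));
}

With such a helper, example #5 would reduce to return attr_roundtrip(u_attr, pthread_mutexattr_destroy);.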
Example #6
static int __wind_msgq_send(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	char tmp_buf[128], *msgbuf;
	wind_msgq_t *msgq;
	int timeout, prio;
	unsigned nbytes;
	STATUS err;

	nbytes = __xn_reg_arg3(regs);
	timeout = __xn_reg_arg4(regs);
	prio = __xn_reg_arg5(regs);

	/* A blocking send is only legal from primary mode. */
	if (timeout != NO_WAIT && !xnpod_primary_p())
		return -EPERM;

	msgq = (wind_msgq_t *)xnregistry_fetch(handle);

	if (!msgq)
		return S_objLib_OBJ_ID_ERROR;

	if (nbytes > msgq->msg_length)
		return S_msgQLib_INVALID_MSG_LENGTH;

	if (nbytes <= sizeof(tmp_buf))
		msgbuf = tmp_buf;
	else {
		msgbuf = (char *)xnmalloc(nbytes);

		if (!msgbuf)
			return S_memLib_NOT_ENOUGH_MEMORY;
	}

	/* This is sub-optimal since we end up copying the data twice:
	 * once into msgbuf here, then into the queue storage within
	 * msgQSend(). See the note after this function. */

	if (__xn_safe_copy_from_user(msgbuf, (void __user *)__xn_reg_arg2(regs), nbytes))
		err = -EFAULT;
	else {
		if (msgQSend((MSG_Q_ID)msgq, msgbuf, nbytes, timeout, prio) == ERROR)
			err = wind_errnoget();
		else
			err = 0;
	}

	if (msgbuf != tmp_buf)
		xnfree(msgbuf);

	return err;
}
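
The double copy the comment points at is user buffer into msgbuf here, then msgbuf into the queue's internal message storage within msgQSend(). A single-copy variant would have to pull the payload straight from user space into a reserved queue slot, which the wind skin does not expose; a hedged sketch of that shape, with msgq_alloc_slot() and msgq_commit_slot() as invented names for illustration only:

/* Hypothetical single-copy path; msgq_alloc_slot() and
 * msgq_commit_slot() do not exist in the wind skin. */
static int __wind_msgq_send_1copy(wind_msgq_t *msgq, struct pt_regs *regs,
				  unsigned nbytes, int timeout)
{
	void *slot = msgq_alloc_slot(msgq, nbytes, timeout);

	if (slot == NULL)
		return S_objLib_OBJ_TIMEOUT;

	if (__xn_safe_copy_from_user(slot, (void __user *)__xn_reg_arg2(regs),
				     nbytes)) {
		msgq_commit_slot(msgq, slot, 0);	/* abort the reservation */
		return -EFAULT;
	}

	msgq_commit_slot(msgq, slot, nbytes);	/* publish the message */
	return 0;
}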
Example #7
int cobalt_mutexattr_getpshared(const pthread_mutexattr_t __user *u_attr,
				int __user *u_pshared)
{
	pthread_mutexattr_t attr;
	int err, pshared;

	if (__xn_safe_copy_from_user(&attr, u_attr, sizeof(attr)))
		return -EFAULT;

	err = pthread_mutexattr_getpshared(&attr, &pshared);
	if (err)
		return -err;

	return __xn_safe_copy_to_user(u_pshared, &pshared, sizeof(*u_pshared));
}
Example #8
int cobalt_mutexattr_setprotocol(pthread_mutexattr_t __user *u_attr,
				 int proto)
{
	pthread_mutexattr_t attr;
	int err;

	if (__xn_safe_copy_from_user(&attr, u_attr, sizeof(attr)))
		return -EFAULT;

	err = pthread_mutexattr_setprotocol(&attr, proto);
	if (err)
		return -err;

	return __xn_safe_copy_to_user(u_attr, &attr, sizeof(*u_attr));
}
Example #9
int cobalt_mutexattr_gettype(const pthread_mutexattr_t __user *u_attr,
			     int __user *u_type)
{
	pthread_mutexattr_t attr;
	int err, type;

	if (__xn_safe_copy_from_user(&attr, u_attr, sizeof(attr)))
		return -EFAULT;

	err = pthread_mutexattr_gettype(&attr, &type);
	if (err)
		return -err;

	return __xn_safe_copy_to_user(u_type, &type, sizeof(*u_type));
}
Example #10
static int __wind_task_init(struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct task_struct *p = current;
	char name[XNOBJECT_NAME_LEN];
	struct wind_arg_bulk bulk;
	int err = 0, prio, flags;
	WIND_TCB_PLACEHOLDER ph;
	WIND_TCB *task;

	if (__xn_safe_copy_from_user(&bulk, (void __user *)__xn_reg_arg1(regs),
				     sizeof(bulk)))
		return -EFAULT;

	if (bulk.a1) {
		if (__xn_safe_strncpy_from_user(name, (const char __user *)bulk.a1,
						sizeof(name) - 1) < 0)
			return -EFAULT;

		name[sizeof(name) - 1] = '\0';
		strncpy(p->comm, name, sizeof(p->comm));
		p->comm[sizeof(p->comm) - 1] = '\0';
	} else
		*name = '\0';

	/* Task priority. */
	prio = bulk.a2;
	/* Task flags. */
	flags = bulk.a3 | VX_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = (WIND_TCB *)xnmalloc(sizeof(*task));

	if (!task) {
		err = -ENOMEM;
		goto fail;
	}

	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	/* Force FPU support in user-space. This will be a no-op if
	   the platform does not support it. */

	if (taskInit(task, name, prio, flags, NULL, 0, NULL,
		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == OK) {
		/* Let the skin discard the TCB memory upon exit. */
		task->auto_delete = 1;
		task->ptid = bulk.a4;
		/* Copy back the registry handle to the ph struct. */
		ph.handle = xnthread_handle(&task->threadbase);
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs), &ph,
					   sizeof(ph)))
			err = -EFAULT;
		else {
			err = xnshadow_map(&task->threadbase, u_completion,
					   (unsigned long __user *)bulk.a5);
			if (!err)
				goto out;
		}
		taskDeleteForce((TASK_ID) task);
	} else
		err = wind_errnoget();

	/* Unblock and pass back the error code. */

fail:

	if (u_completion)
		xnshadow_signal_completion(u_completion, err);

	if (task && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);
out:
	return err;
}
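
struct wind_arg_bulk is not defined in this listing, but the accesses above pin down its shape: a1 carries the task name pointer, a2 the priority, a3 the flags word, a4 the user-space thread id stored into task->ptid, and a5 the pointer handed to xnshadow_map() as the mode-offset word. Reconstructed from that usage (an inference; the skin's header holds the authoritative definition):

/* Layout inferred from __wind_task_init() above, not copied from
 * the vxworks skin headers. */
struct wind_arg_bulk {
	unsigned long a1;	/* task name (user pointer) */
	unsigned long a2;	/* task priority */
	unsigned long a3;	/* task flags */
	unsigned long a4;	/* user-space pthread identifier */
	unsigned long a5;	/* u_mode_offset (user pointer) */
};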