Example No. 1
ER del_mbx(ID mbxid)
{
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_mbx_idmap, mbx->id);
	ui_mark_deleted(mbx);
#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(mbx->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/* Destroy the synch object before freeing the mailbox, so
	   that no freed memory is referenced. */
	if (xnsynch_destroy(&mbx->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(mbx->ring);
	xnfree(mbx);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Example No. 2
void sc_sdelete(int semid, int opt, int *errp)
{
	vrtxsem_t *sem;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return;
	}

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	if (opt == 0 && xnsynch_nsleepers(&sem->synchbase) > 0) {
		*errp = ER_PND;
		goto unlock_and_exit;
	}

	/* forcing delete or no task pending */
	if (sem_destroy_internal(sem) == XNSYNCH_RESCHED)
		xnpod_schedule();

	*errp = RET_OK;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 3
void sc_spost(int semid, int *errp)
{
	vrtxsem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnsynch_wakeup_one_sleeper(&sem->synchbase))
		xnpod_schedule();
	else if (sem->count == MAX_SEM_VALUE)
		*errp = ER_OVF;
	else
		sem->count++;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 4
ER clr_flg(ID flgid, UINT clrptn)
{
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	flag->flgvalue &= clrptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 5
ER del_flg(ID flgid)
{
	uiflag_t *flag;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_flag_idmap, flag->id);
	ui_mark_deleted(flag);

	xnregistry_remove(flag->handle);

	/* Destroy the synch object before freeing the flag, so that
	   no freed memory is referenced. */
	if (xnsynch_destroy(&flag->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(flag);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Example No. 6
void sc_maccept(int mid, int *errp)
{
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	if (xnthread_try_grab(xnpod_current_thread(), &mx->synchbase))
		*errp = RET_OK;
	else
		*errp = ER_PND;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 7
void sc_mdelete(int mid, int opt, int *errp)
{
	xnthread_t *owner;
	vrtxmx_t *mx;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return;
	}

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	owner = xnsynch_owner(&mx->synchbase);
	if (owner && (opt == 0 || xnpod_current_thread() != owner)) {
		*errp = ER_PND;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (mx_destroy_internal(mx) == XNSYNCH_RESCHED)
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 8
ER set_flg(ID flgid, UINT setptn)
{
	xnpholder_t *holder, *nholder;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (setptn == 0)
		goto unlock_and_exit;

	flag->flgvalue |= setptn;

	if (!xnsynch_pended_p(&flag->synchbase))
		goto unlock_and_exit;

	nholder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

	while ((holder = nholder) != NULL) {
		uitask_t *sleeper = thread2uitask(link2thread(holder, plink));
		UINT wfmode = sleeper->wargs.flag.wfmode;
		UINT waiptn = sleeper->wargs.flag.waiptn;

		if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0)
		    || (!(wfmode & TWF_ORW) && ((waiptn & flag->flgvalue) == waiptn))) {
			nholder = xnsynch_wakeup_this_sleeper(&flag->synchbase, holder);
			sleeper->wargs.flag.waiptn = flag->flgvalue;

			if (wfmode & TWF_CLR)
				flag->flgvalue = 0;
		} else
			nholder = nextpq(xnsynch_wait_queue(&flag->synchbase), holder);
	}

	xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 9
static int __ui_cre_tsk(struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct task_struct *p = current;
	unsigned long __user *u_mode_offset;
	uitask_t *task;
	T_CTSK pk_ctsk;
	ID tskid;
	spl_t s;
	ER err;

	tskid = __xn_reg_arg1(regs);

	if (__xn_safe_copy_from_user(&pk_ctsk, (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_ctsk)))
		return -EFAULT;

	pk_ctsk.tskatr |= TA_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	u_mode_offset = (unsigned long __user *)__xn_reg_arg4(regs);

	err = cre_tsk(tskid, &pk_ctsk);

	if (likely(err == E_OK)) {
		xnlock_get_irqsave(&nklock, s);
		task = xnmap_fetch(ui_task_idmap, tskid);
		if (!task) {
			xnlock_put_irqrestore(&nklock, s);
			err = E_OBJ;
			goto fail;
		}
		strncpy(p->comm, xnthread_name(&task->threadbase),
			sizeof(p->comm));
		p->comm[sizeof(p->comm) - 1] = '\0';
		xnlock_put_irqrestore(&nklock, s);
		/* Since we may not hold the superlock across a call
		 * to xnshadow_map(), we do have a small race window
		 * here, if the created task is killed then its TCB
		 * recycled before we could map it; however, the risk
		 * is mitigated by consistency checks performed in
		 * xnshadow_map(). */
		return xnshadow_map(&task->threadbase,
				    u_completion, u_mode_offset);
	}

fail:
	/* Unblock and pass back the error code. */

	if (u_completion)
		xnshadow_signal_completion(u_completion, err);

	return err;
}
Example No. 10
void sc_mpend(int mid, unsigned long timeout, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxtask_t *task;
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnthread_try_grab(cur, &mx->synchbase))
		goto unlock_and_exit;

	if (xnsynch_owner(&mx->synchbase) == cur)
		goto unlock_and_exit;

	task = thread2vrtxtask(cur);
	task->vrtxtcb.TCBSTAT = TBSMUTEX;

	if (timeout)
		task->vrtxtcb.TCBSTAT |= TBSDELAY;

	xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(cur, XNBREAK))
		*errp = -EINTR;
	else if (xnthread_test_info(cur, XNRMID))
		*errp = ER_DEL;	/* Mutex deleted while pending. */
	else if (xnthread_test_info(cur, XNTIMEO))
		*errp = ER_TMO;	/* Timeout. */

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 11
void sc_spend(int semid, long timeout, int *errp)
{
	vrtxtask_t *task;
	vrtxsem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (sem->count > 0)
		sem->count--;
	else {
		if (xnpod_unblockable_p()) {
			*errp = -EPERM;
			goto unlock_and_exit;
		}

		task = vrtx_current_task();

		task->vrtxtcb.TCBSTAT = TBSSEMA;

		if (timeout)
			task->vrtxtcb.TCBSTAT |= TBSDELAY;

		xnsynch_sleep_on(&sem->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(&task->threadbase, XNBREAK))
			*errp = -EINTR;
		else if (xnthread_test_info(&task->threadbase, XNRMID))
			*errp = ER_DEL;	/* Semaphore deleted while pending. */
		else if (xnthread_test_info(&task->threadbase, XNTIMEO))
			*errp = ER_TMO;	/* Timeout. */
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
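
Examples No. 2, 3 and 11 together give the classic counted-semaphore trio. A minimal usage sketch follows, built only on the signatures shown above; the creation call that yields semid is not part of this listing, so the id is taken as an input.

void semaphore_demo(int semid)
{
	int err;

	/* Consumer: block up to 100 ticks for one unit. */
	sc_spend(semid, 100, &err);
	if (err == RET_OK) {
		/* ... use the resource ... */
	} else if (err == ER_TMO) {
		/* No unit became available in time. */
	}

	/* Producer: release one unit, waking a sleeper if any. */
	sc_spost(semid, &err);

	/* Tear down; opt == 1 forces deletion even with pending tasks. */
	sc_sdelete(semid, 1, &err);
}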
Example No. 12
static int __sc_hcreate(struct task_struct *curr, struct pt_regs *regs)
{
	unsigned log2psize;
	vrtx_hdesc_t hdesc;
	vrtxheap_t *heap;
	u_long heapsize;
	int err, hid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE,
			    __xn_reg_arg3(regs), sizeof(hdesc)))
		return -EFAULT;

	/* Size of heap space. */
	heapsize = __xn_reg_arg1(regs);
	/* Log2 of the page size. */
	log2psize = (int)__xn_reg_arg2(regs);

	hid = sc_hcreate(NULL, heapsize, log2psize, &err);

	if (err)
		return err;

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);

	if (heap) {		/* Paranoid. */
		heap->mm = curr->mm;
		hdesc.hid = hid;
		hdesc.hcb = &heap->sysheap;
		hdesc.hsize = xnheap_extentsize(&heap->sysheap);

		xnlock_put_irqrestore(&nklock, s);

		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &hdesc, sizeof(hdesc));
	} else {
		xnlock_put_irqrestore(&nklock, s);
		err = ER_ID;
	}

	return err;
}
Example No. 13
ER snd_msg(ID mbxid, T_MSG *pk_msg)
{
	uitask_t *sleeper;
	ER err = E_OK;
	uimbx_t *mbx;
	int wrptr;
	spl_t s;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	sleeper = thread2uitask(xnsynch_wakeup_one_sleeper(&mbx->synchbase));

	if (sleeper) {
		sleeper->wargs.msg = pk_msg;
		xnpod_schedule();
		goto unlock_and_exit;
	}

	wrptr = mbx->wrptr;

	if (mbx->mcount > 0 && wrptr == mbx->rdptr)
		err = E_QOVR;
	else {
		mbx->ring[wrptr] = pk_msg;
		mbx->wrptr = (wrptr + 1) % mbx->bufcnt;
		mbx->mcount++;
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 14
static int __sc_halloc(struct task_struct *curr, struct pt_regs *regs)
{
	vrtxheap_t *heap;
	char *buf = NULL;
	u_long bsize;
	int err, hid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE,
			    __xn_reg_arg3(regs), sizeof(buf)))
		return -EFAULT;

	hid = __xn_reg_arg1(regs);
	bsize = (u_long)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);

	if (!heap || heap->mm != curr->mm) {
		/* Allocation requests must be issued from the same
		 * process which created the heap. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	buf = sc_halloc(hid, bsize, &err);

	/* Convert the kernel-based address of the allocated buffer to
	   the equivalent address in the caller's address space. */

	if (!err)
		buf = heap->mapbase + xnheap_mapped_offset(&heap->sysheap, buf);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs), &buf,
			  sizeof(buf));

	return err;
}
Example No. 15
static int __sc_pbind(struct task_struct *curr, struct pt_regs *regs)
{
	caddr_t mapbase = (caddr_t) __xn_reg_arg2(regs);
	int pid = __xn_reg_arg1(regs), err = 0;
	vrtxpt_t *pt;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (pt && pt->mm == curr->mm)
		pt->mapbase = mapbase;
	else
		err = ER_PID;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 16
static int __sc_hbind(struct task_struct *curr, struct pt_regs *regs)
{
	caddr_t mapbase = (caddr_t) __xn_reg_arg2(regs);
	int hid = __xn_reg_arg1(regs), err = 0;
	vrtxheap_t *heap;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xnmap_fetch(vrtx_heap_idmap, hid);

	if (heap && heap->mm == curr->mm)
		heap->mapbase = mapbase;
	else
		err = ER_ID;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 17
ER ref_mbx(T_RMBX *pk_rmbx, ID mbxid)
{
	uitask_t *sleeper;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&mbx->synchbase)) {
		xnpholder_t *holder = getheadpq(xnsynch_wait_queue(&mbx->synchbase));
		xnthread_t *thread = link2thread(holder, plink);
		sleeper = thread2uitask(thread);
		pk_rmbx->wtsk = sleeper->id;
	} else
		pk_rmbx->wtsk = FALSE;

	pk_rmbx->exinf = mbx->exinf;
	pk_rmbx->pk_msg =
	    mbx->mcount > 0 ? mbx->ring[mbx->rdptr] : (T_MSG *) NADR;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 18
static int __sc_gblock(struct task_struct *curr, struct pt_regs *regs)
{
	char *buf = NULL;
	vrtxpt_t *pt;
	int err, pid;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE,
			    __xn_reg_arg2(regs), sizeof(buf)))
		return -EFAULT;

	pid = __xn_reg_arg1(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (!pt || pt->mm != curr->mm) {
		/* Allocation requests must be issued from the same
		 * process which created the partition. */
		err = ER_PID;
		goto unlock_and_exit;
	}

	buf = sc_gblock(pid, &err);

	/* Convert the kernel-based address of the allocated buffer to
	   the equivalent address in the caller's address space. */

	if (!err)
		buf = pt->mapbase + xnheap_mapped_offset(pt->sysheap, buf);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &buf,
			  sizeof(buf));

	return err;
}
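
Both __sc_halloc() (Example No. 14) and __sc_gblock() above end with the same translation step: the kernel address of the block becomes an offset within the heap extent, then is rebased on the user-space mapping recorded by __sc_hbind()/__sc_pbind(). A sketch of that translation, factored into a hypothetical helper for clarity:

/* Hypothetical helper mirroring the translation done inline above. */
static char __user *heap_to_user_address(vrtxheap_t *heap, char *kbuf)
{
	/* Offset of the block within the heap extent... */
	u_long off = xnheap_mapped_offset(&heap->sysheap, kbuf);

	/* ...rebased on the address at which the extent is mapped
	   into the calling process (see __sc_hbind()). */
	return (char __user *)heap->mapbase + off;
}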
Example No. 19
int sc_sinquiry(int semid, int *errp)
{
	vrtxsem_t *sem;
	int count;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		count = 0;
	} else {
		*errp = RET_OK;
		count = sem->count;
	}

	xnlock_put_irqrestore(&nklock, s);

	return count;
}
Example No. 20
ER ref_flg(T_RFLG *pk_rflg, ID flgid)
{
	uitask_t *sleeper;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase)) {
		xnpholder_t *holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));
		xnthread_t *thread = link2thread(holder, plink);
		sleeper = thread2uitask(thread);
		pk_rflg->wtsk = sleeper->id;
	} else
		pk_rflg->wtsk = FALSE;

	pk_rflg->exinf = flag->exinf;
	pk_rflg->flgptn = flag->flgvalue;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 21
static int __sc_rblock(struct task_struct *curr, struct pt_regs *regs)
{
	char __user *buf;
	vrtxpt_t *pt;
	int pid, err;
	spl_t s;

	pid = __xn_reg_arg1(regs);
	buf = (char __user *)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (!pt || pt->mm != curr->mm) {
		/* Deallocation requests must be issued from the same
		 * process which created the partition. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	/* Convert the caller-based address of buf to the equivalent
	   address in the kernel address space. */

	if (buf) {
		buf =
		    xnheap_mapped_address(pt->sysheap,
					  (caddr_t) buf - pt->mapbase);
		sc_rblock(pid, buf, &err);
	} else
		err = ER_NMB;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 22
int sc_minquiry(int mid, int *errp)
{
	vrtxmx_t *mx;
	spl_t s;
	int rc;

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		rc = 0;
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	rc = xnsynch_owner(&mx->synchbase) == NULL;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return rc;
}
Example No. 23
void sc_saccept(int semid, int *errp)
{
	vrtxsem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	if (sem->count > 0) {
		sem->count--;
		*errp = RET_OK;
	} else
		*errp = ER_NMP;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 24
void sc_mpost(int mid, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	/* Return ER_ID if the poster does not own the mutex. */
	if (mx == NULL || xnsynch_owner(&mx->synchbase) != cur) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnsynch_release(&mx->synchbase))
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
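
sc_maccept() (Example No. 6), sc_mpend() (Example No. 10) and sc_mpost() above form the usual try-lock / lock / unlock triple. A minimal sketch using only those signatures; the mutex id is assumed to come from a creation call not shown in this listing:

void mutex_demo(int mid)
{
	int err;

	/* Try-lock first; ER_PND means the mutex is already owned. */
	sc_maccept(mid, &err);

	if (err == ER_PND)
		/* Fall back to blocking, with a 100-tick timeout. */
		sc_mpend(mid, 100, &err);

	if (err == RET_OK) {
		/* ... critical section ... */
		sc_mpost(mid, &err);
	}
}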
Example No. 25
static ER wai_flg_helper(UINT *p_flgptn,
			 ID flgid, UINT waiptn, UINT wfmode, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	if (waiptn == 0)
		return E_PAR;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0) ||
	    (!(wfmode & TWF_ORW) && ((waiptn & flag->flgvalue) == waiptn))) {
		*p_flgptn = flag->flgvalue;

		if (wfmode & TWF_CLR)
			flag->flgvalue = 0;

		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase) && !(flag->flgatr & TA_WMUL)) {
		err = E_OBJ;
		goto unlock_and_exit;
	}

	task = ui_current_task();

	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);
	task->wargs.flag.wfmode = wfmode;
	task->wargs.flag.waiptn = waiptn;

	xnsynch_sleep_on(&flag->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Flag deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*p_flgptn = task->wargs.flag.waiptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
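
This helper is the waiting half of the event-flag rendezvous whose signaling half is set_flg() (Example No. 8). The sketch below assumes a public wai_flg() wrapper that forwards here with TMO_FEVR, the usual uITRON arrangement, although no such wrapper appears in this listing:

void waiter(ID flgid)
{
	UINT flgptn;

	/* Sleep until bit 0 or bit 1 is set, then clear the whole
	   flag on wakeup (TWF_ORW | TWF_CLR). */
	if (wai_flg(&flgptn, flgid, 0x3, TWF_ORW | TWF_CLR) == E_OK) {
		/* flgptn holds the pattern observed at wakeup. */
	}
}

void setter(ID flgid)
{
	/* OR bit 0 in; the waiter's OR-condition now holds. */
	set_flg(flgid, 0x1);
}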
Example No. 26
static int __sc_pcreate(struct task_struct *curr, struct pt_regs *regs)
{
	u_long ptsize, bsize;
	vrtx_pdesc_t pdesc;
	xnheap_t *ptheap;
	vrtxpt_t *pt;
	int err, pid;
	char *ptaddr;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE,
			    __xn_reg_arg4(regs), sizeof(pdesc)))
		return -EFAULT;

	ptheap = (xnheap_t *)xnmalloc(sizeof(*ptheap));

	if (!ptheap)
		return ER_MEM;

	/* Suggested partition ID. */
	pid = __xn_reg_arg1(regs);
	/* Size of partition space -- account for the heap mgmt overhead. */
	ptsize = __xn_reg_arg2(regs);
	/* Shared heaps use the natural page size (PAGE_SIZE) */
	ptsize = xnheap_rounded_size(ptsize, PAGE_SIZE);
	/* Block size. */
	bsize = __xn_reg_arg3(regs);

	err = xnheap_init_mapped(ptheap, ptsize, 0);

	if (err)
		goto free_heap;

	/* Allocate the partition space as a single shared heap block. */
	ptaddr = xnheap_alloc(ptheap, ptsize);

	if (ptaddr == NULL) {
		err = ER_MEM;
		goto unmap_pt;
	}

	pid = sc_pcreate(pid, ptaddr, ptsize, bsize, &err);

	if (err)
		goto unmap_pt;

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (pt) {		/* Paranoid. */
		pt->mm = curr->mm;
		pt->sysheap = ptheap;
		pdesc.pid = pid;
		pdesc.ptcb = ptheap;
		pdesc.ptsize = xnheap_extentsize(ptheap);

		xnlock_put_irqrestore(&nklock, s);

		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg4(regs),
				  &pdesc, sizeof(pdesc));
		return 0;
	}

	xnlock_put_irqrestore(&nklock, s);

	err = ER_PID;

unmap_pt:

	xnheap_destroy_mapped(ptheap, NULL, NULL);

free_heap:

	xnfree(ptheap);

	return err;
}
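
The partition created above is then exercised through the sc_gblock()/sc_rblock() pair whose syscall wrappers appear in Examples No. 18 and 21. A minimal kernel-side sketch using the signatures visible there:

void partition_demo(int pid)
{
	int err;
	char *blk;

	/* Grab one fixed-size block from the partition. */
	blk = sc_gblock(pid, &err);

	if (!err) {
		/* ... use the block ... */
		sc_rblock(pid, blk, &err);
	}
}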
Example No. 27
static ER rcv_msg_helper(T_MSG **ppk_msg, ID mbxid, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (mbx->mcount > 0) {
		*ppk_msg = mbx->ring[mbx->rdptr];
		mbx->rdptr = (mbx->rdptr + 1) % mbx->bufcnt;
		mbx->mcount--;
		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	}

	task = ui_current_task();

	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);

	xnsynch_sleep_on(&mbx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Mailbox deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*ppk_msg = task->wargs.msg;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
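
This receive helper is the counterpart of snd_msg() (Example No. 13): a sender hands its message pointer directly to a pending receiver, or rings it into the buffer otherwise. The sketch below assumes a public rcv_msg() wrapper forwarding here with TMO_FEVR, per the usual uITRON convention, though no such wrapper appears in this listing:

void mailbox_demo(ID mbxid, T_MSG *msg)
{
	T_MSG *pk_msg;
	ER err;

	/* Send: with no receiver pending, the message pointer is
	   queued into the ring buffer. */
	err = snd_msg(mbxid, msg);

	/* Receive: the queued pointer is returned immediately; an
	   empty mailbox would block the caller instead. */
	if (err == E_OK)
		err = rcv_msg(&pk_msg, mbxid);
}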