Example #1
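Giving a VxWorks-style mutual-exclusion semaphore, apparently from the Xenomai VxWorks emulation skin: only the owner may give the semaphore, nested gives merely decrement the recursion count, and the underlying xnsynch object is released once that count reaches zero.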
/* Must be called with nklock locked, interrupts off. */
static STATUS semm_give(wind_sem_t *sem)
{
	xnthread_t *cur = xnpod_current_thread();
	int resched = 0;

	check_NOT_ISR_CALLABLE(return ERROR);

	if (cur != xnsynch_owner(&sem->synchbase)) {
		wind_errnoset(S_semLib_INVALID_OPERATION);
		return ERROR;
	}

	if (--sem->count > 0)
		return OK;

	if (xnsynch_release(&sem->synchbase)) {
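		/* A sleeper was readied and granted the semaphore:
		 * restore the count for the new owner and request a
		 * reschedule. */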
		sem->count = 1;
		resched = 1;
	}

	if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
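		/* Releasing a deletion-safe semaphore drops the caller's
		 * deletion lock; a pending deleter may be readied, which
		 * also requires a reschedule. */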
		if (taskUnsafeInner(cur))
			resched = 1;

	if (resched)
		xnpod_schedule();

	return OK;
}
Example #2
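Forced release of every synchronization object a thread still owns (typically done on thread deletion); the claim queue is scanned conservatively because xnsynch_release() modifies it.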
void xnsynch_release_all_ownerships(struct xnthread *thread)
{
	struct xnpholder *holder, *nholder;
	struct xnsynch *synch;

	for (holder = getheadpq(&thread->claimq); holder != NULL;
	     holder = nholder) {
		/*
		 * Since xnsynch_release() alters the claim queue, we
		 * need to be conservative while scanning it.
		 */
		synch = link2synch(holder);
		nholder = nextpq(&thread->claimq, holder);
		xnsynch_release(synch);
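		/* Run the optional per-object cleanup handler now that
		 * ownership has been forcibly released. */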
		if (synch->cleanup)
			synch->cleanup(synch);
	}
}
Example #3
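Posting (unlocking) a VRTX mutex, apparently from the VRTX emulation skin: the caller must own the mutex, and a reschedule is requested only when xnsynch_release() has readied a waiter.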
void sc_mpost(int mid, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	/* Return ER_ID if the poster does not own the mutex. */
	if (mx == NULL || xnsynch_owner(&mx->synchbase) != cur) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnsynch_release(&mx->synchbase))
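		/* A waiter was readied and now owns the mutex. */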
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example #4
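Condition-variable wait prologue, apparently from the native skin: the owned mutex is released with xnsynch_release() rather than rt_mutex_release() so that rescheduling is deferred until xnsynch_sleep_on(), keeping the release-and-wait sequence atomic under nklock; the saved recursion count lets the caller re-acquire the mutex afterwards.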
int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                          xntmode_t timeout_mode, RTIME timeout)
{
    xnthread_t *thread;
    xnflags_t info;
    spl_t s;
    int err;

    if (timeout == TM_NONBLOCK)
        return -EWOULDBLOCK;

    if (xnpod_unblockable_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
        goto unlock_and_exit;
    }

    thread = xnpod_current_thread();

    err = xnsynch_owner_check(&mutex->synch_base, thread);

    if (err)
        goto unlock_and_exit;

    /*
     * We can't use rt_mutex_release() here, since it might
     * reschedule before we enter xnsynch_sleep_on().
     */
    /* Save the recursion count: the wait releases the mutex fully,
       even if it is currently locked recursively. */
    *plockcnt = mutex->lockcnt;

    mutex->lockcnt = 0;

    xnsynch_release(&mutex->synch_base);
    /* Rescheduling is deferred until xnsynch_sleep_on() below. */

    info = xnsynch_sleep_on(&cond->synch_base,
                            timeout, timeout_mode);
    if (info & XNRMID)
        err = -EIDRM;	/* Condvar deleted while pending. */
    else if (info & XNTIMEO)
        err = -ETIMEDOUT;	/* Timeout. */
    else if (info & XNBREAK)
        err = -EINTR;	/* Unblocked. */

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}