Exemple #1
0
/*
 * Signal a condition variable: wake up at most one sleeper.
 * Returns 0 on success, or a validation error code if @cond is stale.
 */
int rt_cond_signal(RT_COND *cond)
{
    spl_t s;
    int ret;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (cond == NULL) {
        ret = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        xnlock_put_irqrestore(&nklock, s);
        return ret;
    }

    if (thread2rtask(xnsynch_wakeup_one_sleeper(&cond->synch_base)) != NULL) {
        /* Condvars track no ownership; clear it before rescheduling. */
        xnsynch_set_owner(&cond->synch_base, NULL);
        xnpod_schedule();
    }

    xnlock_put_irqrestore(&nklock, s);

    return 0;
}
Exemple #2
0
/*
 * Release one unit of the semaphore: hand it to the highest-priority
 * sleeper if any, otherwise bank it — unless the semaphore operates in
 * pulse mode, in which case unreceived units are discarded.
 */
int rt_sem_v(RT_SEM *sem)
{
	spl_t s;
	int ret = 0;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (sem == NULL) {
		ret = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
	} else if (xnsynch_wakeup_one_sleeper(&sem->synch_base) != NULL) {
		/* A waiter consumed the unit directly. */
		xnpod_schedule();
	} else if (!(sem->mode & S_PULSE)) {
		/* No waiter: keep the unit for a later rt_sem_p(). */
		sem->count++;
	}

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Exemple #3
0
/*
 * VRTX semaphore post: wake one sleeper if present, otherwise bump the
 * count, reporting ER_OVF when the count is already saturated.
 * The status of the call is returned through @errp.
 */
void sc_spost(int semid, int *errp)
{
	spl_t s;
	vrtxsem_t *sem;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
	} else if (xnsynch_wakeup_one_sleeper(&sem->synchbase)) {
		*errp = RET_OK;
		xnpod_schedule();
	} else if (sem->count == MAX_SEM_VALUE) {
		*errp = ER_OVF;
	} else {
		*errp = RET_OK;
		sem->count++;
	}

	xnlock_put_irqrestore(&nklock, s);
}
Exemple #4
0
/*
 * Core of sem_post: validate the descriptor, then either hand the unit
 * to a sleeper or increment the count. Returns 0 on success, -1 with
 * errno set (EINVAL, EPERM, EAGAIN) on failure.
 */
int sem_post_inner(struct pse51_sem *sem, pse51_kqueues_t *ownq)
{
	/* Reject stale or uninitialized semaphore descriptors. */
	if (sem->magic != PSE51_SEM_MAGIC) {
		thread_set_errno(EINVAL);
		return -1;
	}

#if XENO_DEBUG(POSIX)
	/* Debug builds only: the caller must own the proper queue set. */
	if (ownq && ownq != pse51_kqueues(sem->pshared)) {
		thread_set_errno(EPERM);
		return -1;
	}
#endif /* XENO_DEBUG(POSIX) */

	/* Refuse to overflow the counter. */
	if (sem->value == SEM_VALUE_MAX) {
		thread_set_errno(EAGAIN);
		return -1;
	}

	if (xnsynch_wakeup_one_sleeper(&sem->synchbase) == NULL)
		sem->value++;	/* No waiter: bank the unit. */
	else
		xnpod_schedule();

	return 0;
}
Exemple #5
0
/* Must be called with nklock locked, interrupts off. */
static STATUS semc_give(wind_sem_t *sem)
{
	/* With no sleeper to hand the unit to, just bank it. */
	if (xnsynch_wakeup_one_sleeper(&sem->synchbase) == NULL)
		sem->count++;
	else
		xnpod_schedule();

	return OK;
}
Exemple #6
0
/* Must be called with nklock locked, interrupts off. */
static STATUS semb_give(wind_sem_t *sem)
{
	/* A sleeper consumes the give immediately. */
	if (xnsynch_wakeup_one_sleeper(&sem->synchbase) != NULL) {
		xnpod_schedule();
		return OK;
	}

	/* Binary semaphore: giving an already-full one is an error. */
	if (sem->count != 0) {
		wind_errnoset(S_semLib_INVALID_OPERATION);
		return ERROR;
	}

	sem->count = 1;

	return OK;
}
Exemple #7
0
/*
 * Release every synchronization object @thread still claims, waking
 * one sleeper per object and running its cleanup hook if any.
 * NOTE(review): presumably called on thread deletion with nklock held
 * — confirm against callers; no locking is done here.
 */
void xnsynch_release_all_ownerships(xnthread_t *thread)
{
	xnpholder_t *holder, *nholder;

	for (holder = getheadpq(&thread->claimq); holder != NULL;
	     holder = nholder) {
		/* Since xnsynch_wakeup_one_sleeper() alters the claim
		   queue, we need to be conservative while scanning
		   it. */
		xnsynch_t *synch = link2synch(holder);
		/* Fetch the successor BEFORE the wakeup mutates the queue. */
		nholder = nextpq(&thread->claimq, holder);
		xnsynch_wakeup_one_sleeper(synch);
		if (synch->cleanup)
			synch->cleanup(synch);
	}
}
Exemple #8
0
/*
 * uITRON mailbox send: deliver @pk_msg directly to a waiting task if
 * one exists, otherwise enqueue it in the mailbox ring buffer.
 * Returns E_OK, E_ID (bad id), E_NOEXS (no such mailbox) or E_QOVR
 * (ring full).
 */
ER snd_msg(ID mbxid, T_MSG *pk_msg)
{
	uitask_t *waiter;
	uimbx_t *mbx;
	ER ret = E_OK;
	int slot;
	spl_t s;

	/* Range-check the identifier before taking the lock. */
	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (mbx == NULL) {
		ret = E_NOEXS;
		goto out;
	}

	waiter = thread2uitask(xnsynch_wakeup_one_sleeper(&mbx->synchbase));

	if (waiter != NULL) {
		/* Hand the message to the waiter directly; skip the ring. */
		waiter->wargs.msg = pk_msg;
		xnpod_schedule();
		goto out;
	}

	slot = mbx->wrptr;

	if (mbx->mcount > 0 && slot == mbx->rdptr) {
		ret = E_QOVR;	/* Write pointer caught up: ring is full. */
	} else {
		mbx->ring[slot] = pk_msg;
		mbx->wrptr = (slot + 1) % mbx->bufcnt;
		mbx->mcount++;
	}

out:
	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Exemple #9
0
/**
 * Unlock a semaphore.
 *
 * This service unlocks the semaphore @a sm.
 *
 * If no thread is currently blocked on this semaphore, its count is
 * incremented, otherwise the highest priority thread is unblocked.
 *
 * @param sm the semaphore to be unlocked.
 *
 * @retval 0 on success;
 * @retval -1 with errno set if:
 * - EINVAL, the specified semaphore is invalid or uninitialized;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to the
 *   current process;
 * - EAGAIN, the semaphore count is @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_post.html">
 * Specification.</a>
 *
 */
int sem_post(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	int ret = -1;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Both the shadow descriptor and the backing object must be live. */
	if ((shadow->magic != PSE51_SEM_MAGIC
	     && shadow->magic != PSE51_NAMED_SEM_MAGIC)
	    || shadow->sem->magic != PSE51_SEM_MAGIC) {
		thread_set_errno(EINVAL);
		goto out;
	}

	sem = shadow->sem;

#if XENO_DEBUG(POSIX)
	/* Debug builds only: reject cross-process use of a private sem. */
	if (sem->owningq != pse51_kqueues(sem->pshared)) {
		thread_set_errno(EPERM);
		goto out;
	}
#endif /* XENO_DEBUG(POSIX) */

	/* Refuse to overflow the counter. */
	if (sem->value == SEM_VALUE_MAX) {
		thread_set_errno(EAGAIN);
		goto out;
	}

	if (xnsynch_wakeup_one_sleeper(&sem->synchbase) == NULL)
		++sem->value;	/* No waiter: bank the unit. */
	else
		xnpod_schedule();

	ret = 0;

      out:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Exemple #10
0
/*
 * pSOS event send: post @events to the event group of task @tid, then
 * wake that task if its pending ev_receive() condition is now met.
 * Returns SUCCESS or a task-validation error code.
 */
u_long ev_send(u_long tid, u_long events)
{
	u_long err = SUCCESS;
	psosevent_t *evgroup;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	evgroup = &task->evgroup;
	evgroup->events |= events;

	/* Only the task to which the event group pertains can
	   pend on it. */

	if (!emptypq_p(xnsynch_wait_queue(&evgroup->synchbase))) {
		u_long flags = task->waitargs.evgroup.flags;
		u_long bits = task->waitargs.evgroup.events;

		/* EV_ANY: any requested bit suffices; otherwise all of
		   them must be pending. */
		if (((flags & EV_ANY) && (bits & evgroup->events) != 0) ||
		    (!(flags & EV_ANY) && ((bits & evgroup->events) == bits))) {
			xnsynch_wakeup_one_sleeper(&evgroup->synchbase);
			/* Report the satisfied bits to the waiter, then
			   consume them from the group — in this order,
			   since the second statement clears the bits the
			   first one reads. */
			task->waitargs.evgroup.events =
			    (bits & evgroup->events);
			evgroup->events &= ~bits;
			xnpod_schedule();
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Exemple #11
0
/*
 * Linux-side write handler for a message pipe: copy @count bytes from
 * user space into a freshly allocated message buffer, queue it on the
 * inbound queue, and wake any Xenomai-side reader.
 * Returns the byte count written, 0 for an empty write, or a negative
 * errno (-EFAULT, -EPIPE, -ENOMEM, -EWOULDBLOCK, -ERESTARTSYS).
 */
static ssize_t xnpipe_write(struct file *file,
			    const char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	struct xnpipe_mh *mh;
	int pollnum, ret;
	spl_t s;

	if (count == 0)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

      retry:

	/* The real-time side may have disconnected while we slept. */
	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}

	/* Snapshot queue occupancy before dropping the lock, so the
	   wait condition below can detect any progress made since. */
	pollnum = countq(&state->inq) + countq(&state->outq);
	xnlock_put_irqrestore(&nklock, s);

	/* Allocation runs unlocked; the per-state allocator reports
	   hard failure as (void *)-1 and transient exhaustion as NULL. */
	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
	if (mh == (struct xnpipe_mh *)-1)
		return -ENOMEM;

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		xnlock_get_irqsave(&nklock, s);
		/* Block until some buffer space was released, then
		   re-validate the connection from the top.
		   NOTE(review): xnpipe_wait presumably drops and
		   reacquires nklock internally — confirm. */
		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
				pollnum >
				countq(&state->inq) + countq(&state->outq))) {
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
		goto retry;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = count;
	xnpipe_m_rdoff(mh) = 0;

	/* Copy from user space while unlocked (may fault/sleep). */
	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
		state->ops.free_ibuf(mh, state->xstate);
		return -EFAULT;
	}

	xnlock_get_irqsave(&nklock, s);

	appendq(&state->inq, &mh->link);

	/* Wake up a Xenomai sleeper if any. */
	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
		xnpod_schedule();

	/* The input hook may override the returned byte count. */
	if (state->ops.input) {
		ret = state->ops.input(mh, 0, state->xstate);
		if (ret)
			count = (size_t)ret;
	}

	/* O_SYNC: wait until the real-time side drained the inbound
	   queue before returning. */
	if (file->f_flags & O_SYNC) {
		if (!emptyq_p(&state->inq)) {
			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
					emptyq_p(&state->inq)))
				count = -ERESTARTSYS;
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t)count;
}