Example #1
u_long ev_receive(u_long events, u_long flags, u_long timeout, u_long *events_r)
{
	u_long err = SUCCESS;
	psosevent_t *evgroup;
	psostask_t *task;
	spl_t s;

	if (xnpod_unblockable_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	task = psos_current_task();

	evgroup = &task->evgroup;

	if (!events) {
		*events_r = evgroup->events;
		goto unlock_and_exit;
	}

	if (flags & EV_NOWAIT) {
		u_long bits = (evgroup->events & events);
		evgroup->events &= ~events;
		*events_r = bits;

		if (flags & EV_ANY) {
			if (!bits)
				err = ERR_NOEVS;
		} else if (bits != events)
			err = ERR_NOEVS;

		goto unlock_and_exit;
	}

	if (((flags & EV_ANY) && (events & evgroup->events) != 0) ||
	    (!(flags & EV_ANY) && ((events & evgroup->events) == events))) {
		*events_r = (evgroup->events & events);
		evgroup->events &= ~events;
		goto unlock_and_exit;
	}

	task->waitargs.evgroup.flags = flags;
	task->waitargs.evgroup.events = events;
	xnsynch_sleep_on(&evgroup->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = -EINTR;
	else if (xnthread_test_info(&task->threadbase, XNTIMEO)) {
		err = ERR_TIMEOUT;
		*events_r = evgroup->events;
	} else
		*events_r = task->waitargs.evgroup.events;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
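A minimal caller sketch for the EV_ANY/EV_NOWAIT semantics above (the event bit assignments are hypothetical; the flag and status names are the pSOS ones used by ev_receive):

#define EV_DATA_READY 0x1	/* hypothetical event bits */
#define EV_SHUTDOWN   0x2

static void consumer_step(void)
{
	u_long got;

	/* Block until at least one of the two bits is posted; a zero
	   timeout conventionally means wait forever here. */
	if (ev_receive(EV_DATA_READY | EV_SHUTDOWN, EV_ANY, 0, &got) != SUCCESS)
		return;

	if (got & EV_SHUTDOWN)
		return;

	/* Poll for both bits at once; without EV_ANY, all requested
	   bits must be pending, else ERR_NOEVS is returned. */
	if (ev_receive(EV_DATA_READY | EV_SHUTDOWN, EV_NOWAIT, 0, &got)
	    == ERR_NOEVS)
		return;	/* not all bits pending yet */
}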
Example #2
void sc_maccept(int mid, int *errp)
{
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	if (xnthread_try_grab(xnpod_current_thread(), &mx->synchbase))
		*errp = RET_OK;
	else
		*errp = ER_PND;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example #3
/**
 * Sleep some amount of time.
 *
 * This service suspends the calling thread until the wakeup time specified by
 * @a rqtp, or a signal is delivered to the caller. If the flag TIMER_ABSTIME is
 * set in the @a flags argument, the wakeup time is specified as an absolute
 * value of the clock @a clock_id. If the flag TIMER_ABSTIME is not set, the
 * wakeup time is specified as a time interval.
 *
 * If this service is interrupted by a signal, the flag TIMER_ABSTIME is not
 * set, and @a rmtp is not @a NULL, the time remaining until the specified
 * wakeup time is returned at the address @a rmtp.
 *
 * The resolution of this service is one system clock tick.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME,
 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW.
 *
 * @param flags one of:
 * - 0 meaning that the wakeup time @a rqtp is a time interval;
 * - TIMER_ABSTIME, meaning that the wakeup time is an absolute value of the
 *   clock @a clock_id.
 *
 * @param rqtp address of the wakeup time.
 *
 * @param rmtp address where the remaining time before wakeup will be stored if
 * the service is interrupted by a signal.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, the specified clock is unsupported;
 * - EINVAL, the specified wakeup time is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_nanosleep.html">
 * Specification.</a>
 *
 */
int clock_nanosleep(clockid_t clock_id,
		    int flags,
		    const struct timespec *rqtp, struct timespec *rmtp)
{
	xnthread_t *cur;
	spl_t s;
	int err = 0;

	if (xnpod_unblockable_p())
		return EPERM;

	if (clock_id != CLOCK_MONOTONIC &&
	    clock_id != CLOCK_MONOTONIC_RAW &&
	    clock_id != CLOCK_REALTIME)
		return ENOTSUP;

	if ((unsigned long)rqtp->tv_nsec >= ONE_BILLION)
		return EINVAL;

	if (flags & ~TIMER_ABSTIME)
		return EINVAL;

	cur = xnpod_current_thread();

	xnlock_get_irqsave(&nklock, s);

	thread_cancellation_point(cur);

	xnpod_suspend_thread(cur, XNDELAY, ts2ticks_ceil(rqtp) + 1,
			     clock_flag(flags, clock_id), NULL);

	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNBREAK)) {

		if (flags == 0 && rmtp) {
			xnticks_t now, expiry;
			xnsticks_t rem;

			now = clock_get_ticks(clock_id);
			expiry = xntimer_get_date(&cur->rtimer);
			xnlock_put_irqrestore(&nklock, s);
			rem = expiry - now;

			ticks2ts(rmtp, rem > 0 ? rem : 0);
		} else
			xnlock_put_irqrestore(&nklock, s);

		return EINTR;
	}

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
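A short usage sketch for the service above (plain POSIX types; error handling abbreviated). Note the positive-errno return convention documented above:

#include <time.h>
#include <errno.h>

static int sleep_until(clockid_t clkid, const struct timespec *wakeup)
{
	int err;

	/* An absolute wakeup time is immune to the drift a relative
	   re-sleep would accumulate after each EINTR. */
	do
		err = clock_nanosleep(clkid, TIMER_ABSTIME, wakeup, NULL);
	while (err == EINTR);

	return err;
}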
Example #4
int rt_sem_p_inner(RT_SEM *sem, xntmode_t timeout_mode, RTIME timeout)
{
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	if (timeout == TM_NONBLOCK) {
		if (sem->count > 0)
			sem->count--;
		else
			err = -EWOULDBLOCK;

		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	if (sem->count > 0)
		--sem->count;
	else {
		info = xnsynch_sleep_on(&sem->synch_base,
					timeout, timeout_mode);
		if (info & XNRMID)
			err = -EIDRM;	/* Semaphore deleted while pending. */
		else if (info & XNTIMEO)
			err = -ETIMEDOUT;	/* Timeout. */
		else if (info & XNBREAK)
			err = -EINTR;	/* Unblocked. */
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #5
void sc_mpend(int mid, unsigned long timeout, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxtask_t *task;
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnthread_try_grab(cur, &mx->synchbase))
		goto unlock_and_exit;

	if (xnsynch_owner(&mx->synchbase) == cur)
		goto unlock_and_exit;

	task = thread2vrtxtask(cur);
	task->vrtxtcb.TCBSTAT = TBSMUTEX;

	if (timeout)
		task->vrtxtcb.TCBSTAT |= TBSDELAY;

	xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(cur, XNBREAK))
		*errp = -EINTR;
	else if (xnthread_test_info(cur, XNRMID))
		*errp = ER_DEL;	/* Mutex deleted while pending. */
	else if (xnthread_test_info(cur, XNTIMEO))
		*errp = ER_TMO;	/* Timeout. */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example #6
u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio)
{
	union xnsched_policy_param param;
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0) {
		if (xnpod_unblockable_p()) {
			/* Don't return with nklock held. */
			err = -EPERM;
			goto unlock_and_exit;
		}
		task = psos_current_task();
	}
	else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
			goto unlock_and_exit;
		}
	}

	*oldprio = xnthread_current_priority(&task->threadbase);

	if (newprio != 0) {
		if (newprio < 1 || newprio > 255) {
			err = ERR_SETPRI;
			goto unlock_and_exit;
		}

		if (newprio != *oldprio) {
			param.rt.prio = newprio;
			xnpod_set_thread_schedparam(&task->threadbase,
						    &xnsched_class_rt, &param);
			xnpod_schedule();
		}
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #7
void sc_spend(int semid, long timeout, int *errp)
{
	vrtxtask_t *task;
	vrtxsem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (sem->count > 0)
		sem->count--;
	else {
		if (xnpod_unblockable_p()) {
			*errp = -EPERM;
			goto unlock_and_exit;
		}

		task = vrtx_current_task();

		task->vrtxtcb.TCBSTAT = TBSSEMA;

		if (timeout)
			task->vrtxtcb.TCBSTAT |= TBSDELAY;

		xnsynch_sleep_on(&sem->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(&task->threadbase, XNBREAK))
			*errp = -EINTR;
		else if (xnthread_test_info(&task->threadbase, XNRMID))
			*errp = ER_DEL;	/* Semaphore deleted while pending. */
		else if (xnthread_test_info(&task->threadbase, XNTIMEO))
			*errp = ER_TMO;	/* Timeout. */
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example #8
int msgQReceive(MSG_Q_ID qid, char *buf, UINT bytes, int to)
{
	xnticks_t timeout;
	wind_msgq_t *queue;
	wind_msg_t *msg;
	xnthread_t *thread;
	wind_task_t *task;
	spl_t s;

	error_check(buf == NULL, 0, return ERROR);

	check_NOT_ISR_CALLABLE(return ERROR);

	xnlock_get_irqsave(&nklock, s);

	check_OBJ_ID_ERROR(qid, wind_msgq_t, queue, WIND_MSGQ_MAGIC,
			   goto error);

	if ((msg = unqueue_msg(queue)) == NULL) {
		/* message queue is empty */

		error_check(to == NO_WAIT ||
			    xnpod_unblockable_p(), S_objLib_OBJ_UNAVAILABLE,
			    goto error);

		if (to == WAIT_FOREVER)
			timeout = XN_INFINITE;
		else
			timeout = to;

		task = wind_current_task();
		thread = &task->threadbase;
		task->rcv_buf = buf;
		task->rcv_bytes = bytes;

		xnsynch_sleep_on(&queue->synchbase, timeout, XN_RELATIVE);

		error_check(xnthread_test_info(thread, XNBREAK), -EINTR,
			    goto error);
		error_check(xnthread_test_info(thread, XNRMID),
			    S_objLib_OBJ_DELETED, goto error);
		error_check(xnthread_test_info(thread, XNTIMEO),
			    S_objLib_OBJ_TIMEOUT, goto error);

		bytes = task->rcv_bytes;
	} else {
		/*
		 * Copy the pending message out and recycle it. This
		 * tail is an assumed completion of the truncated
		 * listing, modeled on the helpers used above; it is
		 * not verbatim source.
		 */
		if (msg->length < bytes)
			bytes = msg->length;
		memcpy(buf, msg->buffer, bytes);
		free_msg(queue, msg);

		/* Some sender may be pending for a free message slot. */
		if (xnsynch_wakeup_one_sleeper(&queue->synchbase))
			xnpod_schedule();
	}

	xnlock_put_irqrestore(&nklock, s);

	return (int)bytes;

error:
	xnlock_put_irqrestore(&nklock, s);
	return ERROR;
}
Example #9
u_long t_suspend(u_long tid)
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	if (tid == 0) {
		if (xnpod_unblockable_p())
			return -EPERM;

		xnpod_suspend_self();

		if (xnthread_test_info(&psos_current_task()->threadbase, XNBREAK))
			return -EINTR;

		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	if (xnthread_test_state(&task->threadbase, XNSUSP)) {
		err = ERR_SUSP;	/* Task already suspended. */
		goto unlock_and_exit;
	}

	xnpod_suspend_thread(&task->threadbase, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = -EINTR;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #10
u_long t_resume(u_long tid)
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0) {
		if (xnpod_unblockable_p()) {
			err = -EPERM;
			goto unlock_and_exit;
		}

		/* Would be admittedly silly, but silly code does
		 * exist, and it's a matter of returning ERR_NOTSUSP
		 * instead of ERR_OBJID. */
		task = psos_current_task();
	}
	else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
			goto unlock_and_exit;
		}
	}

	if (!xnthread_test_state(&task->threadbase, XNSUSP)) {
		err = ERR_NOTSUSP;	/* Task not suspended. */
		goto unlock_and_exit;
	}

	xnpod_resume_thread(&task->threadbase, XNSUSP);
	xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #11
u_long t_restart(u_long tid, u_long targs[])
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;
	int n;

	if (xnpod_unblockable_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0)
		task = psos_current_task();
	else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
			goto unlock_and_exit;
		}

		if (xnthread_test_state(&task->threadbase, XNDORMANT)) {
			err = ERR_NACTIVE;
			goto unlock_and_exit;
		}
	}

	for (n = 0; n < 4; n++)
		task->args[n] = targs ? targs[n] : 0;

	xnpod_restart_thread(&task->threadbase);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #12
u_long t_ident(const char *name, u_long node, u_long *tid_r)
{
	u_long err = SUCCESS;
	xnholder_t *holder;
	psostask_t *task;
	spl_t s;

	if (node > 1)
		return ERR_NODENO;

	if (!name) {
		if (xnpod_unblockable_p())
			return ERR_OBJID;
		*tid_r = (u_long)psos_current_task();
		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	for (holder = getheadq(&psostaskq);
	     holder; holder = nextq(&psostaskq, holder)) {
		task = link2psostask(holder);

		if (!strcmp(task->name, name)) {
			*tid_r = (u_long)task;
			goto unlock_and_exit;
		}
	}

	err = ERR_OBJNF;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #13
static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
					 int timed, xnticks_t to)
{
	pse51_sem_t *sem = shadow->sem;
	xnthread_t *cur;
	int err;

	if (xnpod_unblockable_p())
		return EPERM;

	cur = xnpod_current_thread();

	if ((err = sem_trywait_internal(shadow)) != EAGAIN)
		return err;

	thread_cancellation_point(cur);

	if (timed)
		xnsynch_sleep_on(&sem->synchbase, to, XN_REALTIME);
	else
		xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);

	/* Handle cancellation requests. */
	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNRMID))
		return EINVAL;

	if (xnthread_test_info(cur, XNBREAK))
		return EINTR;

	if (xnthread_test_info(cur, XNTIMEO))
		return ETIMEDOUT;

	return 0;
}
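A sketch of how a POSIX-facing wrapper might sit on top of this helper (the __xeno_sem union cast and thread_set_errno() are assumptions, modeled on the positive-errno convention used above):

int sem_wait(sem_t *sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	int err = sem_timedwait_internal(shadow, 0, XN_INFINITE);

	if (err) {
		thread_set_errno(err);	/* POSIX: return -1, set errno */
		return -1;
	}

	return 0;
}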
Example #14
static ER rcv_msg_helper(T_MSG ** ppk_msg, ID mbxid, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (mbx->mcount > 0) {
		*ppk_msg = mbx->ring[mbx->rdptr];
		mbx->rdptr = (mbx->rdptr + 1) % mbx->bufcnt;
		mbx->mcount--;
		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	}

	task = ui_current_task();

	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);

	xnsynch_sleep_on(&mbx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Mailbox deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*ppk_msg = task->wargs.msg;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #15
int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                          xntmode_t timeout_mode, RTIME timeout)
{
    xnthread_t *thread;
    xnflags_t info;
    spl_t s;
    int err;

    if (timeout == TM_NONBLOCK)
        return -EWOULDBLOCK;

    if (xnpod_unblockable_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
        goto unlock_and_exit;
    }

    thread = xnpod_current_thread();

    err = xnsynch_owner_check(&mutex->synch_base, thread);

    if (err)
        goto unlock_and_exit;

    /*
     * We can't use rt_mutex_release since that might reschedule
     * before entering xnsynch_sleep_on.
     */
    *plockcnt = mutex->lockcnt; /* Save the count; release fully, even if nested */

    mutex->lockcnt = 0;

    xnsynch_release(&mutex->synch_base);
    /* Scheduling deferred */

    info = xnsynch_sleep_on(&cond->synch_base,
                            timeout, timeout_mode);
    if (info & XNRMID)
        err = -EIDRM;	/* Condvar deleted while pending. */
    else if (info & XNTIMEO)
        err = -ETIMEDOUT;	/* Timeout. */
    else if (info & XNBREAK)
        err = -EINTR;	/* Unblocked. */

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
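The prologue atomically drops the mutex and sleeps; the caller is expected to re-acquire the mutex afterwards. A sketch of the pairing, assuming a matching rt_cond_wait_epilogue() that re-grabs the mutex and restores the saved nesting count:

int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
{
    unsigned lockcnt;
    int err;

    err = rt_cond_wait_prologue(cond, mutex, &lockcnt,
                                XN_RELATIVE, timeout);
    /*
     * Re-acquire the mutex even if the wait was interrupted or timed
     * out, so the caller always owns it again on return, per the
     * usual condvar contract.
     */
    if (err == 0 || err == -EINTR || err == -ETIMEDOUT)
        rt_cond_wait_epilogue(mutex, lockcnt);

    return err;
}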
Example #16
int rt_event_wait_inner(RT_EVENT *event,
			unsigned long mask,
			unsigned long *mask_r,
			int mode, xntmode_t timeout_mode, RTIME timeout)
{
	RT_TASK *task;
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	if (!mask) {
		*mask_r = event->value;
		goto unlock_and_exit;
	}

	if (timeout == TM_NONBLOCK) {
		unsigned long bits = (event->value & mask);
		*mask_r = bits;

		if (mode & EV_ANY) {
			if (!bits)
				err = -EWOULDBLOCK;
		} else if (bits != mask)
			err = -EWOULDBLOCK;

		goto unlock_and_exit;
	}

	if (((mode & EV_ANY) && (mask & event->value) != 0) ||
	    (!(mode & EV_ANY) && ((mask & event->value) == mask))) {
		*mask_r = (event->value & mask);
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	task = xeno_current_task();
	task->wait_args.event.mode = mode;
	task->wait_args.event.mask = mask;
	info = xnsynch_sleep_on(&event->synch_base,
				timeout, timeout_mode);
	if (info & XNRMID)
		err = -EIDRM;	/* Event group deleted while pending. */
	else if (info & XNTIMEO)
		err = -ETIMEDOUT;	/* Timeout. */
	else if (info & XNBREAK)
		err = -EINTR;	/* Unblocked. */
	/*
	 * The returned mask is only significant if the operation has
	 * succeeded, but always write it back anyway.
	 */
	*mask_r = task->wait_args.event.mask;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #17
static ER wai_flg_helper(UINT *p_flgptn,
			 ID flgid, UINT waiptn, UINT wfmode, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	if (waiptn == 0)
		return E_PAR;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0) ||
	    (!(wfmode & TWF_ORW) && ((waiptn & flag->flgvalue) == waiptn))) {
		*p_flgptn = flag->flgvalue;

		if (wfmode & TWF_CLR)
			flag->flgvalue = 0;

		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase) && !(flag->flgatr & TA_WMUL)) {
		err = E_OBJ;
		goto unlock_and_exit;
	}

	task = ui_current_task();

	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);
	task->wargs.flag.wfmode = wfmode;
	task->wargs.flag.waiptn = waiptn;

	xnsynch_sleep_on(&flag->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Flag deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*p_flgptn = task->wargs.flag.waiptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #18
ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
			     struct xnbufd *bufd,
			     xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long rdtoken;
	off_t rdoff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before receiving the
		 * data, so let's always use an absolute time spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bf->fillsz < len)
			goto wait;

		/*
		 * Draw the next read token so that we can later
		 * detect preemption.
		 */
		rdtoken = ++bf->rdtoken;

		/* Read from the buffer in a circular way. */
		rdoff = bf->rdoff;
		rbytes = len;

		do {
			if (rdoff + rbytes > bf->bufsz)
				n = bf->bufsz - rdoff;
			else
				n = rbytes;
			/*
			 * Release the nklock while retrieving the
			 * data to keep latency low.
			 */

			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_from_kmem(bufd, bf->bufmem + rdoff, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while retrieving
			 * the message, we have to re-read the whole
			 * thing.
			 */
			if (bf->rdtoken != rdtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			rdoff = (rdoff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz -= len;
		bf->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the output wait
		 * queue, if we freed enough room for the leading one
		 * to post its message.
		 */
		waiter = xnsynch_peek_pendq(&bf->osynch_base);
		if (waiter && waiter->wait_u.size + bf->fillsz <= bf->bufsz) {
			if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		/*
		 * Check whether writers are already waiting to send
		 * data while we are about to wait to receive some; in
		 * such a pathological use of the buffer, we must allow
		 * a short read to prevent a deadlock.
		 */
		if (bf->fillsz > 0 &&
		    xnsynch_nsleepers(&bf->osynch_base) > 0) {
			len = bf->fillsz;
			goto redo;
		}

		thread = xnpod_current_thread();
		thread->wait_u.bufd =  bufd;
		info = xnsynch_sleep_on(&bf->isynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
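The rdtoken logic above is a sequence-counter retry: bump a counter while holding the lock, drop the lock for the long copy, then re-check the counter to detect a racing reader. A stripped-down sketch of the pattern (hypothetical names, same locking primitives as above):

static u_long snapshot_token;	/* protected by nklock */

static void read_consistent(void *dst, const void *src, size_t len)
{
	u_long token;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
retry:
	token = ++snapshot_token;	/* claim this read round */

	xnlock_put_irqrestore(&nklock, s);
	memcpy(dst, src, len);		/* slow copy, lock released */
	xnlock_get_irqsave(&nklock, s);

	if (snapshot_token != token)
		goto retry;		/* raced with another reader: redo */

	xnlock_put_irqrestore(&nklock, s);
}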
Example #19
ssize_t rt_buffer_write_inner(RT_BUFFER *bf,
			      struct xnbufd *bufd,
			      xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long wrtoken;
	off_t wroff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only send complete messages, so there is no point in
	 * accepting messages which are larger than what the buffer
	 * can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before being able to
		 * send the data, so let's always use an absolute time
		 * spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to write the entire message at
		 * once, or block.
		 */
		if (bf->fillsz + len > bf->bufsz)
			goto wait;

		/*
		 * Draw the next write token so that we can later
		 * detect preemption.
		 */
		wrtoken = ++bf->wrtoken;

		/* Write to the buffer in a circular way. */
		wroff = bf->wroff;
		rbytes = len;

		do {
			if (wroff + rbytes > bf->bufsz)
				n = bf->bufsz - wroff;
			else
				n = rbytes;
			/*
			 * Release the nklock while copying the source
			 * data to keep latency low.
			 */
			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_to_kmem(bf->bufmem + wroff, bufd, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while writing
			 * the message, we have to resend the whole
			 * thing.
			 */
			if (bf->wrtoken != wrtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			wroff = (wroff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz += len;
		bf->wroff = wroff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the input wait
		 * queue, if we accumulated enough data to feed the
		 * leading one.
		 */
		waiter = xnsynch_peek_pendq(&bf->isynch_base);
		if (waiter && waiter->wait_u.bufd->b_len <= bf->fillsz) {
			if (xnsynch_flush(&bf->isynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		thread = xnpod_current_thread();
		thread->wait_u.size = len;
		info = xnsynch_sleep_on(&bf->osynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	/*
	 * xnpod_schedule() is smarter than us; it will detect any
	 * worthless call inline and won't branch to the rescheduling
	 * code in such a case.
	 */
	xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
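The relative-to-absolute conversion above guards against restarting the wait with the full relative delay after each intermediate wakeup. The same idiom in isolation (hypothetical wrapper; __native_tbase as above):

static void make_absolute(xntmode_t *mode, RTIME *timeout)
{
	/* Only finite, relative delays need rebasing. */
	if (*mode == XN_RELATIVE &&
	    *timeout != TM_NONBLOCK && *timeout != TM_INFINITE) {
		*mode = XN_REALTIME;
		*timeout += xntbase_get_time(__native_tbase);
	}
}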
Example #20
int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
{
	void *block = NULL;
	xnthread_t *thread;
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	/* In single-block mode, there is only a single allocation
	   returning the whole addressable heap space to the user. All
	   users referring to this heap are then returned the same
	   block. */

	if (heap->mode & H_SINGLE) {
		block = heap->sba;

		if (!block) {
			/* It's ok to pass zero for size here, since the requested
			   size is implicitly the whole heap space; but if
			   non-zero is given, it must match the original heap
			   size. */

			if (size > 0 && size != heap->csize) {
				err = -EINVAL;
				goto unlock_and_exit;
			}

			block = heap->sba = xnheap_alloc(&heap->heap_base,
							 xnheap_max_contiguous
							 (&heap->heap_base));
		}

		if (block)
			goto unlock_and_exit;

		err = -ENOMEM;	/* This should never happen. Paranoid. */
		goto unlock_and_exit;
	}

	block = xnheap_alloc(&heap->heap_base, size);

	if (block)
		goto unlock_and_exit;

	if (timeout == TM_NONBLOCK) {
		err = -EWOULDBLOCK;
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	thread = xnpod_current_thread();
	thread->wait_u.buffer.size = size;
	thread->wait_u.buffer.ptr = NULL;
	info = xnsynch_sleep_on(&heap->synch_base, timeout, XN_RELATIVE);
	if (info & XNRMID)
		err = -EIDRM;	/* Heap deleted while pending. */
	else if (info & XNTIMEO)
		err = -ETIMEDOUT;	/* Timeout. */
	else if (info & XNBREAK)
		err = -EINTR;	/* Unblocked. */
	else
		block = thread->wait_u.buffer.ptr;

      unlock_and_exit:

	*blockp = block;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
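A minimal caller sketch (the fallback timeout value is hypothetical and expressed in the skin's time base units; TM_NONBLOCK makes the allocation fail immediately with -EWOULDBLOCK instead of sleeping):

static int grab_block(RT_HEAP *heap, size_t size, void **blockp)
{
	/* Try a non-blocking allocation first, then fall back to a
	   bounded wait so the caller cannot hang forever. */
	int err = rt_heap_alloc(heap, size, TM_NONBLOCK, blockp);

	if (err == -EWOULDBLOCK)
		err = rt_heap_alloc(heap, size, 1000000, blockp);

	return err;
}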