Example 1
int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	strcpy(info->name, event->name);
	info->value = event->value;
	info->nwaiters = xnsynch_nsleepers(&event->synch_base);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
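
For context, a minimal caller sketch (not part of the listing above): it assumes the usual native-skin API from <native/event.h>, i.e. rt_event_create()/rt_event_delete(), and simply prints the fields that rt_event_inquire() fills in. The event name and EV_PRIO mode are illustrative only.

/* Hypothetical caller sketch: create an event group, query it with
 * rt_event_inquire(), then delete it. */
#include <stdio.h>
#include <native/event.h>

int inquire_demo(void)
{
	RT_EVENT event;
	RT_EVENT_INFO info;
	int err;

	err = rt_event_create(&event, "demo_event", 0, EV_PRIO);
	if (err)
		return err;

	err = rt_event_inquire(&event, &info);
	if (err == 0)
		printf("event %s: value=0x%lx, %d waiter(s)\n",
		       info.name, info.value, info.nwaiters);

	rt_event_delete(&event);
	return err;
}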
Example 2
ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
			     struct xnbufd *bufd,
			     xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long rdtoken;
	off_t rdoff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before receiving the
		 * data, so let's always use an absolute time spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bf->fillsz < len)
			goto wait;

		/*
		 * Draw the next read token so that we can later
		 * detect preemption.
		 */
		rdtoken = ++bf->rdtoken;

		/* Read from the buffer in a circular way. */
		rdoff = bf->rdoff;
		rbytes = len;

		do {
			if (rdoff + rbytes > bf->bufsz)
				n = bf->bufsz - rdoff;
			else
				n = rbytes;
			/*
			 * Release the nklock while retrieving the
			 * data to keep latency low.
			 */

			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_from_kmem(bufd, bf->bufmem + rdoff, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while retrieving
			 * the message, we have to re-read the whole
			 * thing.
			 */
			if (bf->rdtoken != rdtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			rdoff = (rdoff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz -= len;
		bf->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the output wait
		 * queue, if we freed enough room for the leading one
		 * to post its message.
		 */
		waiter = xnsynch_peek_pendq(&bf->osynch_base);
		if (waiter && waiter->wait_u.size + bf->fillsz <= bf->bufsz) {
			if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		/*
		 * Check whether writers are already waiting for
		 * sending data, while we are about to wait for
		 * receiving some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bf->fillsz > 0 &&
		    xnsynch_nsleepers(&bf->osynch_base) > 0) {
			len = bf->fillsz;
			goto redo;
		}

		thread = xnpod_current_thread();
		thread->wait_u.bufd = bufd;
		info = xnsynch_sleep_on(&bf->isynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
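
For context, a hedged sketch of a kernel-side caller, modeled on the way the skin's public rt_buffer_read() wrapper delegates to rt_buffer_read_inner(): the destination area is wrapped in a buffer descriptor before the helper performs the locking, blocking and circular copy. xnbufd_map_kwrite()/xnbufd_unmap_kwrite() from the nucleus bufd API are assumed here.

/* Hedged sketch, not the original wrapper: wrap a kernel destination
 * buffer in an xnbufd (assumed bufd API) and let rt_buffer_read_inner()
 * above do the actual work with a relative timeout. */
#include <native/buffer.h>
#include <nucleus/bufd.h>

ssize_t read_one_message(RT_BUFFER *bf, void *dst, size_t len, RTIME timeout)
{
	struct xnbufd bufd;
	ssize_t ret;

	/* The descriptor is the destination that xnbufd_copy_from_kmem()
	 * writes into, hence the "kwrite" mapping. */
	xnbufd_map_kwrite(&bufd, dst, len);
	ret = rt_buffer_read_inner(bf, &bufd, XN_RELATIVE, timeout);
	xnbufd_unmap_kwrite(&bufd);

	return ret;
}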