Example #1
/**
 * @fn ssize_t rt_pipe_read_timed(RT_PIPE *pipe, void *buf, size_t size, const struct timespec *abs_timeout)
 * @brief Read a message from a pipe.
 *
 * This service reads the next available message from a given pipe.
 *
 * @param pipe The pipe descriptor.
 *
 * @param buf A pointer to a memory area which will be written upon
 * success with the message received.
 *
 * @param size The maximum number of bytes to read from the received
 * message into @a buf. If @a size is smaller than the actual message
 * size, -ENOBUFS is returned since the incompletely received message
 * would be lost. If @a size is zero, this call returns immediately
 * with no other action.
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for a message to be available from
 * the pipe (see note). Passing NULL causes the caller to block
 * indefinitely until a message is available. Passing { .tv_sec = 0,
 * .tv_nsec = 0 } causes the service to return immediately without
 * blocking in case no message is available.
 *
 * @return The number of bytes available from the received message is
 * returned upon success. Otherwise:
 *
 * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
 * message arrives.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and no message is immediately available on entry to
 * the call.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before a message was available.
 *
 * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
 *
 * - -EIDRM is returned if @a pipe is deleted while the caller was
 * waiting for a message. In such an event, @a pipe is no longer valid
 * upon return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * @apitags{xthread-nowait, switch-primary}
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_pipe_read_timed(RT_PIPE *pipe,
			   void *buf, size_t size,
			   const struct timespec *abs_timeout)
{
	struct alchemy_pipe *pcb;
	int err = 0, flags;
	struct timeval tv;
	ssize_t ret;

	pcb = find_alchemy_pipe(pipe, &err);
	if (pcb == NULL)
		return err;

	if (alchemy_poll_mode(abs_timeout))
		flags = MSG_DONTWAIT;
	else {
		if (!threadobj_current_p())
			return -EPERM;
		if (abs_timeout) {
			tv.tv_sec = abs_timeout->tv_sec;
			tv.tv_usec = abs_timeout->tv_nsec / 1000;
		} else {
			tv.tv_sec = 0;
			tv.tv_usec = 0;
		}
		__RT(setsockopt(pcb->sock, SOL_SOCKET,
				SO_RCVTIMEO, &tv, sizeof(tv)));
		flags = 0;
	}

	ret = __RT(recvfrom(pcb->sock, buf, size, flags, NULL, 0));
	if (ret < 0)
		ret = -errno;

	return ret;
}
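
A minimal usage sketch (not part of the excerpt above) showing the three documented timeout modes of rt_pipe_read_timed(). The deadline arithmetic assumes rt_timer_read() and @a abs_timeout share the Alchemy clock as their time base at the default 1-nanosecond resolution, and that the pipe was created with rt_pipe_create() elsewhere.

#include <alchemy/pipe.h>
#include <alchemy/timer.h>
#include <errno.h>
#include <stdio.h>

static void drain_pipe_example(RT_PIPE *pipe)
{
	struct timespec abs_timeout;
	char buf[64];
	RTIME limit;
	ssize_t n;

	/* Poll mode: { 0, 0 } returns -EWOULDBLOCK if nothing is pending. */
	abs_timeout.tv_sec = 0;
	abs_timeout.tv_nsec = 0;
	n = rt_pipe_read_timed(pipe, buf, sizeof(buf), &abs_timeout);
	if (n == -EWOULDBLOCK)
		printf("no message pending\n");

	/* Absolute deadline: wait at most one second from now (assumes
	   the deadline is on the same clock as rt_timer_read()). */
	limit = rt_timer_read() + 1000000000ULL;
	abs_timeout.tv_sec = limit / 1000000000ULL;
	abs_timeout.tv_nsec = limit % 1000000000ULL;
	n = rt_pipe_read_timed(pipe, buf, sizeof(buf), &abs_timeout);
	if (n == -ETIMEDOUT)
		printf("deadline reached, no message\n");
	else if (n >= 0)
		printf("received %zd bytes\n", n);

	/* NULL deadline: block indefinitely until a message arrives. */
	n = rt_pipe_read_timed(pipe, buf, sizeof(buf), NULL);
	if (n >= 0)
		printf("received %zd bytes\n", n);
}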
Example #2
/**
 * @fn ssize_t rt_buffer_write_timed(RT_BUFFER *bf, const void *ptr, size_t len, const struct timespec *abs_timeout)
 * @brief Write to an IPC buffer.
 *
 * This routine writes a message to the specified buffer. If not
 * enough buffer space is available on entry to hold the message, the
 * caller is allowed to block until enough room is freed, or a timeout
 * elapses, whichever comes first.
 *
 * @param bf The descriptor address of the buffer to write to.
 *
 * @param ptr The address of the message data to be written to the
 * buffer.
 *
 * @param len The length in bytes of the message data. Zero is a valid
 * value, in which case the buffer is left untouched, and zero is
 * returned to the caller.
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for enough buffer space to be
 * available to hold the message (see note). Passing NULL causes the
 * caller to block indefinitely until enough buffer space is
 * available. Passing { .tv_sec = 0, .tv_nsec = 0 } causes the service
 * to return immediately without blocking in case of buffer space
 * shortage.
 *
 * @return The number of bytes written to the buffer is returned upon
 * success. Otherwise:
 *
 * - -ETIMEDOUT is returned if the absolute @a abs_timeout date is
 * reached before enough buffer space is available to hold the
 * message.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and no buffer space is immediately available on
 * entry to hold the message.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before enough buffer space became available to hold
 * the message.
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
 * @a len is greater than the actual buffer length.
 *
 * - -EIDRM is returned if @a bf is deleted while the caller was
 * waiting for buffer space. In such an event, @a bf is no longer
 * valid upon return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * Valid calling contexts:
 *
 * - Xenomai threads
 * - Any other context if @a abs_timeout is { .tv_sec = 0, .tv_nsec = 0 }.
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
			      const void *ptr, size_t size,
			      const struct timespec *abs_timeout)
{
	struct alchemy_buffer_wait *wait = NULL;
	struct alchemy_buffer *bcb;
	struct threadobj *thobj;
	size_t len, rbytes, n;
	struct syncstate syns;
	struct service svc;
	const void *p;
	size_t wroff;
	int ret = 0;

	len = size;
	if (len == 0)
		return 0;

	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	/*
	 * We may only send complete messages, so there is no point in
	 * accepting messages which are larger than what the buffer
	 * can hold.
	 */
	if (len > bcb->bufsz) {
		ret = -EINVAL;
		goto done;
	}

	for (;;) {
		/*
		 * We should be able to write the entire message at
		 * once, or block.
		 */
		if (bcb->fillsz + len > bcb->bufsz)
			goto wait;

		/* Write to the buffer in a circular way. */
		wroff = bcb->wroff;
		rbytes = len;
		p = ptr;

		do {
			if (wroff + rbytes > bcb->bufsz)
				n = bcb->bufsz - wroff;
			else
				n = rbytes;

			memcpy(bcb->buf + wroff, p, n);
			p += n;
			wroff = (wroff + n) % bcb->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bcb->fillsz += len;
		bcb->wroff = wroff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads waiting for input, if we
		 * accumulated enough data to feed the leading one.
		 */
		thobj = syncobj_peek_grant(&bcb->sobj);
		if (thobj == NULL)
			goto done;

		wait = threadobj_get_wait(thobj);
		if (wait->size <= bcb->fillsz)
			syncobj_grant_all(&bcb->sobj);

		goto done;
	wait:
		if (alchemy_poll_mode(abs_timeout)) {
			ret = -EWOULDBLOCK;
			goto done;
		}

		if (wait == NULL)
			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);

		wait->size = len;

		/*
		 * Check whether readers are already waiting for
		 * receiving data, while we are about to wait for
		 * sending some. In such a case, we have the converse
		 * pathological use of the buffer. We must kick
		 * readers to allow for a short read to prevent a
		 * deadlock.
		 *
		 * XXX: instead of broadcasting a general wake up
		 * event, we could be smarter and wake up only the
		 * number of waiters required to consume the amount of
		 * data we want to send, but this does not seem worth
		 * the burden: this is an error condition, we just
		 * have to mitigate its effect, avoiding a deadlock.
		 */
		if (bcb->fillsz > 0 && syncobj_count_grant(&bcb->sobj))
			syncobj_grant_all(&bcb->sobj);

		ret = syncobj_wait_drain(&bcb->sobj, abs_timeout, &syns);
		if (ret) {
			if (ret == -EIDRM)
				goto out;
			break;
		}
	}
done:
	put_alchemy_buffer(bcb, &syns);
out:
	if (wait)
		threadobj_finish_wait();

	CANCEL_RESTORE(svc);

	return ret;
}
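
A minimal usage sketch (not part of the excerpt above): write a record to an Alchemy buffer, first in poll mode, then with an absolute one-second deadline. As before, the deadline computation assumes rt_timer_read() and @a abs_timeout share the Alchemy clock time base at the default 1-nanosecond resolution, and that rt_buffer_create() was issued elsewhere.

#include <alchemy/buffer.h>
#include <alchemy/timer.h>
#include <errno.h>
#include <stdio.h>

static void send_record(RT_BUFFER *bf, const void *msg, size_t len)
{
	struct timespec deadline = { .tv_sec = 0, .tv_nsec = 0 };
	RTIME limit;
	ssize_t n;

	/* Non-blocking attempt first: { 0, 0 } never blocks. */
	n = rt_buffer_write_timed(bf, msg, len, &deadline);
	if (n >= 0)
		return;

	if (n != -EWOULDBLOCK) {
		printf("write failed: %zd\n", n);
		return;
	}

	/* Buffer full: retry, waiting at most one second for room. */
	limit = rt_timer_read() + 1000000000ULL;
	deadline.tv_sec = limit / 1000000000ULL;
	deadline.tv_nsec = limit % 1000000000ULL;
	n = rt_buffer_write_timed(bf, msg, len, &deadline);
	if (n == -ETIMEDOUT)
		printf("no room within the deadline\n");
}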
Example #3
/**
 * @fn ssize_t rt_buffer_read_timed(RT_BUFFER *bf, void *ptr, size_t len, const struct timespec *abs_timeout)
 * @brief Read from an IPC buffer.
 *
 * This routine reads the next message from the specified buffer. If
 * no message is available on entry, the caller is allowed to block
 * until enough data is written to the buffer, or a timeout elapses.
 *
 * @param bf The descriptor address of the buffer to read from.
 *
 * @param ptr A pointer to a memory area which will be written upon
 * success with the received data.
 *
 * @param len The length in bytes of the memory area pointed to by @a
 * ptr. Under normal circumstances, rt_buffer_read_timed() only
 * returns entire messages as specified by the @a len argument, or an
 * error value. However, short reads are allowed when a potential
 * deadlock situation is detected (see note below).
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for a message to be available from
 * the buffer (see note). Passing NULL causes the caller to block
 * indefinitely until enough data is available. Passing { .tv_sec = 0,
 * .tv_nsec = 0 } causes the service to return immediately without
 * blocking in case not enough data is available.
 *
 * @return The number of bytes read from the buffer is returned upon
 * success. Otherwise:
 *
 * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
 * complete message arrives.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and not enough data is immediately available on
 * entry to form a complete message.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before enough data became available to form a complete
 * message.
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
 * @a len is greater than the actual buffer length.
 *
 * - -EIDRM is returned if @a bf is deleted while the caller was
 * waiting for data. In such an event, @a bf is no longer valid upon
 * return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * @note A short read (i.e. fewer bytes returned than requested by @a
 * len) may happen whenever a pathological use of the buffer is
 * encountered. This condition only arises when the system detects
 * that one or more writers are waiting to send data, while a reader
 * would have to wait to receive a complete message at the same
 * time. For instance, consider the following sequence, involving
 * a 1024-byte buffer (bf) and two threads:
 *
 * writer thread > rt_buffer_write(&bf, ptr, 1, TM_INFINITE);
 *        (one byte to read, 1023 bytes available for sending)
 * writer thread > rt_buffer_write(&bf, ptr, 1024, TM_INFINITE);
 *        (writer blocks - no space for another 1024-byte message)
 * reader thread > rt_buffer_read(&bf, ptr, 1024, TM_INFINITE);
 *        (short read - a truncated (1-byte) message is returned)
 *
 * In order to prevent both threads from waiting for each other
 * indefinitely, a short read is allowed, which may be completed by a
 * subsequent call to rt_buffer_read() or rt_buffer_read_until(). If
 * that case arises, thread priorities, buffer and/or message lengths
 * should likely be adjusted, in order to eliminate such a condition.
 *
 * Valid calling contexts:
 *
 * - Xenomai threads
 * - Any other context if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 }.
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_buffer_read_timed(RT_BUFFER *bf,
			     void *ptr, size_t size,
			     const struct timespec *abs_timeout)
{
	struct alchemy_buffer_wait *wait = NULL;
	struct alchemy_buffer *bcb;
	struct threadobj *thobj;
	size_t len, rbytes, n;
	struct syncstate syns;
	struct service svc;
	size_t rdoff;
	int ret = 0;
	void *p;

	len = size;
	if (len == 0)
		return 0;

	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	if (len > bcb->bufsz) {
		ret = -EINVAL;
		goto done;
	}
redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bcb->fillsz < len)
			goto wait;

		/* Read from the buffer in a circular way. */
		rdoff = bcb->rdoff;
		rbytes = len;
		p = ptr;

		do {
			if (rdoff + rbytes > bcb->bufsz)
				n = bcb->bufsz - rdoff;
			else
				n = rbytes;
			memcpy(p, bcb->buf + rdoff, n);
			p += n;
			rdoff = (rdoff + n) % bcb->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bcb->fillsz -= len;
		bcb->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads waiting for the buffer to
		 * drain, if we freed enough room for the leading one
		 * to post its message.
		 */
		thobj = syncobj_peek_drain(&bcb->sobj);
		if (thobj == NULL)
			goto done;

		wait = threadobj_get_wait(thobj);
		if (wait->size + bcb->fillsz <= bcb->bufsz)
			syncobj_drain(&bcb->sobj);

		goto done;
	wait:
		if (alchemy_poll_mode(abs_timeout)) {
			ret = -EWOULDBLOCK;
			goto done;
		}

		/*
		 * Check whether writers are already waiting to send
		 * data, while we are about to wait to receive
		 * some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bcb->fillsz > 0 && syncobj_count_drain(&bcb->sobj)) {
			len = bcb->fillsz;
			goto redo;
		}

		if (wait == NULL)
			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);

		wait->size = len;

		ret = syncobj_wait_grant(&bcb->sobj, abs_timeout, &syns);
		if (ret) {
			if (ret == -EIDRM)
				goto out;
			break;
		}
	}
done:
	put_alchemy_buffer(bcb, &syns);
out:
	if (wait)
		threadobj_finish_wait();

	CANCEL_RESTORE(svc);

	return ret;
}
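
A minimal usage sketch (not part of the excerpt above) for the short-read behavior described in the note: read a fixed-size record and, if a pathological short read occurs, complete it with subsequent blocking calls until the whole record has been received. The record length is assumed not to exceed the buffer size created elsewhere with rt_buffer_create().

#include <alchemy/buffer.h>
#include <stddef.h>

static ssize_t read_record(RT_BUFFER *bf, void *rec, size_t reclen)
{
	size_t done = 0;
	ssize_t n;

	while (done < reclen) {
		/* NULL timeout: block until enough data is available. */
		n = rt_buffer_read_timed(bf, (char *)rec + done,
					 reclen - done, NULL);
		if (n < 0)
			return n;	/* -EINTR, -EIDRM, -EINVAL, ... */

		/* A short read leaves done < reclen; loop to complete it. */
		done += n;
	}

	return (ssize_t)done;
}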