Example #1
int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period)
{
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	task = rtai_h2obj_validate(task, RTAI_TASK_MAGIC, RT_TASK);

	if (!task) {
		err = -EINVAL;
		goto unlock_and_exit;
	}

	/* Start dates in the past are silently treated as "start now". */
	if (start_time <= xntbase_get_time(rtai_tbase))
		start_time = XN_INFINITE;

	err = xnpod_set_thread_periodic(&task->thread_base, start_time, period);

	/*
	 * If the task was suspended, drop one suspension level and
	 * resume it once no more levels remain.
	 */
	if (task->suspend_depth > 0 && --task->suspend_depth == 0) {
		xnpod_resume_thread(&task->thread_base, XNSUSP);
		xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
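For context, a minimal sketch of a caller, assuming the standard RTAI task
services (rt_task_init(), rt_get_time(), nano2count(), rt_task_wait_period());
the demo_task/demo_body names and the 1 ms period are made up for the example:

#define DEMO_PERIOD_NS 1000000	/* 1 ms, arbitrary */

static RT_TASK demo_task;

static void demo_body(long arg)
{
	for (;;) {
		/* Periodic work goes here. */
		rt_task_wait_period();
	}
}

static int demo_start(void)
{
	int err = rt_task_init(&demo_task, demo_body, 0, 4096, 10, 0, NULL);

	if (err)
		return err;

	/* First release one period from now, then every DEMO_PERIOD_NS. */
	return rt_task_make_periodic(&demo_task,
				     rt_get_time() + nano2count(DEMO_PERIOD_NS),
				     nano2count(DEMO_PERIOD_NS));
}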
Example #2
/**
 * Read the specified clock.
 *
 * This service returns, at the address @a tp, the current value of the clock
 * @a clock_id. If @a clock_id is:
 * - CLOCK_REALTIME, the clock value represents the amount of time since the
 *   Epoch, with a precision of one system clock tick;
 * - CLOCK_MONOTONIC, the clock value is given by an architecture-dependent
 *   high-resolution counter, with a precision independent from the system
 *   clock tick duration;
 * - CLOCK_MONOTONIC_RAW, same as CLOCK_MONOTONIC;
 * - CLOCK_HOST_REALTIME, the clock value as seen by the host, typically
 *   Linux. Resolution and precision depend on the host, but both the host and
 *   Xenomai are guaranteed to see the same information.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME, CLOCK_MONOTONIC,
 *        CLOCK_MONOTONIC_RAW or CLOCK_HOST_REALTIME;
 *
 * @param tp the address where the value of the specified clock will be stored.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a clock_id is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_gettime.html">
 * Specification.</a>
 *
 */
int clock_gettime(clockid_t clock_id, struct timespec *tp)
{
	xnticks_t cpu_time;

	switch (clock_id) {
	case CLOCK_REALTIME:
		ticks2ts(tp, xntbase_get_time(pse51_tbase));
		break;

	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
		cpu_time = xnpod_get_cpu_time();
		tp->tv_sec =
		    xnarch_uldivrem(cpu_time, ONE_BILLION, &tp->tv_nsec);
		break;

	case CLOCK_HOST_REALTIME:
		if (do_clock_host_realtime(tp) != 0) {
			thread_set_errno(EINVAL);
			return -1;
		}
		break;

	default:
		thread_set_errno(EINVAL);
		return -1;
	}

	return 0;
}
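For reference, a small self-contained user-space caller; it relies only on the
standard POSIX signature documented above:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Returns 0 on success, -1 with errno set on failure. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1) {
		perror("clock_gettime");
		return 1;
	}

	printf("monotonic: %lld.%09ld s\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}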
Example #3
/**
 * Set the specified clock.
 *
 * This service allows setting the CLOCK_REALTIME clock.
 *
 * @param clock_id the id of the clock to be set, only CLOCK_REALTIME is
 * supported.
 *
 * @param tp the address of a struct timespec specifying the new date.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a clock_id is not CLOCK_REALTIME;
 * - EINVAL, the date specified by @a tp is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_settime.html">
 * Specification.</a>
 *
 */
int clock_settime(clockid_t clock_id, const struct timespec *tp)
{
	xnticks_t now, new_date;
	spl_t s;

	if (clock_id != CLOCK_REALTIME
	    || (unsigned long)tp->tv_nsec >= ONE_BILLION) {
		thread_set_errno(EINVAL);
		return -1;
	}

	new_date = ts2ticks_floor(tp);

	xnlock_get_irqsave(&nklock, s);
	now = xntbase_get_time(pse51_tbase);
	xntbase_adjust_time(pse51_tbase, (xnsticks_t) (new_date - now));
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
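A matching user-space sketch; setting CLOCK_REALTIME normally requires
appropriate privileges, and the date below is arbitrary:

#include <stdio.h>
#include <time.h>

int main(void)
{
	/* 10^9 seconds after the Epoch, i.e. 2001-09-09T01:46:40Z. */
	struct timespec ts = { .tv_sec = 1000000000, .tv_nsec = 0 };

	if (clock_settime(CLOCK_REALTIME, &ts) == -1) {
		perror("clock_settime");	/* typically EINVAL or EPERM */
		return 1;
	}

	return 0;
}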
Example #4
ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
			     struct xnbufd *bufd,
			     xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long rdtoken;
	off_t rdoff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before receiving the
		 * data, so let's always use an absolute time spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bf->fillsz < len)
			goto wait;

		/*
		 * Draw the next read token so that we can later
		 * detect preemption.
		 */
		rdtoken = ++bf->rdtoken;

		/* Read from the buffer in a circular way. */
		rdoff = bf->rdoff;
		rbytes = len;

		do {
			if (rdoff + rbytes > bf->bufsz)
				n = bf->bufsz - rdoff;
			else
				n = rbytes;
			/*
			 * Release the nklock while retrieving the
			 * data to keep latency low.
			 */

			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_from_kmem(bufd, bf->bufmem + rdoff, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while retrieving
			 * the message, we have to re-read the whole
			 * thing.
			 */
			if (bf->rdtoken != rdtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			rdoff = (rdoff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz -= len;
		bf->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the output wait
		 * queue, if we freed enough room for the leading one
		 * to post its message.
		 */
		waiter = xnsynch_peek_pendq(&bf->osynch_base);
		if (waiter && waiter->wait_u.size + bf->fillsz <= bf->bufsz) {
			if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		/*
		 * Check whether writers are already waiting for
		 * sending data, while we are about to wait for
		 * receiving some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bf->fillsz > 0 &&
		    xnsynch_nsleepers(&bf->osynch_base) > 0) {
			len = bf->fillsz;
			goto redo;
		}

		thread = xnpod_current_thread();
		thread->wait_u.bufd = bufd;
		info = xnsynch_sleep_on(&bf->isynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
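A sketch of the reader side through the public native API, assuming the usual
rt_buffer_create()/rt_buffer_read() wrappers around this inner routine; the
buffer name and sizes are made up:

#include <native/buffer.h>

static RT_BUFFER bf;
static char msg[64];

static int consumer_read(void)
{
	ssize_t n;
	int err;

	/* 1 KB of buffer space; B_FIFO queues waiters in FIFO order. */
	err = rt_buffer_create(&bf, "demo-buffer", 1024, B_FIFO);
	if (err)
		return err;

	/* Blocks until a complete 64-byte message can be read. */
	n = rt_buffer_read(&bf, msg, sizeof(msg), TM_INFINITE);

	return n < 0 ? (int)n : 0;
}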
Example #5
ssize_t rt_buffer_write_inner(RT_BUFFER *bf,
			      struct xnbufd *bufd,
			      xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long wrtoken;
	off_t wroff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only send complete messages, so there is no point in
	 * accepting messages which are larger than what the buffer
	 * can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before being able to
		 * send the data, so let's always use an absolute time
		 * spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to write the entire message at
		 * once, or block.
		 */
		if (bf->fillsz + len > bf->bufsz)
			goto wait;

		/*
		 * Draw the next write token so that we can later
		 * detect preemption.
		 */
		wrtoken = ++bf->wrtoken;

		/* Write to the buffer in a circular way. */
		wroff = bf->wroff;
		rbytes = len;

		do {
			if (wroff + rbytes > bf->bufsz)
				n = bf->bufsz - wroff;
			else
				n = rbytes;
			/*
			 * Release the nklock while copying the source
			 * data to keep latency low.
			 */
			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_to_kmem(bf->bufmem + wroff, bufd, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while writing
			 * the message, we have to resend the whole
			 * thing.
			 */
			if (bf->wrtoken != wrtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			wroff = (wroff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz += len;
		bf->wroff = wroff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the input wait
		 * queue, if we accumulated enough data to feed the
		 * leading one.
		 */
		waiter = xnsynch_peek_pendq(&bf->isynch_base);
		if (waiter && waiter->wait_u.bufd->b_len <= bf->fillsz) {
			if (xnsynch_flush(&bf->isynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		thread = xnpod_current_thread();
		thread->wait_u.size = len;
		info = xnsynch_sleep_on(&bf->osynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	/*
	 * xnpod_schedule() is smarter than us; it will detect any
	 * worthless call inline and won't branch to the rescheduling
	 * code in such a case.
	 */
	xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
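And the matching writer side, under the same assumptions as the reader sketch
above:

#include <native/buffer.h>

/* Writes one fixed-size message; bf is assumed to have been created as in
 * the reader sketch above. */
static int producer_write(RT_BUFFER *bf)
{
	char msg[64] = "hello";
	ssize_t n;

	/* Returns -EWOULDBLOCK instead of sleeping if no room is left. */
	n = rt_buffer_write(bf, msg, sizeof(msg), TM_NONBLOCK);

	return n < 0 ? (int)n : 0;
}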
Example #6
RTIME rt_get_time_ns(void)
{
	/* Read the RTAI time base, then convert ticks to nanoseconds. */
	RTIME ticks = xntbase_get_time(rtai_tbase);
	return xntbase_ticks2ns(rtai_tbase, ticks);
}
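A quick elapsed-time sketch; do_work() is a placeholder for the code under
test, and rt_printk() is assumed available as in standard RTAI:

static void measure_work(void)
{
	RTIME t0, t1;

	t0 = rt_get_time_ns();
	do_work();		/* placeholder */
	t1 = rt_get_time_ns();

	rt_printk("elapsed: %lld ns\n", (long long)(t1 - t0));
}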