Example #1
int32_t nano_timer_ticks_remain(struct nano_timer *timer)
{
	unsigned int key = irq_lock();
	int32_t remaining_ticks;
	struct _nano_timeout *t = &timer->timeout_data;
	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
	struct _nano_timeout *iterator;

	if (t->delta_ticks_from_prev == -1) {
		remaining_ticks = 0;
	} else {
		/*
		 * As nanokernel timeouts are stored in a linked list with
		 * delta_ticks_from_prev, to get the actual number of ticks
		 * remaining for the timer, walk through the timeouts list
		 * and accumulate all the delta_ticks_from_prev values up to
		 * the timer.
		 */
		iterator =
			(struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
		remaining_ticks = iterator->delta_ticks_from_prev;
		while (iterator != t) {
			iterator = (struct _nano_timeout *)sys_dlist_peek_next(
				timeout_q, &iterator->node);
			remaining_ticks += iterator->delta_ticks_from_prev;
		}
	}

	irq_unlock(key);
	return remaining_ticks;
}
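
The delta encoding is the whole trick here: each timeout stores only the tick gap to its predecessor, so a timer's remaining time is the prefix sum of the deltas from the list head up to its own node. A minimal standalone sketch of that walk (plain C with hypothetical names, not Zephyr code):

#include <stdio.h>
#include <stdint.h>

/* Each entry stores only the tick gap to its predecessor, so the
 * remaining time of entry `target` is the prefix sum of the deltas
 * from the head up to and including that entry -- the same walk
 * nano_timer_ticks_remain() performs over the linked list. */
static int32_t remaining_ticks(const int32_t *deltas, int count, int target)
{
	int32_t total = 0;

	for (int i = 0; i <= target && i < count; i++) {
		total += deltas[i];
	}
	return total;
}

int main(void)
{
	/* three timers expiring 5, 8 (5+3) and 10 (5+3+2) ticks from now */
	int32_t deltas[] = { 5, 3, 2 };

	printf("%d\n", remaining_ticks(deltas, 3, 1));	/* prints 8 */
	return 0;
}

The payoff of this encoding shows up on the expiry side: the tick handler only ever has to adjust the head's delta, as Example #3 below illustrates.
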
Example #2
File: timer.c  Project: sunkaizhu/zephyr
int32_t _timeout_remaining_get(struct _timeout *timeout)
{
	unsigned int key = irq_lock();
	int32_t remaining_ticks;

	if (timeout->delta_ticks_from_prev == _INACTIVE) {
		remaining_ticks = 0;
	} else {
		/*
		 * compute remaining ticks by walking the timeout list
		 * and summing up the various tick deltas involved
		 */
		struct _timeout *t =
			(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

		remaining_ticks = t->delta_ticks_from_prev;
		while (t != timeout) {
			t = (struct _timeout *)sys_dlist_peek_next(&_timeout_q,
								   &t->node);
			remaining_ticks += t->delta_ticks_from_prev;
		}
	}

	irq_unlock(key);
	return _ticks_to_ms(remaining_ticks);
}
Example #3
static inline void handle_expired_nano_timeouts(int32_t ticks)
{
	struct _nano_timeout *head =
		(struct _nano_timeout *)sys_dlist_peek_head(&_nanokernel.timeout_q);

	_nanokernel.task_timeout = TICKS_UNLIMITED;
	if (head) {
		head->delta_ticks_from_prev -= ticks;
		_nano_timeout_handle_timeouts();
	}
}
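
Because every node is relative to its predecessor, subtracting the elapsed ticks from the head entry alone ages the entire queue in O(1); _nano_timeout_handle_timeouts() then pops any nodes whose accumulated delta has reached zero. A hypothetical standalone demo of that property (plain C, not Zephyr code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* deltas 5, 3, 2 encode absolute expiries at ticks 5, 8 and 10 */
	int32_t deltas[] = { 5, 3, 2 };

	/* 4 ticks elapse: adjusting only the head shifts every absolute
	 * expiry, because the later entries are relative to it */
	deltas[0] -= 4;

	int32_t abs_ticks = 0;
	for (int i = 0; i < 3; i++) {
		abs_ticks += deltas[i];
		printf("timeout %d now expires in %d ticks\n", i, abs_ticks);
	}
	/* prints 1, 4 and 6 */
	return 0;
}
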
Example #4
File: timer.c  Project: sunkaizhu/zephyr
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t  Timeout used by the timer.
 *
 * @return N/A
 */
void _timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	unsigned int key;

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (timer->period > 0) {
		key = irq_lock();
		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
				timer->period);
		irq_unlock(key);
	}

	/* update timer's status */
	timer->status += 1;

	/* invoke timer expiry function */
	if (timer->expiry_fn) {
		timer->expiry_fn(timer);
	}

	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);

	if (!thread) {
		return;
	}

	/*
	 * Interrupts _DO NOT_ have to be locked in this specific instance of
	 * calling _unpend_thread() because a) this is the only place a thread
	 * can be taken off this pend queue, and b) the only place a thread
	 * can be put on the pend queue is at thread level, which of course
	 * cannot interrupt the current context.
	 */
	_unpend_thread(thread);

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);

	_set_thread_return_value(thread, 0);
}
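
For context, this handler is reached through the public timer API. A usage sketch, assuming the k_timer API of this kernel generation (durations and periods in milliseconds; names like my_timer are hypothetical):

/* hypothetical example code, not from the Zephyr tree */
#include <kernel.h>

static void my_expiry(struct k_timer *timer)
{
	/* invoked from _timer_expiration_handler(), i.e. in the
	 * system clock interrupt context: keep this short */
}

K_TIMER_DEFINE(my_timer, my_expiry, NULL);

void start_periodic(void)
{
	/* first expiry after 100 ms, then every 50 ms; the non-zero
	 * period is what makes _timer_expiration_handler() re-add
	 * the timeout on each expiration */
	k_timer_start(&my_timer, 100, 50);
}
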
Example #5
void k_mem_pool_free(struct k_mem_block *block)
{
	int i, key, need_sched = 0;
	struct k_mem_pool *p = get_pool(block->id.pool);
	size_t lsizes[p->n_levels];

	/* As in k_mem_pool_alloc(), we build a table of level sizes
	 * to avoid having to store it in precious RAM bytes.
	 * Overhead here is somewhat higher because free_block()
	 * doesn't inherently need to traverse all the larger
	 * sublevels.
	 */
	lsizes[0] = _ALIGN4(p->max_sz);
	for (i = 1; i <= block->id.level; i++) {
		lsizes[i] = _ALIGN4(lsizes[i-1] / 4);
	}

	free_block(p, block->id.level, lsizes, block->id.block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}
}
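
A usage sketch for the allocation side that this free path unblocks, assuming the k_mem_pool API of the same kernel generation (pool name and sizes are illustrative):

/* hypothetical example code, not from the Zephyr tree */
#include <kernel.h>

/* quad-block pool: levels of 4096-, 1024-, 256- and 64-byte blocks,
 * with 4 blocks at the largest level */
K_MEM_POOL_DEFINE(my_pool, 64, 4096, 4, 4);

void pool_example(void)
{
	struct k_mem_block block;

	/* a 200-byte request is served from the 256-byte level;
	 * K_FOREVER pends the thread on the pool's wait_q until
	 * a block becomes available */
	if (k_mem_pool_alloc(&my_pool, &block, 200, K_FOREVER) == 0) {
		/* ... use block.data ... */

		/* wakes every thread pended on the pool so it can
		 * retry its allocation, per k_mem_pool_free() above */
		k_mem_pool_free(&block);
	}
}
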
Example #6
File: pipes.c  Project: rsalveti/zephyr
/**
 * @brief Prepare a working set of readers/writers
 *
 * Prepare a list of "working threads" into/from which the data
 * will be directly copied. This list is useful because it is used to ...
 *
 *  1. avoid double copying
 *  2. minimize interrupt latency as interrupts are unlocked
 *     while copying data
 *  3. ensure a timeout can not make the request impossible to satisfy
 *
 * The list is populated with previously pended threads that will be ready to
 * run after the pipe call is complete.
 *
 * Important things to remember when reading from the pipe ...
 * 1. If there are writers in @a wait_q, then the pipe's buffer is full.
 * 2. Conversely, if the pipe's buffer is not full, there are no writers.
 * 3. The amount of available data in the pipe is the sum of the bytes used in
 *    the pipe (@a pipe_space) and all the requests from the waiting writers.
 * 4. Since data is read from the pipe's buffer first, the working set must
 *    include writers that will (try to) re-fill the pipe's buffer afterwards.
 *
 * Important things to remember when writing to the pipe ...
 * 1. If there are readers in @a wait_q, then the pipe's buffer is empty.
 * 2. Conversely, if the pipe's buffer is not empty, then there are no readers.
 * 3. The amount of space available in the pipe is the sum of the bytes unused
 *    in the pipe (@a pipe_space) and all the requests from the waiting readers.
 *
 * @return false if request is unsatisfiable, otherwise true
 */
static bool _pipe_xfer_prepare(sys_dlist_t      *xfer_list,
			       struct k_thread **waiter,
			       _wait_q_t        *wait_q,
			       size_t            pipe_space,
			       size_t            bytes_to_xfer,
			       size_t            min_xfer,
			       s32_t           timeout)
{
	sys_dnode_t      *node;
	struct k_thread  *thread;
	struct k_pipe_desc *desc;
	size_t num_bytes = 0;

	if (timeout == K_NO_WAIT) {
		for (node = sys_dlist_peek_head(wait_q); node != NULL;
		     node = sys_dlist_peek_next(wait_q, node)) {
			thread = (struct k_thread *)node;
			desc = (struct k_pipe_desc *)thread->base.swap_data;

			num_bytes += desc->bytes_to_xfer;

			if (num_bytes >= bytes_to_xfer) {
				break;
			}
		}

		if (num_bytes + pipe_space < min_xfer) {
			return false;
		}
	}

	/*
	 * Either @a timeout is not K_NO_WAIT (so the thread may pend) or
	 * the entire request can be satisfied. Generate the working list.
	 */

	sys_dlist_init(xfer_list);
	num_bytes = 0;

	while ((thread = (struct k_thread *) sys_dlist_peek_head(wait_q))) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		num_bytes += desc->bytes_to_xfer;

		if (num_bytes > bytes_to_xfer) {
			/*
			 * This request cannot be fully satisfied.
			 * Do not remove it from the wait_q.
			 * Do not abort its timeout (if applicable).
			 * Do not add it to the transfer list.
			 */
			break;
		}

		/*
		 * This request can be fully satisfied.
		 * Remove it from the wait_q.
		 * Abort its timeout.
		 * Add it to the transfer list.
		 */
		_unpend_thread(thread);
		_abort_thread_timeout(thread);
		sys_dlist_append(xfer_list, &thread->base.k_q_node);
	}

	*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;

	return true;
}
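
Finally, a sketch of the public calls that funnel into _pipe_xfer_prepare(), assuming the k_pipe API of this kernel generation (pipe name and buffer sizes are illustrative):

/* hypothetical example code, not from the Zephyr tree */
#include <kernel.h>

K_PIPE_DEFINE(my_pipe, 256, 4);	/* 256-byte ring buffer */

void producer(void)
{
	char msg[32] = "hello";
	size_t written;

	/* all-or-nothing write: min_xfer equals bytes_to_write, so
	 * _pipe_xfer_prepare() must find enough buffer space and/or
	 * pended readers, or the thread waits (K_FOREVER) */
	k_pipe_put(&my_pipe, msg, sizeof(msg), &written,
		   sizeof(msg), K_FOREVER);
}

void consumer(void)
{
	char buf[32];
	size_t bytes_read;

	/* partial read allowed: with K_NO_WAIT this exercises the
	 * "return false if request is unsatisfiable" path above */
	k_pipe_get(&my_pipe, buf, sizeof(buf), &bytes_read,
		   1, K_NO_WAIT);
}
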