Example #1
void k_timer_stop(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	/* cancel the timeout; if it was already inactive there is nothing to do */
	int key = irq_lock();
	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);

	irq_unlock(key);

	if (inactive) {
		return;
	}

	if (timer->stop_fn) {
		timer->stop_fn(timer);
	}

	/* wake the first thread (if any) blocked in k_timer_status_sync() */
	key = irq_lock();
	struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);

	if (pending_thread) {
		_ready_thread(pending_thread);
	}

	/* only reschedule when not running in interrupt context */
	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
Example #2
int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
Example #3
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		/* convert the relative timeout into an absolute tick deadline */
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		ret = pool_alloc(p, block, size);

		/* return on success, when the caller won't wait, or on any
		 * failure other than -ENOMEM (the only retryable error)
		 */
		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		/* block until a block is freed back into the pool */
		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			/* recompute the time remaining before retrying */
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
Example #4
void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int  key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state--reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}
Example #5
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	struct k_thread *first_pending_thread;
	unsigned int key;

	key = irq_lock();

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		/* hand the item directly to the first waiting thread */
		prepare_thread_to_run(first_pending_thread, data);
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		/* no waiters: link the item into the queue's data list */
		sys_slist_insert(&queue->data_q, prev, data);
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Example #6
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	struct k_thread *first_thread, *thread;
	unsigned int key;

	key = irq_lock();

	first_thread = _peek_first_pending_thread(&queue->wait_q);
	/* hand list items directly to waiting threads, one item per thread */
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
	}

	/* append whatever remains of the list to the queue's data list */
	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (first_thread) {
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Example #7
uint32_t k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	unsigned int key = irq_lock();
	uint32_t result = timer->status;

	if (result == 0) {
		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
			/* wait for timer to expire or stop */
			_pend_current_thread(&timer->wait_q, K_FOREVER);
			_Swap(key);

			/* get updated timer status */
			key = irq_lock();
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0;
	irq_unlock(key);

	return result;
}
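
For context, a minimal caller-side sketch of the status/sync API above; the timer name and the 100 ms period are assumptions made only so the fragment stands on its own:

K_TIMER_DEFINE(my_timer, NULL, NULL);

void wait_one_period(void)
{
	/* hypothetical 100 ms periodic timer */
	k_timer_start(&my_timer, 100, 100);

	/* pends on timer->wait_q (see above) until the next expiry, then
	 * returns the number of expirations since the last read
	 */
	uint32_t expired = k_timer_status_sync(&my_timer);

	(void)expired;
}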
Example #8
int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
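
To show the two halves of the message queue API together, here is a minimal producer/consumer sketch; the queue name, element type, and depth are assumptions for illustration:

K_MSGQ_DEFINE(my_msgq, sizeof(u32_t), 8, 4);

void producer(void)
{
	u32_t sample = 42;

	/* with K_NO_WAIT a full queue yields -ENOMSG instead of blocking */
	if (k_msgq_put(&my_msgq, &sample, K_NO_WAIT) == -ENOMSG) {
		/* queue full: drop the sample */
	}
}

void consumer(void)
{
	u32_t sample;

	/* pends on q->wait_q (see k_msgq_get above) until a message arrives */
	if (k_msgq_get(&my_msgq, &sample, K_FOREVER) == 0) {
		/* process sample */
	}
}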
Example #9
/* This implements a "fair" scheduling policy: at the end of a POSIX
 * thread call that might result in a change of the current maximum
 * priority thread, we always check and context switch if needed.
 * Note that there is significant dispute in the community over the
 * "right" way to do this and different systems do it differently by
 * default.  Zephyr is an RTOS, so we choose latency over
 * throughput.  See here for a good discussion of the broad issue:
 *
 * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
 */
static void swap_or_unlock(int key)
{
	/* API madness: use __ not _ here.  The latter checks for our
	 * preemption state, but we want to do a switch here even if
	 * we can't be preempted.
	 */
	if (!_is_in_isr() && __must_switch_threads()) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
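
To illustrate the policy described in the comments above, here is a hypothetical caller: a POSIX-style unlock that readies a waiter and then finishes the call through swap_or_unlock(). The lock object (a k_sem) and the unpend/ready sequence are assumptions for illustration, not the actual pthread code:

static int posix_style_unlock(struct k_sem *lock)
{
	unsigned int key = irq_lock();
	struct k_thread *waiter = _unpend_first_thread(&lock->wait_q);

	if (waiter) {
		/* hand the lock straight to the first waiting thread */
		_abort_thread_timeout(waiter);
		_set_thread_return_value(waiter, 0);
		_ready_thread(waiter);
	} else {
		lock->count++;
	}

	/* end of the call: switch now if a higher-priority thread became
	 * runnable, otherwise just restore interrupts
	 */
	swap_or_unlock(key);
	return 0;
}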
Example #10
k_tid_t k_thread_spawn(char *stack, size_t stack_size,
			void (*entry)(void *, void *, void*),
			void *p1, void *p2, void *p3,
			int prio, u32_t options, s32_t delay)
{
	__ASSERT(!_is_in_isr(), "");

	/* the thread object is carved out of the base of the supplied stack */
	struct k_thread *new_thread = (struct k_thread *)stack;

	_new_thread(stack, stack_size, entry, p1, p2, p3, prio, options);

	schedule_new_thread(new_thread, delay);

	return new_thread;
}
Example #11
void k_mem_pool_free(struct k_mem_block *block)
{
	int i, key, need_sched = 0;
	struct k_mem_pool *p = get_pool(block->id.pool);
	size_t lsizes[p->n_levels];

	/* As in k_mem_pool_alloc(), we build a table of level sizes
	 * to avoid having to store it in precious RAM bytes.
	 * Overhead here is somewhat higher because free_block()
	 * doesn't inherently need to traverse all the larger
	 * sublevels.
	 */
	lsizes[0] = _ALIGN4(p->max_sz);
	for (i = 1; i <= block->id.level; i++) {
		lsizes[i] = _ALIGN4(lsizes[i-1] / 4);
	}

	free_block(get_pool(block->id.pool), block->id.level,
		   lsizes, block->id.block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}
}
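
To make the level-size arithmetic described in the comment above concrete, here is the table the loop builds for a hypothetical pool (the numbers are assumptions, not any particular configuration):

/* Hypothetical pool with max_sz = 1024, freeing a block at level 2:
 *
 *   lsizes[0] = _ALIGN4(1024)     = 1024   (largest blocks)
 *   lsizes[1] = _ALIGN4(1024 / 4) =  256
 *   lsizes[2] = _ALIGN4(256 / 4)  =   64   (size of the block being freed)
 *
 * Each level's blocks are one quarter the size of the level above.
 */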
Example #12
/* must be called with interrupts locked */
static int _signal_poll_event(struct k_poll_event *event, u32_t state,
			      int *must_reschedule)
{
	*must_reschedule = 0;

	if (!event->poller) {
		goto ready_event;
	}

	struct k_thread *thread = event->poller->thread;

	__ASSERT(event->poller->thread, "poller should have a thread\n");

	clear_polling_state(thread);

	if (!_is_thread_pending(thread)) {
		goto ready_event;
	}

	if (_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	_unpend_thread(thread);
	_abort_thread_timeout(thread);
	_set_thread_return_value(thread, 0);

	if (!_is_thread_ready(thread)) {
		goto ready_event;
	}

	_add_thread_to_ready_q(thread);
	*must_reschedule = !_is_in_isr() && _must_switch_threads();

ready_event:
	set_event_ready(event, state);
	return 0;
}
Example #13
/**
 * @brief Check if a memory address range falls within the stack
 *
 * Given a memory address range, ensure that it falls within the bounds
 * of the faulting context's stack.
 *
 * @param addr Starting address
 * @param size Size of the region, or 0 if we just want to see if addr is
 *             in bounds
 * @param cs Code segment of faulting context
 * @return true if addr/size region is not within the thread stack
 */
static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
{
	u32_t start, end;

	if (_is_in_isr()) {
		/* We were servicing an interrupt */
		start = (u32_t)_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
		end = start + CONFIG_ISR_STACK_SIZE;
	} else if ((cs & 0x3) != 0 ||
		   (_current->base.user_options & K_USER) == 0) {
		/* Thread was in user mode, or is not a user mode thread.
		 * The normal stack buffer is what we will check.
		 */
		start = _current->stack_info.start;
		end = STACK_ROUND_DOWN(_current->stack_info.start +
				       _current->stack_info.size);
	} else {
		/* User thread was doing a syscall, check kernel stack bounds */
		start = _current->stack_info.start - MMU_PAGE_SIZE;
		end = _current->stack_info.start;
	}

	return (addr <= start) || (addr + size > end);
}
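
A minimal sketch of how such a bounds check might be used by a fault handler; the wrapper name and its parameters are assumptions, and only printk() is relied on beyond the helper above:

static void report_fault(u32_t fault_esp, u16_t cs)
{
	if (check_stack_bounds(fault_esp, 0, cs)) {
		/* the stack pointer no longer lies inside the thread's
		 * stack, so this is most likely a stack overflow
		 */
		printk("probable stack overflow, esp 0x%x\n", fault_esp);
	} else {
		printk("esp 0x%x is within stack bounds\n", fault_esp);
	}
}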
Example #14
int k_is_in_isr(void)
{
	return _is_in_isr();
}
Example #15
int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
{
	__ASSERT(!_is_in_isr(), "");
	__ASSERT(events, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	int last_registered = -1, in_use = 0, rc;
	unsigned int key;

	key = irq_lock();
	set_polling_state(_current);
	irq_unlock(key);

	/*
	 * We can get by with one poller structure for all events for now:
	 * if/when we allow multiple threads to poll on the same object, we
	 * will need one per poll event associated with an object.
	 */
	struct _poller poller = { .thread = _current };

	/* find events whose condition is already fulfilled */
	for (int ii = 0; ii < num_events; ii++) {
		u32_t state;

		key = irq_lock();
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			clear_polling_state(_current);
		} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
			rc = register_event(&events[ii]);
			if (rc == 0) {
				events[ii].poller = &poller;
				++last_registered;
			} else if (rc == -EADDRINUSE) {
				/* setting in_use also prevents any further
				 * registrations by the current thread
				 */
				in_use = -EADDRINUSE;
				events[ii].state = K_POLL_STATE_EADDRINUSE;
				clear_polling_state(_current);
			} else {
				__ASSERT(0, "unexpected return code\n");
			}
		}
		irq_unlock(key);
	}

	key = irq_lock();

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed, or
	 * that one of the objects we wanted to poll on already had a thread
	 * polling on it. We can remove all registrations and return either
	 * success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
	 * the events that were available are still flagged as such, and it is
	 * valid for the caller to consider them available, as if this function
	 * returned success.
	 */
	if (!is_polling()) {
		clear_event_registrations(events, last_registered, key);
		irq_unlock(key);
		return in_use;
	}

	clear_polling_state(_current);

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return -EAGAIN;
	}

	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);

	_pend_current_thread(&wait_q, timeout);

	int swap_rc = _Swap(key);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = irq_lock();
	clear_event_registrations(events, last_registered, key);
	irq_unlock(key);

	return swap_rc;
}
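
Finally, a minimal caller-side sketch of k_poll(); the two kernel objects and their in-function initialization are assumptions made only so the fragment stands on its own:

#include <kernel.h>

static struct k_sem my_sem;
static struct k_fifo my_fifo;

void wait_for_either(void)
{
	struct k_poll_event events[2];

	/* in real code these would normally be initialized once at startup */
	k_sem_init(&my_sem, 0, 1);
	k_fifo_init(&my_fifo);

	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
	k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_fifo);

	/* blocks in the _Swap() call above until some condition is met */
	if (k_poll(events, 2, K_FOREVER) == 0) {
		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
			k_sem_take(&my_sem, K_NO_WAIT);
		}
		if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
			void *item = k_fifo_get(&my_fifo, K_NO_WAIT);

			(void)item;
		}
	}
}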