/**
 * @brief Receive a message from a message queue.
 *
 * Copies the oldest message from @a q into @a data. If the queue is empty,
 * the caller may pend for up to @a timeout; only K_NO_WAIT is legal from
 * ISR context (enforced by the assertion).
 *
 * @param q       Message queue to receive from.
 * @param data    Destination buffer; must be able to hold q->msg_size bytes.
 * @param timeout Ticks to wait for a message, or K_NO_WAIT / K_FOREVER.
 *
 * @return 0 on success, -ENOMSG if empty with K_NO_WAIT; when pended, the
 *         value delivered via _Swap() (set by the waker, or by the timeout
 *         machinery on expiry — presumably -EAGAIN; confirm against
 *         _pend_current_thread()'s timeout path).
 */
int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			/* ring buffer: wrap the read pointer */
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/*
			 * add thread's message to queue: the blocked writer
			 * left its source buffer in base.swap_data (see the
			 * pend path of k_msgq_put())
			 */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				/* _Swap() consumes (releases) the irq lock */
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/*
		 * wait for get message success or timeout; a future putter
		 * copies its message straight into our buffer via swap_data
		 * and sets our return value before readying us
		 */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
/**
 * @brief Send a message to a message queue.
 *
 * Copies @a data into @a q, or delivers it directly to the first pended
 * reader if one exists. If the queue is full, the caller may pend for up
 * to @a timeout; only K_NO_WAIT is legal from ISR context (enforced by
 * the assertion).
 *
 * @param q       Message queue to send to.
 * @param data    Source buffer of q->msg_size bytes.
 * @param timeout Ticks to wait for space, or K_NO_WAIT / K_FOREVER.
 *
 * @return 0 on success, -ENOMSG if full with K_NO_WAIT; when pended, the
 *         value delivered via _Swap() (set by the waker, or by the timeout
 *         machinery on expiry).
 */
int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/*
			 * give message to waiting thread: the blocked reader
			 * left its destination buffer in base.swap_data (see
			 * the pend path of k_msgq_get()), so the message
			 * bypasses the ring buffer entirely
			 */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				/* _Swap() consumes (releases) the irq lock */
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				/* ring buffer: wrap the write pointer */
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/*
		 * wait for put message success, failure, or timeout; a future
		 * getter pulls the message from our buffer via swap_data and
		 * sets our return value before readying us
		 */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * Re-arms periodic timers, bumps the timer's status count, invokes the
 * user expiry callback (if any), and wakes the first thread (if any)
 * pended on the timer's wait queue (e.g. via a k_timer status-sync call).
 *
 * @param t Timeout used by the timer.
 *
 * @return N/A
 */
void _timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	unsigned int key;

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (timer->period > 0) {
		key = irq_lock();
		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
			     timer->period);
		irq_unlock(key);
	}

	/* update timer's status */
	timer->status += 1;

	/* invoke timer expiry function */
	if (timer->expiry_fn) {
		timer->expiry_fn(timer);
	}

	/* only the first waiter (if any) is woken per expiry */
	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);

	if (!thread) {
		return;
	}

	/*
	 * Interrupts _DO NOT_ have to be locked in this specific instance of
	 * calling _unpend_thread() because a) this is the only place a thread
	 * can be taken off this pend queue, and b) the only place a thread
	 * can be put on the pend queue is at thread level, which of course
	 * cannot interrupt the current context.
	 */
	_unpend_thread(thread);

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);

	/*
	 * NOTE(review): the return value is set after the thread is readied;
	 * presumably safe on a uniprocessor where this runs with the woken
	 * thread unable to execute until we return — confirm for SMP targets.
	 */
	_set_thread_return_value(thread, 0);
}
/**
 * @brief Purge a message queue.
 *
 * Discards all queued messages and fails every thread pended on the
 * queue's wait queue with -ENOMSG.
 *
 * @param q Message queue to purge.
 */
void k_msgq_purge(struct k_msgq *q)
{
	unsigned int key = irq_lock();
	struct k_thread *waiter;

	/* fail each pended thread with -ENOMSG and make it runnable */
	for (;;) {
		waiter = _unpend_first_thread(&q->wait_q);
		if (waiter == NULL) {
			break;
		}
		_set_thread_return_value(waiter, -ENOMSG);
		_abort_thread_timeout(waiter);
		_ready_thread(waiter);
	}

	/* drop all buffered messages */
	q->used_msgs = 0;
	q->read_ptr = q->write_ptr;

	/* releases the irq lock; may swap to a newly-readied thread */
	_reschedule_threads(key);
}
/*
 * Signal a poll event and, if a thread is polling on it, attempt to wake
 * that thread.
 *
 * Must be called with interrupts locked.
 *
 * @param event           Poll event to mark ready.
 * @param state           State value recorded into the event.
 * @param must_reschedule Out: set non-zero when the caller should invoke
 *                        the scheduler (a thread was readied and preemption
 *                        is warranted outside ISR context).
 *
 * @return 0 on success; -EAGAIN if the poller's timeout already expired
 *         (the event is NOT marked ready in that case).
 */
static int _signal_poll_event(struct k_poll_event *event, u32_t state,
			      int *must_reschedule)
{
	*must_reschedule = 0;

	/* no thread is polling on this event: just record the state */
	if (!event->poller) {
		goto ready_event;
	}

	struct k_thread *thread = event->poller->thread;

	__ASSERT(event->poller->thread, "poller should have a thread\n");

	clear_polling_state(thread);

	/* thread already unpended by another event or path */
	if (!_is_thread_pending(thread)) {
		goto ready_event;
	}

	/* too late: the poll timed out before this signal arrived */
	if (_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	_unpend_thread(thread);
	_abort_thread_timeout(thread);
	_set_thread_return_value(thread, 0);

	/*
	 * thread is unpended but still not runnable (e.g. suspended);
	 * record the event state without queueing the thread
	 */
	if (!_is_thread_ready(thread)) {
		goto ready_event;
	}

	_add_thread_to_ready_q(thread);
	*must_reschedule = !_is_in_isr() && _must_switch_threads();

ready_event:
	set_event_ready(event, state);
	return 0;
}