void k_timer_stop(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();
	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);

	irq_unlock(key);

	if (inactive) {
		return;
	}

	if (timer->stop_fn) {
		timer->stop_fn(timer);
	}

	key = irq_lock();
	struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);

	if (pending_thread) {
		_ready_thread(pending_thread);
	}

	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
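/*
 * Usage sketch (not part of the kernel sources): stopping a one-shot
 * timer from thread context before it fires. All names and durations
 * below are hypothetical. Because the timer is still running when
 * k_timer_stop() is called, the stop function runs synchronously in
 * the caller's context; had the timer already expired, the "inactive"
 * early-return path above would be taken instead.
 */
#include <zephyr.h>

static void my_stop_handler(struct k_timer *timer)
{
	/* invoked synchronously from within k_timer_stop() */
}

static struct k_timer my_timer;

void stop_timer_demo(void)
{
	k_timer_init(&my_timer, NULL, my_stop_handler);
	k_timer_start(&my_timer, 1000, 0);	/* one shot, 1000 ms */

	/*
	 * Cancels the timeout, runs my_stop_handler(), and readies any
	 * thread pended on the timer (e.g. in k_timer_status_sync()).
	 */
	k_timer_stop(&my_timer);
}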
static void ready_one_thread(_wait_q_t *wq)
{
	struct k_thread *th = _unpend_first_thread(wq);

	if (th) {
		_abort_thread_timeout(th);
		_ready_thread(th);
	}
}
int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
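/*
 * Usage sketch (hypothetical names): a consumer thread draining a
 * queue defined with K_MSGQ_DEFINE(). The bounded timeout exercises
 * the _pend_current_thread()/_Swap() path above when the queue is
 * empty; K_NO_WAIT would instead return -ENOMSG immediately.
 */
#include <zephyr.h>

struct sensor_msg {
	u32_t id;
	s32_t value;
};

K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 8, 4);

void consumer_thread(void)
{
	struct sensor_msg msg;

	for (;;) {
		if (k_msgq_get(&sensor_q, &msg, 100) == 0) {
			/*
			 * process msg; a writer pended on the full queue,
			 * if any, has had its message copied into the
			 * slot just freed
			 */
		}
	}
}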
/**
 * @brief Ready a pipe thread
 *
 * If the pipe thread is a real thread, then add it to the ready queue.
 * If it is a dummy thread, then finish the asynchronous work.
 *
 * @return N/A
 */
static void _pipe_thread_ready(struct k_thread *thread)
{
	unsigned int key;

#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	if (thread->base.thread_state & _THREAD_DUMMY) {
		_pipe_async_finish((struct k_pipe_async *)thread);
		return;
	}
#endif

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);
}
int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
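/*
 * Usage sketch (hypothetical names): a producer posting from an ISR.
 * The K_NO_WAIT timeout is mandatory in ISR context, per the
 * __ASSERT() above; when a consumer is already pended, the message is
 * handed to it directly and never touches the ring buffer.
 */
#include <zephyr.h>

struct sensor_msg {
	u32_t id;
	s32_t value;
};

extern struct k_msgq sensor_q;

void sensor_isr(void *arg)
{
	struct sensor_msg msg = { .id = 1, .value = 42 };

	if (k_msgq_put(&sensor_q, &msg, K_NO_WAIT) != 0) {
		/* queue full and no reader pended: message dropped */
	}
}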
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t Timeout used by the timer.
 *
 * @return N/A
 */
void _timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	unsigned int key;

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (timer->period > 0) {
		key = irq_lock();
		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
			     timer->period);
		irq_unlock(key);
	}

	/* update timer's status */
	timer->status += 1;

	/* invoke timer expiry function */
	if (timer->expiry_fn) {
		timer->expiry_fn(timer);
	}

	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);

	if (!thread) {
		return;
	}

	/*
	 * Interrupts _DO NOT_ have to be locked in this specific instance of
	 * calling _unpend_thread() because a) this is the only place a thread
	 * can be taken off this pend queue, and b) the only place a thread
	 * can be put on the pend queue is at thread level, which of course
	 * cannot interrupt the current context.
	 */
	_unpend_thread(thread);

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);

	_set_thread_return_value(thread, 0);
}
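/*
 * Usage sketch (hypothetical names): a periodic timer. As the handler
 * above suggests, the expiry function runs from the system clock
 * interrupt, so it must stay short; k_timer_status_sync() pends the
 * caller on timer->wait_q until the next expiration readies it.
 */
#include <zephyr.h>

static void tick_handler(struct k_timer *timer)
{
	/* ISR context: defer real work to a thread */
}

static struct k_timer tick_timer;

void periodic_demo(void)
{
	k_timer_init(&tick_timer, tick_handler, NULL);
	k_timer_start(&tick_timer, 500, 500);	/* fire every 500 ms */

	for (;;) {
		k_timer_status_sync(&tick_timer);	/* block until expiry */
	}
}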
void k_msgq_purge(struct k_msgq *q)
{
	unsigned int key = irq_lock();
	struct k_thread *pending_thread;

	/* wake up any threads that are waiting to write */
	while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {
		_set_thread_return_value(pending_thread, -ENOMSG);
		_abort_thread_timeout(pending_thread);
		_ready_thread(pending_thread);
	}

	q->used_msgs = 0;
	q->read_ptr = q->write_ptr;

	_reschedule_threads(key);
}
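/*
 * Usage sketch (hypothetical names): draining and resetting a queue.
 * Any writer blocked in k_msgq_put() at this moment wakes up with
 * -ENOMSG, the return value set in the loop above.
 */
#include <zephyr.h>

extern struct k_msgq sensor_q;

void reset_sensor_queue(void)
{
	k_msgq_purge(&sensor_q);
	/* the queue is now empty and no writers remain pended on it */
}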
void k_mem_pool_free(struct k_mem_block *block)
{
	int i, key, need_sched = 0;
	struct k_mem_pool *p = get_pool(block->id.pool);
	size_t lsizes[p->n_levels];

	/* As in k_mem_pool_alloc(), we build a table of level sizes
	 * to avoid having to store it in precious RAM bytes.
	 * Overhead here is somewhat higher because free_block()
	 * doesn't inherently need to traverse all the larger
	 * sublevels.
	 */
	lsizes[0] = _ALIGN4(p->max_sz);
	for (i = 1; i <= block->id.level; i++) {
		lsizes[i] = _ALIGN4(lsizes[i-1] / 4);
	}

	free_block(get_pool(block->id.pool), block->id.level,
		   lsizes, block->id.block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}
}
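/*
 * Usage sketch (hypothetical names and sizes): a matching alloc/free
 * pair. K_MEM_POOL_DEFINE() fixes the block-size levels that the
 * lsizes[] table above is rebuilt from, and freeing wakes any
 * allocator blocked on the pool so it can retry its request.
 */
#include <zephyr.h>

K_MEM_POOL_DEFINE(my_pool, 64, 4096, 2, 4);

void pool_demo(void)
{
	struct k_mem_block block;

	if (k_mem_pool_alloc(&my_pool, &block, 200, K_NO_WAIT) == 0) {
		/* use block.data, then return it to the pool */
		k_mem_pool_free(&block);
	}
}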
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_abort_thread_timeout(thread);
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
/**
 * @brief Internal API used to send data to a pipe
 */
int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
			 unsigned char *data, size_t bytes_to_write,
			 size_t *bytes_written, size_t min_xfer,
			 s32_t timeout)
{
	struct k_thread *reader;
	struct k_pipe_desc *desc;
	sys_dlist_t xfer_list;
	unsigned int key;
	size_t num_bytes_written = 0;
	size_t bytes_copied;

#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0)
	ARG_UNUSED(async_desc);
#endif

	key = irq_lock();

	/*
	 * Create a list of "working readers" into which the data will be
	 * directly copied.
	 */
	if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
				pipe->size - pipe->bytes_used, bytes_to_write,
				min_xfer, timeout)) {
		irq_unlock(key);
		*bytes_written = 0;
		return -EIO;
	}

	_sched_lock();
	irq_unlock(key);

	/*
	 * 1. 'xfer_list' currently contains a list of reader threads that can
	 *    have their read requests fulfilled by the current call.
	 * 2. 'reader' if not NULL points to a thread on the reader wait_q
	 *    that can get some of its requested data.
	 * 3. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur
	 * 4. If 'reader' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data to it.
	 */

	struct k_thread *thread = (struct k_thread *)sys_dlist_get(&xfer_list);

	while (thread) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/* The thread's read request has been satisfied. Ready it. */
		key = irq_lock();
		_ready_thread(thread);
		irq_unlock(key);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	/*
	 * Copy any data to the reader that we left on the wait_q.
	 * It is possible no data will be copied.
	 */
	if (reader) {
		desc = (struct k_pipe_desc *)reader->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	/*
	 * As much data as possible has been directly copied to any waiting
	 * readers. Add as much as possible to the pipe's circular buffer.
	 */
	num_bytes_written +=
		_pipe_buffer_put(pipe, data + num_bytes_written,
				 bytes_to_write - num_bytes_written);

	if (num_bytes_written == bytes_to_write) {
		*bytes_written = num_bytes_written;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
		if (async_desc != NULL) {
			_pipe_async_finish(async_desc);
		}
#endif
		k_sched_unlock();
		return 0;
	}

	/* Not all data was copied. */

#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	if (async_desc != NULL) {
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_thread((struct k_thread *)&async_desc->thread,
			     &pipe->wait_q.writers, K_FOREVER);
		_reschedule_threads(key);
		return 0;
	}
#endif

	struct k_pipe_desc pipe_desc;

	pipe_desc.buffer        = data + num_bytes_written;
	pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;

	if (timeout != K_NO_WAIT) {
		_current->base.swap_data = &pipe_desc;
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.writers, timeout);
		_Swap(key);
	} else {
		k_sched_unlock();
	}

	*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;

	return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				 bytes_to_write);
}
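/*
 * Usage sketch (hypothetical names): writing through the public,
 * synchronous k_pipe_put(), which is assumed here to forward to this
 * internal routine with async_desc == NULL. The min_xfer argument asks
 * that at least half the buffer be accepted; per _pipe_return_code(),
 * the call returns 0 when at least that many bytes were transferred.
 */
#include <zephyr.h>
#include <string.h>

K_PIPE_DEFINE(my_pipe, 256, 4);

void pipe_writer(void)
{
	u8_t buf[64];
	size_t written;

	memset(buf, 0xAA, sizeof(buf));

	if (k_pipe_put(&my_pipe, buf, sizeof(buf), &written,
		       sizeof(buf) / 2, 100) == 0) {
		/* at least sizeof(buf) / 2 bytes were accepted */
	}
}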