/**
 * Remove the timer from the chained list of timers.
 *
 * This service may panic if:
 *     the tmr parameter is null, invalid, or the timer is not running.
 *
 * Authorized execution levels: task, fiber, ISR
 *
 * @param tmr : handle of the timer (value returned by timer_create).
 */
void timer_stop(T_TIMER tmr)
{
	T_TIMER_LIST_ELT *timer = (T_TIMER_LIST_ELT *)tmr;
	bool doSignal = false;

	if (NULL != timer) {
		int flags = irq_lock();

		/* if timer is active */
		if (timer->desc.status == E_TIMER_RUNNING) {
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
			_log("\nINFO : timer_stop : stopping timer at addr = 0x%x",
			     (uint32_t)timer);
#endif
			/* remove the timer */
			if (g_CurrentTimerHead == timer) {
				doSignal = true;
			}
			remove_timer(timer);
			irq_unlock(flags);

			if (doSignal) {
				/* the next timer to expire was removed,
				 * unblock timer_task to assess the change */
				signal_timer_task();
			}
		} else {
			/* tmr is not running */
			irq_unlock(flags);
		}
	} else {
		/* tmr is not a timer from g_TimerPool_elements */
		panic(E_OS_ERR);
	}
}
void k_timer_stop(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();
	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);

	irq_unlock(key);

	if (inactive) {
		return;
	}

	if (timer->stop_fn) {
		timer->stop_fn(timer);
	}

	key = irq_lock();
	struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);

	if (pending_thread) {
		_ready_thread(pending_thread);
	}

	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
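/*
 * Illustrative usage sketch (not from the original source): demonstrates the
 * contract of k_timer_stop() above, i.e. stop_fn runs when a running timer is
 * stopped before expiry. The millisecond arguments to k_timer_start() are an
 * assumption about this kernel version; the handler names are hypothetical.
 */
static void my_expiry_fn(struct k_timer *timer)
{
	/* would run if the timer expired on its own */
}

static void my_stop_fn(struct k_timer *timer)
{
	/* invoked from k_timer_stop() because the timer was still running */
}

static struct k_timer my_timer;

void timer_stop_example(void)
{
	k_timer_init(&my_timer, my_expiry_fn, my_stop_fn);
	k_timer_start(&my_timer, 1000, 0);	/* one-shot, ~1 s (assumed ms units) */
	k_timer_stop(&my_timer);		/* aborts the timeout, calls my_stop_fn */
}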
static void free_block(struct k_mem_pool *p, int level, size_t *lsizes, int bn)
{
	int i, key, lsz = lsizes[level];
	void *block = block_ptr(p, lsz, bn);

	key = irq_lock();

	set_free_bit(p, level, bn);

	if (level && partner_bits(p, level, bn) == 0xf) {
		for (i = 0; i < 4; i++) {
			int b = (bn & ~3) + i;

			clear_free_bit(p, level, b);
			if (b != bn &&
			    block_fits(p, block_ptr(p, lsz, b), lsz)) {
				sys_dlist_remove(block_ptr(p, lsz, b));
			}
		}

		irq_unlock(key);
		free_block(p, level-1, lsizes, bn / 4); /* tail recursion! */
		return;
	}

	if (block_fits(p, block, lsz)) {
		sys_dlist_append(&p->levels[level].free_list, block);
	}

	irq_unlock(key);
}
static u16_t rng_pool_get(struct rng_pool *rngp, u8_t *buf, u16_t len)
{
	u32_t last = rngp->last;
	u32_t mask = rngp->mask;
	u8_t *dst = buf;
	u32_t first, available;
	u32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) already allocated area for read.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move alloc index forward to signal that part of the buffer is
	 * now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move read index
	 * to signal that all allocated regions are now read and could be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		nrf_rng_task_trigger(NRF_RNG_TASK_START);
	}

	return len;
}
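/*
 * Illustrative caller sketch (not from the original source): rng_pool_get()
 * may return fewer bytes than requested, so a consumer loops until the
 * request is satisfied. The helper name and the busy-wait retry strategy are
 * assumptions for the example.
 */
static void fill_from_pool(struct rng_pool *rngp, u8_t *out, u16_t need)
{
	while (need > 0) {
		u16_t got = rng_pool_get(rngp, out, need);

		out += got;
		need -= got;

		if (got == 0) {
			/* pool drained; the threshold check above has already
			 * restarted the RNG hardware, so just wait briefly */
			k_busy_wait(100);
		}
	}
}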
static int quark_se_ipm_send(struct device *d, int wait, uint32_t id,
			     const void *data, int size)
{
	struct quark_se_ipm_config_info *config = d->config->config_info;
	volatile struct quark_se_ipm *ipm = config->ipm;
	const uint8_t *data8;
	int i;
	int flags;

	if (id > QUARK_SE_IPM_MAX_ID_VAL) {
		return -EINVAL;
	}

	if (config->direction != QUARK_SE_IPM_OUTBOUND) {
		return -EINVAL;
	}

	if (size > QUARK_SE_IPM_DATA_BYTES) {
		return -EMSGSIZE;
	}

	flags = irq_lock();
	if (ipm->sts.sts != 0) {
		irq_unlock(flags);
		return -EBUSY;
	}

	/* Populate the data, memcpy doesn't take volatiles */
	data8 = (const uint8_t *)data;
	for (i = 0; i < size; ++i) {
		ipm->data[i] = data8[i];
	}
	ipm->ctrl.ctrl = id;

	/* Cause the interrupt to assert on the remote side */
	ipm->ctrl.irq = 1;

	/* Wait for HW to set the sts bit */
	while (ipm->sts.sts == 0) {
	}
	irq_unlock(flags);

	if (wait) {
		/* Loop until remote clears the status bit */
		while (ipm->sts.sts != 0) {
		}
	}
	return 0;
}
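/*
 * Illustrative caller sketch (not from the original source): sending a small
 * message through the generic IPM API, which dispatches to
 * quark_se_ipm_send() above on an outbound channel. The device binding name
 * is an assumption; boards define their own channel names.
 */
void ipm_send_example(void)
{
	struct device *ipm = device_get_binding("ipm_outbound");	/* assumed name */
	uint8_t msg[4] = { 0xde, 0xad, 0xbe, 0xef };
	int ret;

	if (!ipm) {
		return;
	}

	/* wait=1: block until the remote side clears the status bit */
	ret = ipm_send(ipm, 1, 0 /* id */, msg, sizeof(msg));
	if (ret == -EBUSY) {
		/* remote has not consumed the previous message yet */
	}
}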
/*
 * Put a character into a queue.
 */
void ttyq_putc(int c, struct tty_queue *tq)
{
	irq_lock();
	if (ttyq_full(tq)) {
		irq_unlock();
		return;
	}
	tq->tq_buf[tq->tq_tail] = c;
	tq->tq_tail = ttyq_next(tq->tq_tail);
	tq->tq_count++;
	irq_unlock();
}
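/*
 * Hypothetical counterpart sketch (not from the original source): the
 * consumer side of the same queue, using the same lock discipline as
 * ttyq_putc() above. ttyq_empty() and the tq_head field are assumed
 * helpers/fields of this queue implementation.
 */
int ttyq_getc(struct tty_queue *tq)
{
	int c;

	irq_lock();
	if (ttyq_empty(tq)) {			/* assumed helper */
		irq_unlock();
		return -1;
	}
	c = tq->tq_buf[tq->tq_head];		/* assumed head index field */
	tq->tq_head = ttyq_next(tq->tq_head);
	tq->tq_count--;
	irq_unlock();
	return c;
}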
int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData,
			int32_t timeout_in_ticks)
{
	unsigned int imask;

	imask = irq_lock();

	while (1) {
		/*
		 * Predict that the branch will be taken to break out of the
		 * loop.  There is little cost to a misprediction since that
		 * leads to idle.
		 */
		if (likely(stack->next > stack->base)) {
			stack->next--;
			*pData = *(stack->next);
			irq_unlock(imask);
			return 1;
		}

		if (timeout_in_ticks == TICKS_NONE) {
			break;
		}

		/*
		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled
		 * to prevent the scenario where an interrupt fires after
		 * re-enabling interrupts and before executing the "halt"
		 * instruction.  If the ISR performs a nano_isr_stack_push() on
		 * the same stack object, the subsequent execution of the
		 * "halt" instruction will result in the queued data being
		 * ignored until the next interrupt, if any.
		 *
		 * Thus it should be clear that an architecture's
		 * implementation of nano_cpu_atomic_idle() must be able to
		 * atomically re-enable interrupts and enter a low-power mode.
		 *
		 * This explanation is valid for all nanokernel objects:
		 * stacks, FIFOs, LIFOs, and semaphores, for their
		 * nano_task_<object>_<get>() routines.
		 */
		nano_cpu_atomic_idle(imask);
		imask = irq_lock();
	}

	irq_unlock(imask);
	return 0;
}
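/*
 * Illustrative caller sketch (not from the original source): a task-level
 * consumer that blocks in nano_task_stack_pop() until an ISR or fiber pushes
 * data onto the stack object. TICKS_UNLIMITED as the wait specifier, the
 * stack instance name, and process_item() are assumptions for the example.
 */
static struct nano_stack cmd_stack;	/* assumed, initialized elsewhere */

void consumer_task(void)
{
	uint32_t item;

	while (1) {
		/* idles inside nano_task_stack_pop() until data arrives */
		if (nano_task_stack_pop(&cmd_stack, &item, TICKS_UNLIMITED)) {
			process_item(item);	/* hypothetical handler */
		}
	}
}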
/*
 * Test command to get the status of the RTC (not configured, running, finished): rtc status
 *
 * @param[in] argc Number of arguments in the Test Command (including group and name)
 * @param[in] argv Table of null-terminated buffers containing the arguments
 * @param[in] ctx  The context to pass back to responses
 */
void rtc_status(int argc, char *argv[], struct tcmd_handler_ctx *ctx)
{
	char answer[RTC_FINISHED_STR_LEN + DOUBLE_UINT32_ANSWER_LENGTH];
	uint32_t keys = 0;
	bool local_alarm_pending = false;
	uint32_t local_test_rtc = 0;
	uint32_t local_tcmd_user_alarm_rtc_val = 0;
	uint32_t local_tcmd_alarm_rtc_read = 0;

	keys = irq_lock();
	local_alarm_pending = alarm_pending;
	local_test_rtc = test_rtc;
	local_tcmd_user_alarm_rtc_val = tcmd_user_alarm_rtc_val;
	local_tcmd_alarm_rtc_read = tcmd_alarm_rtc_read;
	irq_unlock(keys);

	if (local_alarm_pending) {
		TCMD_RSP_FINAL(ctx, "Rtc is running.");
	} else if (local_test_rtc) {
		snprintf(answer,
			 RTC_FINISHED_STR_LEN + DOUBLE_UINT32_ANSWER_LENGTH,
			 "Rtc finished %u %u",
			 local_tcmd_user_alarm_rtc_val,
			 local_tcmd_alarm_rtc_read);
		TCMD_RSP_FINAL(ctx, answer);
	} else {
		TCMD_RSP_ERROR(ctx, "Rtc not configured.");
	}
}
/*
 * Test command to set RTC alarm time, and start RTC: rtc alarm <rtc_alarm_time>
 *
 * @param[in] argc Number of arguments in the Test Command (including group and name)
 * @param[in] argv Table of null-terminated buffers containing the arguments
 * @param[in] ctx  The context to pass back to responses
 */
void rtc_alarm_tcmd(int argc, char *argv[], struct tcmd_handler_ctx *ctx)
{
	struct rtc_config config = { 0 };
	struct device *rtc_dev;
	uint32_t keys = 0;

	if (argc == RTC_ARG_NO && isdigit(argv[RTC_TIME_IDX][0])) {
		keys = irq_lock();
		tcmd_user_alarm_rtc_val =
			(uint32_t)strtoul(argv[RTC_TIME_IDX], NULL, 10);
		test_rtc = 0;
		config.alarm_val = tcmd_user_alarm_rtc_val;
		config.alarm_enable = true;
		config.cb_fn = test_rtc_interrupt_fn;
		alarm_pending = true;
		irq_unlock(keys);

		rtc_dev = device_get_binding(RTC_DRV_NAME);
		assert(rtc_dev != NULL);
		rtc_dev->driver_data = (void *)ctx;

		rtc_enable(rtc_dev);
		rtc_set_config(rtc_dev, &config);
		rtc_set_alarm(rtc_dev, config.alarm_val);
	} else {
		TCMD_RSP_ERROR(ctx, "Usage: rtc alarm <alarm_time>");
	}
}
/**
 *
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * This function is utilized by the nanokernel object "wait" APIs for tasks,
 * e.g. nano_task_lifo_get(), nano_task_sem_take(),
 * nano_task_stack_pop(), and nano_task_fifo_get().
 *
 * INTERNAL
 * The requirements for nano_cpu_atomic_idle() are as follows:
 * 1) The enablement of interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode.  See the comments
 *    in nano_task_lifo_get(), for example, for the race condition that
 *    occurs if this requirement is not met.
 *
 * 2) After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @return N/A
 */
void nano_cpu_atomic_idle(unsigned int key)
{
	/* Do nothing but restore IRQ state.  This CPU does not have any
	 * kind of power saving instruction.
	 */
	irq_unlock(key);
}
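/*
 * Illustrative sketch (not from the original source) of how the atomicity
 * requirement above could be met on a hypothetical core whose wait-for-
 * interrupt instruction still wakes on interrupts that become pending while
 * masked: sleep first with interrupts still locked, then restore the
 * caller's lockout state. The "wfi" mnemonic is an assumption about the
 * target CPU, not the port that ships with this file.
 */
void nano_cpu_atomic_idle_sketch(unsigned int key)
{
	/* interrupts are still locked here, so no wakeup event can be lost
	 * between the caller's check and the sleep instruction */
	__asm__ volatile("wfi");

	/* restore the interrupt lockout state requested by the caller */
	irq_unlock(key);
}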
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	struct k_thread *first_thread, *thread;
	unsigned int key;

	key = irq_lock();
	first_thread = _peek_first_pending_thread(&queue->wait_q);
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
	}

	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (first_thread) {
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
void _IntVecSet(unsigned int vector, void (*routine)(void *), unsigned int dpl)
{
	unsigned long long *pIdtEntry;
	unsigned int key;

	/*
	 * The <vector> parameter must be less than the value of the
	 * CONFIG_IDT_NUM_VECTORS configuration parameter, however,
	 * explicit validation will not be performed in this primitive.
	 */

	pIdtEntry = (unsigned long long *)(_idt_base_address + (vector << 3));

	/*
	 * Lock interrupts to protect the IDT entry to which _IdtEntryCreate()
	 * will write.  They must be locked here because the _IdtEntryCreate()
	 * code is shared with the 'gen_idt' host tool.
	 */
	key = irq_lock();

	_IdtEntCreate(pIdtEntry, routine, dpl);

#ifdef CONFIG_MVIC
	/* Some nonstandard interrupt controllers may be doing some IDT
	 * caching for performance reasons and need the IDT reloaded if
	 * any changes are made to it
	 */
	__asm__ volatile ("lidt _Idt");
#endif

	irq_unlock(key);
}
/**
 *
 * @brief Disable an individual LOAPIC interrupt (IRQ)
 *
 * This routine sets the interrupt mask bit in the LVT for the specified IRQ.
 *
 * @param irq IRQ number of the interrupt
 *
 * @returns N/A
 */
void _loapic_irq_disable(unsigned int irq)
{
	volatile int *pLvt;	/* pointer to local vector table */
	int32_t oldLevel;	/* previous interrupt lock level */

	/*
	 * irq is actually an index to local APIC LVT register.
	 * ASSERT if out of range for MVIC implementation.
	 */
	__ASSERT_NO_MSG(irq < LOAPIC_IRQ_COUNT);

	/*
	 * See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings
	 * and the assumption concerning LVT spacing.
	 */
	pLvt = (volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER +
				(irq * LOAPIC_LVT_REG_SPACING));

	/* set the mask bit in the LVT */
	oldLevel = irq_lock();
	*pLvt = *pLvt | LOAPIC_LVT_MASKED;
	irq_unlock(oldLevel);
}
void _arch_isr_direct_pm(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	unsigned int key;

	/* irq_lock() does what we want for this CPU */
	key = irq_lock();
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Lock all interrupts. irq_lock() will on this CPU only disable those
	 * lower than BASEPRI, which is not what we want. See comments in
	 * arch/arm/core/isr_wrapper.S
	 */
	__asm__ volatile("cpsid i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	if (_kernel.idle) {
		s32_t idle_val = _kernel.idle;

		_kernel.idle = 0;
		z_sys_power_save_idle_exit(idle_val);
	}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	irq_unlock(key);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile("cpsie i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
void z_arch_irq_disable(unsigned int irq)
{
	unsigned int key = irq_lock();

	z_arc_v2_irq_unit_int_disable(irq);
	irq_unlock(key);
}
static void qdec_nrfx_event_handler(nrfx_qdec_event_t event)
{
	sensor_trigger_handler_t handler;
	unsigned int key;

	switch (event.type) {
	case NRF_QDEC_EVENT_REPORTRDY:
		accumulate(&qdec_nrfx_data, event.data.report.acc);

		key = irq_lock();
		handler = qdec_nrfx_data.data_ready_handler;
		irq_unlock(key);

		if (handler) {
			struct sensor_trigger trig = {
				.type = SENSOR_TRIG_DATA_READY,
				.chan = SENSOR_CHAN_ROTATION,
			};

			handler(DEVICE_GET(qdec_nrfx), &trig);
		}
		break;

	default:
		LOG_ERR("unhandled event (0x%x)", event.type);
		break;
	}
}
static int qdec_nrfx_channel_get(struct device *dev,
				 enum sensor_channel chan,
				 struct sensor_value *val)
{
	struct qdec_nrfx_data *data = &qdec_nrfx_data;
	unsigned int key;
	s32_t acc;

	ARG_UNUSED(dev);
	LOG_DBG("");

	if (chan != SENSOR_CHAN_ROTATION) {
		return -ENOTSUP;
	}

	key = irq_lock();
	acc = data->acc;
	data->acc = 0;
	irq_unlock(key);

	BUILD_ASSERT_MSG(DT_NORDIC_NRF_QDEC_QDEC_0_STEPS > 0,
			 "only positive number valid");
	BUILD_ASSERT_MSG(DT_NORDIC_NRF_QDEC_QDEC_0_STEPS <= 2148,
			 "overflow possible");

	val->val1 = (acc * FULL_ANGLE) / DT_NORDIC_NRF_QDEC_QDEC_0_STEPS;
	val->val2 = (acc * FULL_ANGLE) -
		    (val->val1 * DT_NORDIC_NRF_QDEC_QDEC_0_STEPS);
	if (val->val2 != 0) {
		val->val2 *= 1000000;
		val->val2 /= DT_NORDIC_NRF_QDEC_QDEC_0_STEPS;
	}

	return 0;
}
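/*
 * Illustrative application sketch (not from the original source): reading the
 * accumulated rotation through the generic sensor API, which dispatches to
 * qdec_nrfx_channel_get() above. The device label string is an assumption;
 * the real label comes from the devicetree.
 */
void read_rotation(void)
{
	struct device *qdec = device_get_binding("QDEC_0");	/* assumed label */
	struct sensor_value rot;

	if (!qdec) {
		return;
	}

	if (sensor_sample_fetch(qdec) == 0 &&
	    sensor_channel_get(qdec, SENSOR_CHAN_ROTATION, &rot) == 0) {
		/* val1 = whole degrees, val2 = fractional part in millionths */
		printk("rotation: %d deg + %d/1000000\n", rot.val1, rot.val2);
	}
}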
/**
 * Execute the callback of a timer.
 *
 * @param expiredTimer pointer to the timer
 *
 * WARNING: expiredTimer MUST NOT be null (rem: static function)
 */
static void execute_callback(T_TIMER_LIST_ELT *expiredTimer)
{
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
	_log("\nINFO : execute_callback : executing callback of timer 0x%x (now = %u - expiration = %u)",
	     (uint32_t)expiredTimer, get_uptime_ms(),
	     expiredTimer->desc.expiration);
#endif

	int flags = irq_lock();

	/* if the timer was not stopped by its own callback */
	if (E_TIMER_RUNNING == expiredTimer->desc.status) {
		remove_timer(expiredTimer);
		/* add it again if repeat flag was on */
		if (expiredTimer->desc.repeat) {
			expiredTimer->desc.expiration =
				get_uptime_ms() + expiredTimer->desc.delay;
			add_timer(expiredTimer);
		}
	}
	irq_unlock(flags);

	/* invoke the callback */
	if (NULL != expiredTimer->desc.callback) {
		expiredTimer->desc.callback(expiredTimer->desc.data);
	} else {
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
		_log("\nERROR : execute_callback : timer callback is null ");
#endif
		panic(E_OS_ERR);
	}
}
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	struct k_thread *first_pending_thread;
	unsigned int key;

	key = irq_lock();

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, data);
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		sys_slist_insert(&queue->data_q, prev, data);
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
int32_t nano_timer_ticks_remain(struct nano_timer *timer)
{
	int key = irq_lock();
	int32_t remaining_ticks;
	struct _nano_timeout *t = &timer->timeout_data;
	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
	struct _nano_timeout *iterator;

	if (t->delta_ticks_from_prev == -1) {
		remaining_ticks = 0;
	} else {
		/*
		 * As nanokernel timeouts are stored in a linked list with
		 * delta_ticks_from_prev, to get the actual number of ticks
		 * remaining for the timer, walk through the timeouts list
		 * and accumulate all the delta_ticks_from_prev values up to
		 * the timer.
		 */
		iterator =
			(struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
		remaining_ticks = iterator->delta_ticks_from_prev;
		while (iterator != t) {
			iterator = (struct _nano_timeout *)sys_dlist_peek_next(
				timeout_q, &iterator->node);
			remaining_ticks += iterator->delta_ticks_from_prev;
		}
	}

	irq_unlock(key);
	return remaining_ticks;
}
void _timer_stop_non_preemptible(struct nano_timer *timer)
{
	struct _nano_timeout *t = &timer->timeout_data;
	struct tcs *tcs = t->tcs;
	int key = irq_lock();

	/*
	 * Verify first if fiber is not waiting on an object,
	 * timer is not expired and there is a fiber waiting
	 * on it
	 */
	if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
	    tcs != NULL) {
		if (_IS_MICROKERNEL_TASK(tcs)) {
			_NANO_TIMER_TASK_READY(tcs);
		} else {
			_nano_fiber_ready(tcs);
		}
	}

	/*
	 * After timer gets aborted nano_timer_test() should
	 * return NULL until timer gets restarted
	 */
	timer->user_data = NULL;
	irq_unlock(key);
}
/* The actual printk hook */
static int telnet_console_out(int c)
{
	int key = irq_lock();
	struct line_buf *lb = telnet_rb_get_line_in();
	bool yield = false;

	lb->buf[lb->len++] = (char)c;

	if (c == '\n' || lb->len == TELNET_LINE_SIZE - 1) {
		lb->buf[lb->len-1] = NVT_CR;
		lb->buf[lb->len++] = NVT_LF;
		telnet_rb_switch();
		yield = true;
	}

	irq_unlock(key);

#ifdef CONFIG_TELNET_CONSOLE_DEBUG_DEEP
	/* This is ugly, but if one wants to debug telnet, it
	 * will also output the character to original console
	 */
	orig_printk_hook(c);
#endif

	/* yield outside the IRQ lock so the telnet thread can flush the
	 * completed line
	 */
	if (yield) {
		k_yield();
	}

	return c;
}
void _loapic_int_vec_set(unsigned int irq,    /* IRQ number of the interrupt */
			 unsigned int vector  /* vector to copy into the LVT */
			 )
{
	volatile int *pLvt;	/* pointer to local vector table */
	int32_t oldLevel;	/* previous interrupt lock level */

	/*
	 * The following mappings are used:
	 *
	 *   IRQ0 -> LOAPIC_TIMER
	 *   IRQ1 -> LOAPIC_THERMAL
	 *   IRQ2 -> LOAPIC_PMC
	 *   IRQ3 -> LOAPIC_LINT0
	 *   IRQ4 -> LOAPIC_LINT1
	 *   IRQ5 -> LOAPIC_ERROR
	 *
	 * It's assumed that LVTs are spaced by 0x10 bytes
	 */

	pLvt = (volatile int *)
		(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER + (irq * 0x10));

	/* update the 'vector' bits in the LVT */

	oldLevel = irq_lock();
	*pLvt = (*pLvt & ~LOAPIC_VECTOR) | vector;
	irq_unlock(oldLevel);
}
static void release_sba_req(uint8_t *req_bitmap, uint8_t i)
{
	uint32_t saved = irq_lock();

	*req_bitmap &= ~(1 << i);
	irq_unlock(saved);
}
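/*
 * Hypothetical allocation counterpart (not from the original source): shows
 * why the bitmap update needs the IRQ lock -- the scan-and-set must not be
 * interleaved with release_sba_req() running from interrupt context. The
 * function name and 'count' parameter are assumptions for the example.
 */
static int alloc_sba_req(uint8_t *req_bitmap, uint8_t count)
{
	uint32_t saved = irq_lock();

	for (uint8_t i = 0; i < count; i++) {
		if (!(*req_bitmap & (1 << i))) {
			*req_bitmap |= (1 << i);	/* claim slot i */
			irq_unlock(saved);
			return i;
		}
	}

	irq_unlock(saved);
	return -1;	/* no free request slot */
}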
/**
 *
 * @brief Write to a 32-bit MVIC IO APIC register
 *
 * @param irq INTIN number
 * @param value value to be written
 *
 * @returns N/A
 */
static void _mvic_rte_set(unsigned int irq, uint32_t value)
{
	int key;	/* interrupt lock level */
	volatile unsigned int *rte;
	volatile unsigned int *index;
	unsigned int low_nibble;
	unsigned int high_nibble;

	index = (unsigned int *)(CONFIG_IOAPIC_BASE_ADDRESS + IOAPIC_IND);
	rte = (unsigned int *)(CONFIG_IOAPIC_BASE_ADDRESS + IOAPIC_DATA);

	/* Set index in the IOREGSEL */
	__ASSERT(irq < CONFIG_IOAPIC_NUM_RTES, "INVL");

	low_nibble = ((irq & MVIC_LOW_NIBBLE_MASK) << 0x1);
	high_nibble = ((irq & MVIC_HIGH_NIBBLE_MASK) << 0x2);

	/* lock interrupts to ensure indirect addressing works "atomically" */
	key = irq_lock();

	*(index) = high_nibble | low_nibble;
	*(rte) = (value & IOAPIC_LO32_RTE_SUPPORTED_MASK);

	irq_unlock(key);
}
/**
 * @brief Initialize UART channel
 *
 * This routine is called to reset the chip in a quiescent state.
 * It is assumed that this function is called only once per UART.
 *
 * @param dev UART device struct
 *
 * @return DEV_OK
 */
static int uart_k20_init(struct device *dev)
{
	int old_level;	/* old interrupt lock level */
	union C1 c1;	/* UART C1 register value */
	union C2 c2;	/* UART C2 register value */
	volatile struct K20_UART *uart = UART_STRUCT(dev);
	struct uart_device_config * const dev_cfg = DEV_CFG(dev);
	struct uart_k20_dev_data_t * const dev_data = DEV_DATA(dev);

	/* disable interrupts */
	old_level = irq_lock();

	_uart_k20_baud_rate_set(uart, dev_cfg->sys_clk_freq,
				dev_data->baud_rate);

	/* 1 start bit, 8 data bits, no parity, 1 stop bit */
	c1.value = 0;
	uart->c1 = c1;

	/* enable Rx and Tx with interrupts disabled */
	c2.value = 0;
	c2.field.rx_enable = 1;
	c2.field.tx_enable = 1;
	uart->c2 = c2;

	/* restore interrupt state */
	irq_unlock(old_level);

	dev->driver_api = &uart_k20_driver_api;

	return DEV_OK;
}
static int nordicsemi_nrf52_init(struct device *arg)
{
	u32_t key;

	ARG_UNUSED(arg);

	key = irq_lock();

	SystemInit();

#ifdef CONFIG_NRF_ENABLE_ICACHE
	/* Enable the instruction cache */
	NRF_NVMC->ICACHECNF = NVMC_ICACHECNF_CACHEEN_Msk;
#endif

#if defined(CONFIG_SOC_DCDC_NRF52X)
	nrf_power_dcdcen_set(true);
#endif

	_ClearFaults();

	/* Install default handler that simply resets the CPU
	 * if configured in the kernel, NOP otherwise
	 */
	NMI_INIT();

	irq_unlock(key);

	return 0;
}
/*
 * @brief Initialize fake serial port
 * @which: port number
 * @init_info: pointer to initialization information
 */
void uart_init(int which, const struct uart_init_info * const init_info)
{
	int key = irq_lock();

	uart[which].regs = init_info->regs;
	irq_unlock(key);
}
void phil_entry(void)
{
	int counter;
	struct k_sem *f1;	/* fork #1 */
	struct k_sem *f2;	/* fork #2 */
	static int myId;	/* next philosopher ID */
	int pri = irq_lock();	/* interrupt lock level */
	int id = myId++;	/* current philosopher ID */

	irq_unlock(pri);

	/* always take the lowest fork first */
	if ((id + 1) != N_PHILOSOPHERS) {
		f1 = FORK(id);
		f2 = FORK(id + 1);
	} else {
		f1 = FORK(0);
		f2 = FORK(id);
	}

	for (counter = 0; counter < 5; counter++) {
		TAKE(f1);
		TAKE(f2);

		RANDDELAY(id);

		GIVE(f2);
		GIVE(f1);

		RANDDELAY(id);
	}
}
void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state--reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}