/**
 * @brief   Send one framed payload over the ethos UART link
 *
 * The payload is wrapped in ETHOS_FRAME_DELIMITER bytes and each payload
 * byte is sent through _write_escaped() so reserved bytes cannot appear
 * inside a frame. In thread context, writers are serialized via
 * dev->out_mutex. In interrupt context the mutex cannot be taken, so an
 * extra delimiter is sent first: this aborts any partially written frame
 * and makes the in-ISR write safe at the cost of that frame.
 *
 * @param dev           ethos device descriptor
 * @param data          payload bytes to send
 * @param len           number of payload bytes
 * @param frame_type    frame type marker; 0 means a plain data frame,
 *                      non-zero types are sent escaped as (type ^ 0x20)
 */
void ethos_send_frame(ethos_t *dev, const uint8_t *data, size_t len, unsigned frame_type)
{
    uint8_t frame_delim = ETHOS_FRAME_DELIMITER;

    if (!irq_is_in()) {
        mutex_lock(&dev->out_mutex);
    }
    else {
        /* Send frame delimiter. This cancels the current frame,
         * but enables in-ISR writes. */
        uart_write(dev->uart, &frame_delim, 1);
    }

    /* send frame delimiter (start of frame) */
    uart_write(dev->uart, &frame_delim, 1);

    /* set frame type */
    if (frame_type) {
        uint8_t out[2] = { ETHOS_ESC_CHAR, (frame_type ^ 0x20) };
        uart_write(dev->uart, out, 2);
    }

    /* send frame content; data is already const uint8_t*, so no cast is
     * needed (the original cast silently discarded const) */
    for (size_t i = 0; i < len; i++) {
        _write_escaped(dev->uart, data[i]);
    }

    /* end of frame */
    uart_write(dev->uart, &frame_delim, 1);

    if (!irq_is_in()) {
        mutex_unlock(&dev->out_mutex);
    }
}
/**
 * Yield the CPU so a higher-priority runnable thread can be scheduled
 * (ESP8266 port).
 *
 * In thread context the switch is performed either by raising the
 * context-switch software interrupt (when CONTEXT_SWITCH_BY_INT is
 * defined) or by calling vPortYield() directly. In interrupt context only
 * the FreeRTOS-port switch flag is set; the actual switch then happens
 * when the interrupt exits.
 */
void thread_yield_higher(void)
{
    /* reset hardware watchdog */
    system_soft_wdt_feed();

    /* yield next task */
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
    /* trace the outgoing thread: pid, name and current stack usage */
    if (sched_active_thread) {
        DEBUG("%u old task %u %s %u\n", phy_get_mactime(),
              sched_active_thread->pid, sched_active_thread->name,
              sched_active_thread->sp - sched_active_thread-> stack_start);
    }
#endif
    if (!irq_is_in()) {
#ifdef CONTEXT_SWITCH_BY_INT
        /* raise the context-switch software interrupt; NOTE(review): WSR
         * writes the soft-interrupt bit into the Xtensa INTERRUPT special
         * register — confirm against the port's macro definitions */
        WSR(BIT(ETS_SOFT_INUM), interrupt);
#else
        /* switch directly via the FreeRTOS port layer */
        vPortYield();
#endif
    }
    else {
        /* in ISR: flag the port to switch context on interrupt exit */
        _frxt_setup_switch();
    }
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
    /* trace the thread that is active after the switch */
    if (sched_active_thread) {
        DEBUG("%u new task %u %s %u\n", phy_get_mactime(),
              sched_active_thread->pid, sched_active_thread->name,
              sched_active_thread->sp - sched_active_thread-> stack_start);
    }
#endif
    return;
}
/**
 * Attempt a non-blocking message send to @p target_pid, dispatching to the
 * correct path for the current execution context.
 */
int msg_try_send(msg_t *m, kernel_pid_t target_pid)
{
    int res;

    if (irq_is_in()) {
        /* interrupt context: use the dedicated ISR send path */
        res = msg_send_int(m, target_pid);
    }
    else if (sched_active_pid == target_pid) {
        /* the target is the calling thread itself */
        res = msg_send_to_self(m);
    }
    else {
        /* regular case: non-blocking send with interrupts disabled */
        res = _msg_send(m, target_pid, false, irq_disable());
    }

    return res;
}
/**
 * Write @p len bytes from @p data to @p uart. The function is synchronous:
 * it only returns once the transmission has fully completed.
 *
 * With MODULE_PERIPH_DMA and a configured DMA stream, thread-context
 * writes go through DMA; interrupt-context writes fall back to byte-wise
 * transmission, suspending and later resuming any on-going DMA transfer.
 */
void uart_write(uart_t uart, const uint8_t *data, size_t len)
{
    assert(uart < UART_NUMOF);

#if DEVELHELP
    /* If tx is not enabled don't try to send */
    if (!(dev(uart)->CR1 & USART_CR1_TE)) {
        return;
    }
#endif
#ifdef MODULE_PERIPH_DMA
    if (!len) {
        return;
    }
    if (uart_config[uart].dma != DMA_STREAM_UNDEF) {
        if (irq_is_in()) {
            /* DMA cannot be awaited from interrupt context: pause any
             * on-going transfer, send byte-wise, then resume it */
            uint16_t todo = 0;
            if (dev(uart)->CR3 & USART_CR3_DMAT) {
                /* DMA transfer for UART on-going */
                todo = dma_suspend(uart_config[uart].dma);
            }
            if (todo) {
                /* fully stop the stream and detach the UART from DMA
                 * before writing bytes manually */
                dma_stop(uart_config[uart].dma);
                dev(uart)->CR3 &= ~USART_CR3_DMAT;
            }
            for (unsigned i = 0; i < len; i++) {
                send_byte(uart, data[i]);
            }
            if (todo > 0) {
                /* drain our bytes, re-attach DMA and resume the
                 * interrupted transfer where it was suspended */
                wait_for_tx_complete(uart);
                dev(uart)->CR3 |= USART_CR3_DMAT;
                dma_resume(uart_config[uart].dma, todo);
            }
        }
        else {
            dma_acquire(uart_config[uart].dma);
            dev(uart)->CR3 |= USART_CR3_DMAT;
            dma_transfer(uart_config[uart].dma, uart_config[uart].dma_chan,
                         data, (void *)&dev(uart)->TDR_REG, len,
                         DMA_MEM_TO_PERIPH, DMA_INC_SRC_ADDR);
            dma_release(uart_config[uart].dma);
            /* make sure the function is synchronous by waiting for the transfer to
             * finish */
            wait_for_tx_complete(uart);
            dev(uart)->CR3 &= ~USART_CR3_DMAT;
        }
        return;
    }
#endif
    /* no DMA configured (or module disabled): plain byte-wise send */
    for (size_t i = 0; i < len; i++) {
        send_byte(uart, data[i]);
    }
    /* make sure the function is synchronous by waiting for the transfer to
     * finish */
    wait_for_tx_complete(uart);
}
/**
 * Yield the CPU so a higher-priority runnable thread can be scheduled
 * (ESP32 port).
 *
 * If we are already in an interrupt handler, the function simply sets the
 * context switch flag, which indicates that the context has to be switched
 * in the _frxt_int_exit function when exiting the interrupt. Otherwise, we
 * will generate a software interrupt to force the context switch when
 * terminating the software interrupt (see thread_yield_isr).
 */
void thread_yield_higher(void)
{
    /* reset hardware watchdog */
    system_wdt_feed();

    /* yield next task */
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
    /* trace the outgoing thread: pid, name and current stack usage */
    if (sched_active_thread) {
        DEBUG("%u old task %u %s %u\n", system_get_time(),
              sched_active_thread->pid, sched_active_thread->name,
              sched_active_thread->sp - sched_active_thread-> stack_start);
    }
#endif
    if (!irq_is_in()) {
        /* generate the software interrupt to switch the context */
        DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG,
                             DPORT_CPU_INTR_FROM_CPU_0);
    }
    else {
        /* set the context switch flag */
        _frxt_setup_switch();
    }
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
    /* trace the thread that is active after the switch */
    if (sched_active_thread) {
        DEBUG("%u new task %u %s %u\n", system_get_time(),
              sched_active_thread->pid, sched_active_thread->name,
              sched_active_thread->sp - sched_active_thread-> stack_start);
    }
#endif
    /*
     * Instruction fetch synchronize: Waits for all previously fetched load,
     * store, cache, and special register write instructions that affect
     * instruction fetch to be performed before fetching the next instruction.
     */
    __asm__("isync");

    return;
}
void sched_switch(uint16_t other_prio) { thread_t *active_thread = (thread_t *) sched_active_thread; uint16_t current_prio = active_thread->priority; int on_runqueue = (active_thread->status >= STATUS_ON_RUNQUEUE); DEBUG("sched_switch: active pid=%" PRIkernel_pid" prio=%" PRIu16 " on_runqueue=%i " ", other_prio=%" PRIu16 "\n", active_thread->pid, current_prio, on_runqueue, other_prio); if (!on_runqueue || (current_prio > other_prio)) { if (irq_is_in()) { DEBUG("sched_switch: setting sched_context_switch_request.\n"); sched_context_switch_request = 1; } else { DEBUG("sched_switch: yielding immediately.\n"); thread_yield_higher(); } } else { DEBUG("sched_switch: continuing without yield.\n"); } }