uint32_t fifo_out_all(struct k_fifo *fifo, void *buf)
{
    uint32_t len;

    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    len = fifo->size - fifo->free_bytes;
    if (len == 0) {
        RHINO_CRITICAL_EXIT();
        return 0;
    }

    len = internal_fifo_out_peek(fifo, buf, len);
    fifo->out        += len;
    fifo->free_bytes += len;

    RHINO_CRITICAL_EXIT();

    return len;
}
uint32_t fifo_out(struct k_fifo *fifo, void *buf, uint32_t len)
{
    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    len = internal_fifo_out_peek(fifo, buf, len);
    fifo->out        += len;
    fifo->free_bytes += len;

    RHINO_CRITICAL_EXIT();

    return len;
}
void krhino_intrpt_exit(void)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    cur_cpu_num = cpu_cur_get();

    /* leaving an ISR that was never entered is a fatal kernel error */
    if (g_intrpt_nested_level[cur_cpu_num] == 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        k_err_proc(RHINO_INV_INTRPT_NESTED_LEVEL);
    }

    g_intrpt_nested_level[cur_cpu_num]--;

    /* still inside a nested interrupt, do not reschedule yet */
    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    /* scheduler is locked, keep running the interrupted task */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

    /* highest-priority ready task is the interrupted one, no switch needed */
    if (g_preferred_ready_task[cur_cpu_num] == g_active_task[cur_cpu_num]) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    TRACE_INTRPT_TASK_SWITCH(g_active_task[cur_cpu_num],
                             g_preferred_ready_task[cur_cpu_num]);

#if (RHINO_CONFIG_CPU_NUM > 1)
    g_active_task[cur_cpu_num]->cur_exc = 0;
#endif

    cpu_intrpt_switch();

    RHINO_CPU_INTRPT_ENABLE();
}
uint32_t fifo_out_peek(struct k_fifo *fifo, void *buf, uint32_t len)
{
    uint32_t ret_len;

    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    ret_len = internal_fifo_out_peek(fifo, buf, len);

    RHINO_CRITICAL_EXIT();

    return ret_len;
}
bool mico_rtos_is_queue_empty(mico_queue_t *queue)
{
    bool ret;

    CPSR_ALLOC();

    kbuf_queue_t *q = *((kbuf_queue_t **)queue);

    RHINO_CRITICAL_ENTER();

    if (q->cur_num == 0) {
        ret = true;
    } else {
        ret = false;
    }

    RHINO_CRITICAL_EXIT();

    return ret;
}
uint32_t fifo_in(struct k_fifo *fifo, const void *buf, uint32_t len)
{
    uint32_t l;

    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    /* clamp the request to the space that is currently free */
    l = fifo_unused(fifo);
    if (len > l) {
        len = l;
    }

    fifo_copy_in(fifo, buf, len, fifo->in);
    fifo->in         += len;
    fifo->free_bytes -= len;

    RHINO_CRITICAL_EXIT();

    return len;
}
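/*
 * Usage sketch (not part of the original source): fifo_in(), fifo_out() and
 * fifo_out_all() form a byte-stream API protected by the Rhino critical
 * section macros. The fifo object below and its initialization are assumed
 * to exist elsewhere; no init function appears in this section.
 */
extern struct k_fifo rx_fifo;

static void example_producer(const uint8_t *data, uint32_t len)
{
    /* fifo_in() clamps the request to the free space, so check what was accepted */
    uint32_t stored = fifo_in(&rx_fifo, data, len);

    if (stored < len) {
        /* fifo was full: len - stored bytes were not copied in */
    }
}

static void example_consumer(void)
{
    uint8_t  chunk[64];
    uint32_t got = fifo_out(&rx_fifo, chunk, sizeof(chunk));

    /* process 'got' bytes here; fifo_out_all() would drain everything instead */
    (void)got;
}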
bool mico_rtos_is_queue_full(mico_queue_t *queue)
{
    bool ret;

    CPSR_ALLOC();

    kbuf_queue_t *q = *((kbuf_queue_t **)queue);
    uint32_t max_msg_num;

    RHINO_CRITICAL_ENTER();

    max_msg_num = (q->ringbuf.end - q->ringbuf.buf) /
                  (q->max_msg_size + COMPRESS_LEN(q->max_msg_size));

    if (q->cur_num == max_msg_num) {
        ret = true;
    } else {
        ret = false;
    }

    RHINO_CRITICAL_EXIT();

    return ret;
}
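/*
 * Illustrative sketch (assumption, not from the original source): the two
 * helpers above only report queue state and do not reserve a slot, so a
 * check-then-act sequence can still race with another producer or consumer.
 * 'msg_queue' is assumed to have been created by the usual MiCO queue init
 * call, which is not shown in this section.
 */
extern mico_queue_t msg_queue;

static void example_report_queue_state(void)
{
    if (mico_rtos_is_queue_full(&msg_queue)) {
        /* producer should back off or drop the message */
    } else if (mico_rtos_is_queue_empty(&msg_queue)) {
        /* nothing is pending for the consumer */
    }
}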
kstat_t krhino_intrpt_enter(void)
{
    CPSR_ALLOC();

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    /* reject entry when the interrupt nesting counter would overflow */
    if (g_intrpt_nested_level[cpu_cur_get()] >= RHINO_CONFIG_INTRPT_MAX_NESTED_LEVEL) {
        k_err_proc(RHINO_INTRPT_NESTED_LEVEL_OVERFLOW);
        RHINO_CPU_INTRPT_ENABLE();
        return RHINO_INTRPT_NESTED_LEVEL_OVERFLOW;
    }

    g_intrpt_nested_level[cpu_cur_get()]++;

    RHINO_CPU_INTRPT_ENABLE();

    return RHINO_SUCCESS;
}
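/*
 * Bracketing sketch (assumption, not from the original source):
 * krhino_intrpt_enter() / krhino_intrpt_exit() wrap device ISRs so the kernel
 * can track nesting and only switch tasks when the outermost ISR returns.
 * EXAMPLE_IRQHandler and rx_sem are placeholder names; krhino_sem_give() is
 * the regular Rhino semaphore-give call.
 */
extern ksem_t rx_sem;

void EXAMPLE_IRQHandler(void)
{
    krhino_intrpt_enter();     /* bump the per-CPU interrupt nesting counter */

    krhino_sem_give(&rx_sem);  /* wake the task waiting for the device */

    krhino_intrpt_exit();      /* reschedule only if this was the outermost ISR */
}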
void USART4_5_IRQHandler(void)
{
    int  rx_ready = 0;
    char rx;

    CPSR_ALLOC();

    RHINO_CPU_INTRPT_DISABLE();

    if (LL_USART_IsActiveFlag_RXNE(USART4) &&
        (LL_USART_IsEnabledIT_RXNE(USART4) != RESET)) {
        /* no need to clear the RXNE flag because it is auto cleared by reading the data */
        rx = LL_USART_ReceiveData8(USART4);
        rx_ready = 1;
        //PRINTF("%c\r\n", rx);
    }

    if (rx_ready) {
#ifdef CONFIG_LINKWAN_TEST
        extern void linkwan_test_cli_cb(uint8_t cmd);
        linkwan_test_cli_cb(rx);
#endif
    }

    RHINO_CPU_INTRPT_ENABLE();
}
void irq_unlock(unsigned int key)
{
    CPSR_ALLOC();

    cpsr = key;
    RHINO_CPU_INTRPT_ENABLE();
}
unsigned int irq_lock(void)
{
    CPSR_ALLOC();

    RHINO_CPU_INTRPT_DISABLE();

    return cpsr;
}
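/*
 * Usage sketch (assumption, not from the original source): irq_lock() returns
 * the saved CPSR state and irq_unlock() restores it, so short critical
 * sections can nest safely. 'shared_counter' is a placeholder for any data
 * that is also touched from interrupt context.
 */
extern volatile uint32_t shared_counter;

static void example_bump_shared_counter(void)
{
    unsigned int key = irq_lock();   /* save interrupt state, disable IRQs */

    shared_counter++;

    irq_unlock(key);                 /* restore the saved interrupt state */
}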