void krhino_intrpt_exit(void)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    cur_cpu_num = cpu_cur_get();

    /* an exit without a matching enter is a fatal bookkeeping error */
    if (g_intrpt_nested_level[cur_cpu_num] == 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        k_err_proc(RHINO_INV_INTRPT_NESTED_LEVEL);
    }

    g_intrpt_nested_level[cur_cpu_num]--;

    /* still inside a nested interrupt: defer rescheduling to the outermost exit */
    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    /* scheduler is locked: do not preempt */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

    /* the highest-priority ready task is already running: nothing to switch */
    if (g_preferred_ready_task[cur_cpu_num] == g_active_task[cur_cpu_num]) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    TRACE_INTRPT_TASK_SWITCH(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]);

#if (RHINO_CONFIG_CPU_NUM > 1)
    g_active_task[cur_cpu_num]->cur_exc = 0;
#endif

    cpu_intrpt_switch();

    RHINO_CPU_INTRPT_ENABLE();
}
kstat_t krhino_intrpt_enter(void)
{
    CPSR_ALLOC();

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    /* refuse to nest deeper than the configured limit */
    if (g_intrpt_nested_level[cpu_cur_get()] >= RHINO_CONFIG_INTRPT_MAX_NESTED_LEVEL) {
        k_err_proc(RHINO_INTRPT_NESTED_LEVEL_OVERFLOW);
        RHINO_CPU_INTRPT_ENABLE();
        return RHINO_INTRPT_NESTED_LEVEL_OVERFLOW;
    }

    g_intrpt_nested_level[cpu_cur_get()]++;

    RHINO_CPU_INTRPT_ENABLE();

    return RHINO_SUCCESS;
}
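A minimal usage sketch (not from the source): a Rhino ISR brackets its body with krhino_intrpt_enter()/krhino_intrpt_exit() so the per-CPU nesting counter stays balanced and any needed context switch happens only at the outermost exit. The handler name and the my_device_clear_irq()/my_device_handle_rx() helpers are hypothetical.

void MY_DEVICE_IRQHandler(void)
{
    if (krhino_intrpt_enter() != RHINO_SUCCESS) {
        return; /* nesting overflow: bail out without touching the device */
    }

    my_device_clear_irq();   /* hypothetical: acknowledge the interrupt source */
    my_device_handle_rx();   /* hypothetical: do the minimal ISR work */

    krhino_intrpt_exit();    /* may trigger a task switch on the outermost exit */
}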
void USART4_5_IRQHandler(void)
{
    int  rx_ready = 0;
    char rx;

    CPSR_ALLOC();
    RHINO_CPU_INTRPT_DISABLE();

    if (LL_USART_IsActiveFlag_RXNE(USART4) &&
        (LL_USART_IsEnabledIT_RXNE(USART4) != RESET)) {
        /* no need to clear the RXNE flag: it is cleared automatically
           when the data register is read */
        rx       = LL_USART_ReceiveData8(USART4);
        rx_ready = 1;
        //PRINTF("%c\r\n", rx);
    }

    if (rx_ready) {
#ifdef CONFIG_LINKWAN_TEST
        extern void linkwan_test_cli_cb(uint8_t cmd);
        linkwan_test_cli_cb(rx);
#endif
    }

    RHINO_CPU_INTRPT_ENABLE();
}
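For the handler above to fire, the RXNE interrupt has to be enabled during initialization. A minimal sketch, assuming an STM32L0-class part where USART4_5_IRQn is the shared USART4/USART5 vector; the function name and priority value are illustrative, and the usual LL clock/pin/baud setup is omitted.

static void usart4_rx_irq_init(void)
{
    LL_USART_EnableIT_RXNE(USART4);        /* interrupt on each received byte */
    NVIC_SetPriority(USART4_5_IRQn, 2);    /* priority value is illustrative */
    NVIC_EnableIRQ(USART4_5_IRQn);
}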
unsigned int irq_lock(void)
{
    CPSR_ALLOC();
    RHINO_CPU_INTRPT_DISABLE();
    return cpsr;
}
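irq_lock() returns the saved cpsr value so the caller can restore the previous interrupt state instead of unconditionally re-enabling, which lets lock/unlock pairs nest safely. A minimal sketch of the matching unlock (not from the source), assuming RHINO_CPU_INTRPT_ENABLE() restores the cpsr local declared by CPSR_ALLOC():

void irq_unlock(unsigned int key)
{
    CPSR_ALLOC();

    cpsr = key;                /* restore the state captured by irq_lock() */
    RHINO_CPU_INTRPT_ENABLE();
}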