static void uvisor_disabled_default_vector(void)
{
    uint32_t irqn, vector;
    const uint32_t *dst_box_cfgtbl_ptr;
    uint8_t dst_box_id;
    int ipsr;

    /* Get the current IRQ number from the IPSR.
     * We also allow system IRQs. This is inconsistent with the uVisor API, so
     * code written before uVisor might stop working when uVisor is enabled. */
    irqn = 0;
    ipsr = ((int) (__get_IPSR() & 0x1FF)) - NVIC_USER_IRQ_OFFSET;
    if (ipsr < NonMaskableInt_IRQn || ipsr >= NVIC_USER_IRQ_NUMBER) {
        uvisor_error(USER_NOT_ALLOWED);
    } else {
        irqn = (uint32_t) ipsr;
    }

    /* Calculate the destination box configuration pointer. */
    dst_box_id = g_uvisor_disabled_vectors[irqn].box_id;
    dst_box_cfgtbl_ptr = &__uvisor_cfgtbl_ptr_start + (uint32_t) dst_box_id;

    /* Get the IRQ handler. */
    vector = g_uvisor_disabled_vectors[irqn].vector;
    if (!vector) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    /* Switch contexts before and after executing the IRQ handler. */
    uvisor_disabled_switch_in(dst_box_cfgtbl_ptr);
    ((void (*)(void)) vector)();
    uvisor_disabled_switch_out();
}
static void uvisor_disabled_default_vector(void)
{
    uint32_t irqn, vector;
    const uint32_t *dst_box_cfgtbl_ptr;
    uint8_t dst_box_id;
    int ipsr;

    /* Get the current IRQ number from the IPSR.
     * We only allow user IRQs to be registered (NVIC). This is consistent with
     * the corresponding API when uVisor is enabled. */
    irqn = 0;
    ipsr = ((int) (__get_IPSR() & 0x1FF)) - NVIC_OFFSET;
    if (ipsr < 0 || ipsr >= NVIC_VECTORS) {
        uvisor_error(USER_NOT_ALLOWED);
    } else {
        irqn = (uint32_t) ipsr;
    }

    /* Calculate the destination box configuration pointer. */
    dst_box_id = g_uvisor_disabled_vectors[irqn].box_id;
    dst_box_cfgtbl_ptr = &__uvisor_cfgtbl_ptr_start + (uint32_t) dst_box_id;

    /* Get the IRQ handler. */
    vector = g_uvisor_disabled_vectors[irqn].vector;
    if (!vector) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    /* Switch contexts before and after executing the IRQ handler. */
    uvisor_disabled_switch_in(dst_box_cfgtbl_ptr);
    ((void (*)(void)) vector)();
    uvisor_disabled_switch_out();
}
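/* Illustrative sketch, not taken from the original sources: a minimal shape
 * for the per-IRQ bookkeeping used by the dispatcher above. The type and
 * array names here are assumptions; only the .box_id/.vector fields and the
 * user-IRQ bound (NVIC_VECTORS in the variant above) are implied by the code
 * in this file. */
typedef struct {
    uint8_t box_id;   /* Box that registered the handler. */
    uint32_t vector;  /* User ISR address, or 0 when de-registered. */
} uvisor_disabled_vector_entry_t;

static uvisor_disabled_vector_entry_t g_uvisor_disabled_vectors[NVIC_VECTORS];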
void uvisor_disabled_set_vector(uint32_t irqn, uint32_t vector)
{
    uint8_t box_id;

    /* Check IRQn.
     * We only allow user IRQs to be registered (NVIC). This is consistent with
     * the corresponding API when uVisor is enabled. */
    if (irqn >= NVIC_USER_IRQ_NUMBER) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    /* Get the current box ID.
     * We use the call stack pointer to assess the currently active box ID. */
    box_id = g_call_stack[g_call_sp];

    /* Register the IRQ.
     * If vector is 0 it corresponds to a de-registration. */
    g_uvisor_disabled_vectors[irqn].box_id = vector ? box_id : 0;
    g_uvisor_disabled_vectors[irqn].vector = vector;

    /* Register the default handler.
     * The default handler performs the context switch around the actual user
     * handler. */
    NVIC_SetVector((IRQn_Type) irqn, (uint32_t) &uvisor_disabled_default_vector);
}
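/* Illustrative sketch, not part of the original source: registering a user
 * handler from box code while uVisor is disabled. TIMER0_IRQn and
 * my_timer_isr are hypothetical names; the point is that the ISR is entered
 * through uvisor_disabled_default_vector(), which switches to the registering
 * box's context before calling it. */
static void my_timer_isr(void)
{
    /* Handle the interrupt; this runs with the registering box's context
     * already switched in. */
}

static void my_box_setup_irq(void)
{
    uvisor_disabled_set_vector(TIMER0_IRQn, (uint32_t) &my_timer_isr);
    NVIC_EnableIRQ(TIMER0_IRQn);
    /* Passing 0 as the vector later would de-register the handler. */
}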
uint32_t uvisor_disabled_get_vector(uint32_t irqn)
{
    /* Check IRQn.
     * We only allow user IRQs to be registered (NVIC). This is consistent with
     * the corresponding API when uVisor is enabled. */
    if (irqn >= NVIC_USER_IRQ_NUMBER) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    return g_uvisor_disabled_vectors[irqn].vector;
}
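/* Illustrative sketch, not part of the original source: saving the previous
 * handler before overriding it and restoring it afterwards. UART0_IRQn,
 * my_uart_isr and prev_vector are hypothetical names. */
static void my_uart_isr(void)
{
    /* Temporary handler body. */
}

static uint32_t prev_vector;

static void my_box_override_irq(void)
{
    prev_vector = uvisor_disabled_get_vector(UART0_IRQn);
    uvisor_disabled_set_vector(UART0_IRQn, (uint32_t) &my_uart_isr);
}

static void my_box_restore_irq(void)
{
    /* A saved value of 0 simply de-registers the handler again. */
    uvisor_disabled_set_vector(UART0_IRQn, prev_vector);
}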
void uvisor_disabled_switch_out(void)
{
    uint8_t src_box_id;

    /* Pop state. */
    if (g_call_sp <= 0) {
        uvisor_error(USER_NOT_ALLOWED);
    }
    src_box_id = g_call_stack[--g_call_sp];

    /* Restore the source context. */
    uvisor_ctx = g_uvisor_ctx_array[src_box_id];
}
void uvisor_disabled_switch_in(const uint32_t *dst_box_cfgtbl_ptr)
{
    uint8_t dst_box_id;

    /* Read the destination box ID. */
    dst_box_id = (uint8_t) (dst_box_cfgtbl_ptr - &__uvisor_cfgtbl_ptr_start);

    /* Allocate the box contexts if they do not exist yet. */
    if (!g_initialized) {
        uvisor_disabled_init_context();
    }
    uvisor_ctx = g_uvisor_ctx_array[dst_box_id];

    /* Push state. */
    if (g_call_sp >= UVISOR_SVC_CONTEXT_MAX_DEPTH - 1) {
        uvisor_error(USER_NOT_ALLOWED);
    }
    g_call_stack[++g_call_sp] = dst_box_id;
}
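/* Illustrative sketch, not part of the original source: how a cross-box call
 * could be bracketed manually when uVisor is disabled. The destination box is
 * addressed through its entry in the configuration-table pointer list; the
 * box index 2 and do_work_in_box_2() are hypothetical. */
static void do_work_in_box_2(void)
{
    /* Code that must run with box 2's context active. */
}

static void call_into_box_2(void)
{
    const uint32_t *dst_box_cfgtbl_ptr = &__uvisor_cfgtbl_ptr_start + 2;

    uvisor_disabled_switch_in(dst_box_cfgtbl_ptr);  /* uvisor_ctx now points at box 2's context. */
    do_work_in_box_2();
    uvisor_disabled_switch_out();                   /* The caller's context is restored. */
}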
static void uvisor_disabled_init_context(void)
{
    uint8_t box_id;
    const UvisorBoxConfig **box_cfgtbl;
    size_t context_size;

    if (g_initialized) {
        return;
    }

    /* Iterate over all box configuration tables. */
    box_id = 0;
    g_memory_position = (uint32_t) &__uvisor_bss_boxes_start;
    for (box_cfgtbl = (const UvisorBoxConfig **) &__uvisor_cfgtbl_ptr_start;
         box_cfgtbl < (const UvisorBoxConfig **) &__uvisor_cfgtbl_ptr_end;
         box_cfgtbl++) {
        /* Read the context size from the box configuration table. */
        context_size = (size_t) (*box_cfgtbl)->context_size;

        /* Initialize the box context. */
        /* Note: Box 0 technically also has a context, although we force it to
         * be zero. */
        if (!context_size) {
            g_uvisor_ctx_array[box_id] = NULL;
        } else if (!box_id) {
            uvisor_error(USER_NOT_ALLOWED);
        } else {
            /* The box context is allocated from the chunk of memory reserved
             * for the uVisor boxes' stacks and contexts. */
            /* FIXME: Since we do not currently track separate stacks when
             * uVisor is disabled, this wastes a considerable amount of memory. */
            g_uvisor_ctx_array[box_id] = (void *) g_memory_position;
            memset((void *) g_memory_position, 0, UVISOR_REGION_ROUND_UP(context_size));
            g_memory_position += UVISOR_REGION_ROUND_UP(context_size);
        }
        box_id++;
    }

    /* Do not run this again. */
    g_initialized = true;
}
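/* Illustrative sketch, not part of the original source: accessing the per-box
 * context that uvisor_disabled_init_context() carves out of the boxes' BSS
 * region. The struct layout is hypothetical; its size would correspond to the
 * context_size read from the box configuration table above, and uvisor_ctx
 * (treated here as a plain pointer, as in the functions above) is set by
 * uvisor_disabled_switch_in() before box code runs. */
typedef struct {
    uint32_t num_handled;
} my_box_context_t;

static void my_box_handler(void)
{
    my_box_context_t *ctx = (my_box_context_t *) uvisor_ctx;
    ctx->num_handled++;  /* The context was zero-initialized by the memset() above. */
}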
int main(void)
{
    //printf("\r\n***** threaded blinky uvisor-rtos example *****\r\n");
    putc('*', stdout);
    fflush(stdout);

    size_t count = 0;

    /* Start up a few RPC runners. */
    static const uint32_t stack_size = 512;
    Thread * sync_1 = new(std::nothrow) Thread(sync_runner, &main_sync_1, osPriorityNormal, stack_size);
    Thread * sync_2 = new(std::nothrow) Thread(sync_runner, &main_sync_2, osPriorityNormal, stack_size);
    Thread * sync_3 = new(std::nothrow) Thread(sync_runner, &main_sync_3, osPriorityNormal, stack_size);
    Thread * async_1 = new(std::nothrow) Thread(async_runner, &main_async_1, osPriorityNormal, stack_size);
    Thread * async_2 = new(std::nothrow) Thread(async_runner, &main_async_2, osPriorityNormal, stack_size);
    Thread * async_3 = new(std::nothrow) Thread(async_runner, &main_async_3, osPriorityNormal, stack_size);
    if (sync_1 == NULL || sync_2 == NULL || sync_3 == NULL ||
        async_1 == NULL || async_2 == NULL || async_3 == NULL) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    while (1) {
#define STATS_ENABLED 0
#if STATS_ENABLED
        uint32_t ticks_can_suspend = os_suspend();
        puts("\r\n>---------------------------------------\r\n");
        printf("ticks_can_suspend: %lu\r\n", ticks_can_suspend);
        printf("green_handler_a_1: %u\r\n", green_handler_a_1.num_handled);
        printf("green_handler_a_2: %u\r\n", green_handler_a_2.num_handled);
        printf("green_handler_ab_1: %u\r\n", green_handler_ab_1.num_handled);
        printf("green_handler_c_1: %u\r\n", green_handler_c_1.num_handled);
        puts("<---------------------------------------\r\n");
        os_resume(0); /* Pretend no ticks went by while suspended. */
#endif

        /* Spin forever. */
        Thread::wait(10000);
    }

    return 0;
}
UVISOR_WEAK void uvisor_disabled_set_vector(uint32_t irqn, uint32_t vector)
{
    uint8_t prio_bits, box_id;
    uint8_t volatile *prio;

    /* Check IRQn.
     * We only allow user IRQs to be registered (NVIC). This is consistent with
     * the corresponding API when uVisor is enabled. */
    if (irqn >= NVIC_VECTORS) {
        uvisor_error(USER_NOT_ALLOWED);
    }

    /* Get the current box ID.
     * We use the call stack pointer to assess the currently active box ID. */
    box_id = g_call_stack[g_call_sp];

    /* Set up the vector table relocation (only done once).
     * No user vector is copied, consistently with the vIRQ APIs. Instead, only
     * user vectors explicitly set using this API are registered in the table. */
    if (SCB->VTOR != (uint32_t) g_irq_table) {
        SCB->VTOR = (uint32_t) g_irq_table;

        /* Detect the number of implemented priority bits.
         * The architecture specifies that unused/not-implemented bits in the
         * NVIC IP registers read back as 0. */
        __disable_irq();
        prio = (uint8_t volatile *) &(NVIC->IP[0]);
        prio_bits = *prio;
        *prio = 0xFFU;
        g_nvic_prio_bits = (uint8_t) __builtin_popcount(*prio);
        *prio = prio_bits;
        __enable_irq();
    }

    /* Register the IRQ.
     * If vector is 0 it corresponds to a de-registration. */
    g_uvisor_disabled_vectors[irqn].box_id = vector ? box_id : 0;
    g_uvisor_disabled_vectors[irqn].vector = vector;
}
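/* Illustrative sketch, not part of the original source: a plausible definition
 * of the relocated table installed into SCB->VTOR above, assuming NVIC_OFFSET
 * system exceptions precede the NVIC_VECTORS user IRQs. On Cortex-M the table
 * must be aligned to its size rounded up to the next power of two (and to at
 * least 128 bytes); the 1024-byte alignment below is only an example for a
 * table of up to 256 words. */
static uint32_t g_irq_table[NVIC_OFFSET + NVIC_VECTORS]
    __attribute__((aligned(1024)));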