/**
 * \brief the post handler of container call, callee->caller
 * \details Invoked when a callee container returns: it unwinds the container
 *          stack, propagates the return value into the caller's saved
 *          exception frame, and switches the MPU tables back to the caller.
 * \param[in] ret_value return value from callee container
 * \param[in] sp callee container's stack pointer
 * \param[in] status32 callee container's status register
 * \return caller container's stack pointer, with bit 0 set when the caller
 *         executes in the secure world
 */
uint32_t container_call_out(uint32_t ret_value, uint32_t *sp, uint32_t status32)
{
	uint32_t src_id, dst_id;
	PROCESSOR_FRAME *src;
	uint8_t secure = 0;

	/* discard the created cpu frame, recover the original sp of destination container */
	dst_id = g_container_stack_curr_id;
	container_stack_pop(dst_id, sp, status32);

	/* NOTE(review): after the pop, g_container_stack_ptr presumably indexes
	 * the record pushed by the matching container_call_in — confirm the pop
	 * semantics in container_stack_pop */
	src_id = g_container_stack[g_container_stack_ptr].src_id;
	src = (PROCESSOR_FRAME *)g_container_stack[g_container_stack_ptr].src_sp;

	/* copy return value into the caller's saved r0 */
	src->exc_frame.r0 = ret_value;

	/* switch access control tables */
	vmpu_switch(dst_id, src_id);

	/* probe the caller's return address: if the MPU region permission
	 * register reports the S bit, the caller runs in the secure world */
	_arc_aux_write(AUX_MPU_PROBE, src->exc_frame.ret);
	if (_arc_aux_read(AUX_MPU_RPER) & (1 << AUX_MPU_RPER_BIT_S)) {
		secure = 1;
	}

	/* for all normal interrupts happened in secure world, it's handled in background container stack */
	if (secure == 1) {
		_arc_aux_write(AUX_KERNEL_SP, (uint32_t)g_container_context[0].cur_sp);
	}

	/* bit 0 tags the secure state; assumes the stored stack pointer is at
	 * least 2-byte aligned so the tag cannot corrupt the address — TODO confirm */
	return ((uint32_t) g_container_stack[g_container_stack_ptr].src_sp) | secure;
}
/** Switch the context back from the destination box to the source one.
 *
 * @internal
 *
 * In this function we keep the same naming convention of the switch-in. Hence,
 * here the destination box is the one we are leaving, the source box is the one
 * we are switching to. We do not need any input from the caller as we already
 * know where we are switching to from the stacked state.
 *
 * @warning With thread context switches there is no context switch-out, but
 * only a context switch-in (from the current thread to the next one), so this
 * function should not be used for that purpose. An error will be thrown if used
 * for thread switches.
 *
 * @param context_type kind of context switch being unwound; must be a
 *        function-bound type (gateway/ISR/debug), never an unbound one
 * @returns the popped state of the source box we are returning to
 */
TContextPreviousState * context_switch_out(TContextSwitchType context_type)
{
    uint8_t src_id, dst_id;
    uint32_t src_sp;
    TContextPreviousState * previous_state;

    /* This function is not needed for unbound context switches.
     * In those cases there is only a switch from a source box to a destination
     * box, and it can be done without state keeping. It is the host OS that
     * takes care of switching the stacks. */
    if (context_type == CONTEXT_SWITCH_UNBOUND_THREAD) {
        HALT_ERROR(NOT_ALLOWED,
            "Unbound context switching (e.g. for thread context switching) does not need to switch "
            "out. Just call the context_switch_in(...) function repeatedly to switch from one task "
            "to another.");
    }

    /* Destination box: Gather information from the current state. */
    dst_id = g_active_box;

    /* Source box: Gather information from the previous state. */
    /* This function halts if it finds an error. */
    previous_state = context_state_pop();
    src_id = previous_state->src_id;
    src_sp = previous_state->src_sp;

    /* The source/destination box IDs can be the same (for example, in IRQs). */
    if (src_id != dst_id) {
        /* Store outgoing newlib reent pointer. */
        UvisorBoxIndex * index = (UvisorBoxIndex *) g_context_current_states[dst_id].bss;
        index->bss.address_of.newlib_reent = (uint32_t) *(__uvisor_config.newlib_impure_ptr);

        /* Update the ID of the currently active box. */
        g_active_box = src_id;

        /* Update the context pointer to the one of the source box. */
        index = (UvisorBoxIndex *) g_context_current_states[src_id].bss;
        *(__uvisor_config.uvisor_box_context) = (uint32_t *) index;

        /* Switch MPU configurations. */
        /* This function halts if it finds an error. */
        vmpu_switch(dst_id, src_id);

        /* Restore incoming newlib reent pointer. */
        *(__uvisor_config.newlib_impure_ptr) = (uint32_t *) index->bss.address_of.newlib_reent;
    }

    /* Set the stack pointer for the source box. This is only needed if the
     * context switch is tied to a function.
     * Unbound context switches require the host OS to set the correct stack
     * pointer before handling execution to the unprivileged code. */
    if (context_type == CONTEXT_SWITCH_FUNCTION_GATEWAY ||
        context_type == CONTEXT_SWITCH_FUNCTION_ISR ||
        context_type == CONTEXT_SWITCH_FUNCTION_DEBUG) {
        __set_PSP(src_sp);
    }

    return previous_state;
}
/* Switch the context from the source box to the destination one, using the
 * stack pointers provided as input. */
void context_switch_in(TContextSwitchType context_type, uint8_t dst_id, uint32_t src_sp, uint32_t dst_sp)
{
    /* The currently active box is the source of the switch. */
    uint8_t src_id = g_active_box;

    /* Nothing to reconfigure when source and destination are the same box
     * (for example, in IRQs). */
    if (dst_id != src_id) {
        /* Point the shared box-context pointer at the destination box's
         * context, then mark that box as the active one. */
        UvisorBoxIndex * dst_index = (UvisorBoxIndex *) g_context_current_states[dst_id].bss;
        *(__uvisor_config.uvisor_box_context) = (uint32_t *) dst_index;
        g_active_box = dst_id;
        dst_index->box_id_self = dst_id;

        /* Switch MPU configurations. */
        /* This function halts if it finds an error. */
        vmpu_switch(src_id, dst_id);
    }

    /* Function-bound switches keep state so that the later switch-out can
     * restore the source box; unbound switches leave stack handling (and
     * state-keeping) to the host OS. */
    switch (context_type) {
    case CONTEXT_SWITCH_FUNCTION_GATEWAY:
    case CONTEXT_SWITCH_FUNCTION_ISR:
    case CONTEXT_SWITCH_FUNCTION_DEBUG:
        /* This function halts if it finds an error. */
        context_state_push(context_type, src_id, src_sp);
        __set_PSP(dst_sp);
        break;
    default:
        break;
    }
}
/**
 * \brief the handler of container call, caller -> callee
 * \details Validates the requested cross-container call, pushes the caller's
 *          state on the container stack, builds an exception frame for the
 *          callee, copies the call arguments, and switches the MPU tables.
 * \param[in] src_frame container context frame of the caller
 * \return target container's stack pointer, with bit 0 set when the callee
 *         executes in the secure world; 0 on any validation failure
 */
uint32_t container_call_in(INT_EXC_FRAME *src_frame)
{
	uint8_t src_id, dst_id;
	INT_EXC_FRAME *dst_frame;
	uint32_t dst_fn;
	/* number of arguments to pass to the target function */
	uint8_t args;
	uint32_t *src, *dst;
	uint8_t secure = 0;

	SECURESHIELD_ASSERT(src_frame != NULL);

#if ARC_FEATURE_MPU_BUILD_S == 1 && SECURESHIELD_USE_MPU_SID == 1
	/* r2 holds the target function; r1 points into the configuration
	 * table, and its offset from the table start is the container ID */
	dst_fn = src_frame->r2;

	/* BUGFIX: compute the ID into a signed, full-width variable first.
	 * The previous code assigned the pointer difference straight to the
	 * uint8_t dst_id, so a negative offset could never fail the
	 * "dst_id <= 0" test (uint8_t is unsigned) and an offset >= 256
	 * silently wrapped into the valid ID range, bypassing the bounds
	 * check entirely. */
	int32_t id_offset = (int32_t)((uint32_t *)src_frame->r1 - __secureshield_config.cfgtbl_ptr_start);

	if (id_offset <= 0 || id_offset >= (int32_t)g_vmpu_container_count) {
		SECURESHIELD_HALT("container out of range (%d)", id_offset);
		return 0;
	}
	dst_id = (uint8_t)id_offset;
#else
	uint32_t pc;
	/* get caller pc: the container call site precedes the return address */
	pc = src_frame->ret - SJLI_INSTRUCTION_LENGTH;

	if (container_call_check_magic((CONTAINER_CALL *)pc) != 0) {
		return 0;
	}

	dst_fn = container_call_get_dst_fn((CONTAINER_CALL *)pc);
	dst_id = container_call_get_dst_id((CONTAINER_CALL *)pc);

	/* the target container of container call could not be 0 (background container) */
	if (dst_id == 0) {
		return 0;
	}
#endif
	src_id = g_container_stack_curr_id;

	/* check src and dst IDs: calling into the current container is illegal */
	if (src_id == dst_id) {
		SECURESHIELD_HALT("container call is not allowed in the same container %d", src_id);
		return 0;
	}

	args = src_frame->r0; /* r0 is argument number */

	/* get the right interface from interface table */
	if (vmpu_find_interface(dst_id, (void *)dst_fn, args) == NULL) {
		SECURESHIELD_HALT("interface @%x not found in container %d", dst_fn, dst_id);
		return 0;
	}

	/* push the calling container and set the callee container */
	/* the left registers of src container will be saved later, reserve space here */
	container_stack_push(src_id, ((uint32_t *)src_frame) - ARC_CALLEE_FRAME_SIZE,
			src_frame->status32, dst_id);

	/* create the cpu frame and exception frame for the destination container */
	dst_frame = (INT_EXC_FRAME *)(g_container_context[dst_id].cur_sp - ARC_EXC_FRAME_SIZE);

	dst_frame->erbta = 0; /* erbta, is 0 the correct value? */
	dst_frame->fp = 0;
	dst_frame->lp_end = 0;
	dst_frame->lp_start = 0;
	dst_frame->lp_count = 0;

#ifdef ARC_FEATURE_CODE_DENSITY
	dst_frame->ei = 0;
	dst_frame->ldi = 0;
	dst_frame->jli = 0;
#endif
	dst_frame->ret = dst_fn; /* eret */
	dst_frame->status32 = g_container_context[dst_id].cpu_status;

#if !defined(__MW__) || !defined(_NO_SMALL_DATA_)
	/* when gp is not changed during execution and sdata is enabled, the following is meaningful */
	/* The newlib c of ARC GNU is compiled with sdata enabled */
	dst_frame->gp = src_frame->gp; /* normal world's gp is different with secure world's gp */
#endif

	/* copy parameters */
#if ARC_FEATURE_MPU_BUILD_S == 1 && SECURESHIELD_USE_MPU_SID == 1
	src = (uint32_t *)&(src_frame->r3);
#else
	src = (uint32_t *)&(src_frame->r1);
#endif
	dst = (uint32_t *)&(dst_frame->r0);

	/* r1->r0, r2->r1, ... r6->r5; args is presumably bounded by
	 * vmpu_find_interface — TODO confirm it rejects oversized counts */
	while (args--) {
		*dst = *src;
		dst++;
		src++;
	}

	/* switch access control tables */
	vmpu_switch(src_id, dst_id);

	/* probe the callee entry point: the S bit of the MPU region permission
	 * register tells whether the callee lives in a secure region */
	_arc_aux_write(AUX_MPU_PROBE, dst_fn);
	if (_arc_aux_read(AUX_MPU_RPER) & (1 << AUX_MPU_RPER_BIT_S)) {
		secure = 1;
	}

	/* for all normal interrupts happened in secure world, it's handled in background container stack */
	if (secure == 1) {
		_arc_aux_write(AUX_KERNEL_SP, (uint32_t)g_container_context[0].cur_sp);
	}

	/* TODO: need to check whether dst_frame overflows the callee's stack */
	return (((uint32_t)dst_frame) | secure);
}
/* Switch the context from the source box to the destination one, using the
 * stack pointers provided as input.
 *
 * @param context_type kind of context switch (function-bound or unbound)
 * @param dst_id       box to switch to; must be a valid box ID
 * @param src_sp       stack pointer of the outgoing box (only recorded for
 *                     function-bound switches)
 * @param dst_sp       stack pointer to install for the destination box (only
 *                     used for function-bound switches) */
void context_switch_in(TContextSwitchType context_type, uint8_t dst_id, uint32_t src_sp, uint32_t dst_sp)
{
    /* The source box is the currently active box. */
    uint8_t src_id = g_active_box;
    if (!vmpu_is_box_id_valid(src_id)) {
        /* Note: We accept that the source box ID is invalid if this is the very
         * first context switch. */
        if (context_type == CONTEXT_SWITCH_UNBOUND_FIRST) {
            src_id = dst_id;
        } else {
            HALT_ERROR(SANITY_CHECK_FAILED, "Context switch: The source box ID is out of range (%u).\r\n", src_id);
        }
    }
    if (!vmpu_is_box_id_valid(dst_id)) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Context switch: The destination box ID is out of range (%u).\r\n", dst_id);
    }

    /* The source/destination box IDs can be the same (for example, in IRQs).
     * The first unbound switch runs the full setup even when src_id == dst_id,
     * because src_id was aliased to dst_id above. */
    if (src_id != dst_id || context_type == CONTEXT_SWITCH_UNBOUND_FIRST) {
        /* Store outgoing newlib reent pointer. */
        UvisorBoxIndex * index = (UvisorBoxIndex *) g_context_current_states[src_id].bss;
        index->bss.address_of.newlib_reent = (uint32_t) *(__uvisor_config.newlib_impure_ptr);

        /* Update the context pointer to the one of the destination box. */
        index = (UvisorBoxIndex *) g_context_current_states[dst_id].bss;
        *(__uvisor_config.uvisor_box_context) = (uint32_t *) index;

        /* Update the ID of the currently active box. */
        g_active_box = dst_id;
        index->box_id_self = dst_id;

#if defined(ARCH_CORE_ARMv8M)
        /* Switch vIRQ configurations. */
        virq_switch(src_id, dst_id);
#endif /* defined(ARCH_CORE_ARMv8M) */

        /* Switch MPU configurations. */
        /* This function halts if it finds an error. */
        vmpu_switch(src_id, dst_id);

        /* Restore incoming newlib reent pointer. */
        *(__uvisor_config.newlib_impure_ptr) = (uint32_t *) index->bss.address_of.newlib_reent;
    }

    /* Push the state of the source box and set the stack pointer for the
     * destination box.
     * This is only needed if the context switch is tied to a function. Unbound
     * context switches require the host OS to set the correct stack pointer
     * before handling execution to the unprivileged code, and for the same
     * reason do not require state-keeping. */
    /* This function halts if it finds an error. */
    if (context_type == CONTEXT_SWITCH_FUNCTION_GATEWAY ||
        context_type == CONTEXT_SWITCH_FUNCTION_ISR ||
        context_type == CONTEXT_SWITCH_FUNCTION_DEBUG) {
        context_state_push(context_type, src_id, src_sp);
#if defined(ARCH_CORE_ARMv8M)
        /* FIXME: Set the right LR value depending on which NS SP is actually used. */
        __TZ_set_MSP_NS(dst_sp);
        __TZ_set_PSP_NS(dst_sp);
#else
        __set_PSP(dst_sp);
#endif
    }
}