Example 1
void vmpu_acl_sram(uint8_t box_id, uint32_t bss_size, uint32_t stack_size, uint32_t * bss_start,
                   uint32_t * stack_pointer)
{
    /* Offset at which the SRAM region is configured for a secure box. This
     * offset is incremented at every function call. The actual MPU region start
     * address depends on the size and alignment of the region. */
    static uint32_t box_mem_pos = 0;
    if (box_mem_pos == 0) {
        box_mem_pos = (uint32_t) __uvisor_config.bss_boxes_start;
    }

    /* Ensure that box stack is at least UVISOR_MIN_STACK_SIZE. */
    stack_size = UVISOR_MIN_STACK(stack_size);

    /* Compute the MPU region size. */
    /* Note: This function also updates the memory offset to meet the alignment
     *       requirements. */
    uint32_t region_size = vmpu_acl_sram_region_size(&box_mem_pos, bss_size, stack_size);

    /* Allocate the subregion slots for the BSS sections and for the stack.
     * One subregion is used to allow for rounding errors (BSS), and another one
     * is used to separate the BSS sections from the stack. */
    uint32_t subregion_size = region_size / 8;
    int slots_for_bss = (bss_size + subregion_size - 1) / subregion_size;
    int slots_for_stack = slots_for_bss ? (8 - slots_for_bss - 1) : 8;

    /* Final sanity checks */
    if ((slots_for_bss * subregion_size) < bss_size) {
        HALT_ERROR(SANITY_CHECK_FAILED, "slots_ctx underrun\n\r");
    }
    if ((slots_for_stack * subregion_size) < stack_size) {
        HALT_ERROR(SANITY_CHECK_FAILED, "slots_stack underrun\n\r");
    }

    /* Set the pointers to the BSS sections and to the stack. */
    *bss_start = slots_for_bss ? box_mem_pos : (uint32_t) NULL;
    /* `(box_mem_pos + size)` is already outside the memory protected by the
     * MPU region, so a pointer 8B below stack top is chosen (8B due to stack
     * alignment requirements). */
    *stack_pointer = (box_mem_pos + region_size) - 8;

    /* Create stack protection region. */
    region_size = vmpu_region_add_static_acl(
        box_id,
        box_mem_pos,
        region_size,
        UVISOR_TACLDEF_STACK,
        slots_for_bss ? 1UL << slots_for_bss : 0
    );
    DPRINTF("  - SRAM:       0x%08X - 0x%08X (permissions: 0x%04X, subregions: 0x%02X)\r\n",
            box_mem_pos, box_mem_pos + region_size, UVISOR_TACLDEF_STACK, slots_for_bss ? 1UL << slots_for_bss : 0);

    /* Move on to the next memory block. */
    box_mem_pos += region_size;

    DPRINTF("    - BSS:      0x%08X - 0x%08X (original size: %uB, rounded size: %uB)\r\n",
            *bss_start, *bss_start + bss_size, bss_size, slots_for_bss * subregion_size);
    DPRINTF("    - Stack:    0x%08X - 0x%08X (original size: %uB, rounded size: %uB)\r\n",
            *bss_start + (slots_for_bss + 1) * subregion_size, box_mem_pos, stack_size, slots_for_stack * subregion_size);
}
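
The subregion arithmetic above is easy to check in isolation. Below is a minimal, self-contained sketch (not uVisor code) that reproduces the slot computation for hypothetical sizes; the constant 8 is the fixed number of subregions per ARMv7-M MPU region.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical sizes: 1kB of BSS, in a 4kB MPU region. */
    uint32_t bss_size = 1024;
    uint32_t region_size = 4096;

    /* Each ARMv7-M MPU region is split into 8 equally sized subregions. */
    uint32_t subregion_size = region_size / 8;                             /* 512B */
    int slots_for_bss = (bss_size + subregion_size - 1) / subregion_size;  /* 2 */
    /* One slot is lost as a guard between the BSS and the stack. */
    int slots_for_stack = slots_for_bss ? (8 - slots_for_bss - 1) : 8;     /* 5 */

    assert(slots_for_bss * subregion_size >= bss_size);
    printf("bss slots: %d, stack slots: %d\n", slots_for_bss, slots_for_stack);
    return 0;
}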
Example 2
File: virq.c Project: ccli8/uvisor
static void virq_check_acls(uint32_t irqn, uint8_t box_id)
{
    /* IRQn goes from 0 to (NVIC_VECTORS - 1) */
    if (irqn >= NVIC_VECTORS) {
        HALT_ERROR(NOT_ALLOWED, "Not allowed: IRQ %d is out of range\n\r", irqn);
    }

    /* Note: IRQs ownership is determined on a first come first served basis. */
    if (g_virq_states[irqn].box_id != UVISOR_BOX_ID_INVALID &&
        g_virq_states[irqn].box_id != box_id) {
        HALT_ERROR(PERMISSION_DENIED, "IRQ %d is owned by box %d.\r\n",
                   irqn, g_virq_states[irqn].box_id);
    }
}
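
virq_check_acls() relies on a per-IRQ ownership table whose definition is not part of this excerpt. A plausible minimal sketch, inferred from the code above; the placeholder values and the virq_claim() helper are illustrative, not the real uVisor definitions:

#include <stdint.h>

/* Placeholder values for illustration; the real constants live in the
 * uVisor headers. */
#define NVIC_VECTORS          64
#define UVISOR_BOX_ID_INVALID 0xFF

/* Hypothetical ownership record, one entry per NVIC vector. */
typedef struct {
    uint8_t box_id;  /* Owning box, or UVISOR_BOX_ID_INVALID if unclaimed. */
} TVirqState;

static TVirqState g_virq_states[NVIC_VECTORS];

/* First come, first served: the first box that passes the ACL check
 * above claims the IRQ for itself. */
static void virq_claim(uint32_t irqn, uint8_t box_id)
{
    g_virq_states[irqn].box_id = box_id;
}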
Example 3
void vmpu_order_boxes(int * const best_order, int box_count)
{
    /* Start with the boxes in the same order as the configuration table pointers. */
    int box_order[UVISOR_MAX_BOXES];
    for (int i = 0; i < box_count; ++i) {
        box_order[i] = i;
    }
    
    uint32_t total_sram_size = 0;
    if (box_count > 1) {
        /* Find the total amount of SRAM used by all the boxes.
         * This function also updates the best_order array with the configuration
         * that minimizes the SRAM usage. */
        total_sram_size = __vmpu_order_boxes(box_order, best_order, 1, box_count - 1, UINT32_MAX);
    }
    /* This helper message allows people to work around the linker script
     * limitation that prevents us from allocating the correct amount of memory
     * at link time. */
    uint32_t available_sram_size = (uint32_t) __uvisor_config.bss_boxes_end - (uint32_t) __uvisor_config.bss_boxes_start;
    if (available_sram_size < total_sram_size) {
        DPRINTF("Not enough memory allocated for the secure boxes. This is a known limitation of the ARMv7-M MPU.\r\n");
        DPRINTF("Please insert the following snippet in your public box file (usually main.cpp):\r\n");
        DPRINTF("uint8_t __attribute__((section(\".keep.uvisor.bss.boxes\"), aligned(32))) __boxes_overhead[%d];\r\n",
                total_sram_size - available_sram_size);
        HALT_ERROR(SANITY_CHECK_FAILED, "Secure boxes memory overflow. See message above to fix it.\r\n");
    }
}
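
For reference, the workaround suggested by the DPRINTF above lands in the public box file roughly as follows; the array length 108 is only an example value, the real number is the byte count uVisor prints at boot:

/* Reserves the extra SRAM that the linker script could not account for.
 * Placed in the public box file (usually main.cpp). */
uint8_t __attribute__((section(".keep.uvisor.bss.boxes"), aligned(32)))
    __boxes_overhead[108];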
Example 4
/** Switch the context back from the destination box to the source one.
 *
 * @internal
 *
 * In this function we keep the same naming convention of the switch-in. Hence,
 * here the destination box is the one we are leaving, the source box is the one
 * we are switching to. We do not need any input from the caller as we already
 * know where we are switching to from the stacked state.
 *
 * @warning With thread context switches there is no context switch-out, but
 * only a context switch-in (from the current thread to the next one), so this
 * function should not be used for that purpose. An error will be thrown if used
 * for thread switches. */
TContextPreviousState * context_switch_out(TContextSwitchType context_type)
{
    uint8_t src_id, dst_id;
    uint32_t src_sp;
    TContextPreviousState * previous_state;

    /* This function is not needed for unbound context switches.
     * In those cases there is only a switch from a source box to a destination
     * box, and it can be done without state keeping. It is the host OS that
     * takes care of switching the stacks. */
    if (context_type == CONTEXT_SWITCH_UNBOUND_THREAD) {
        HALT_ERROR(NOT_ALLOWED, "Unbound context switching (e.g. for thread context switching) does not need to switch "
                                "out. Just call the context_switch_in(...) function repeatedly to switch from one task "
                                "to another.");
    }

    /* Destination box: Gather information from the current state. */
    dst_id = g_active_box;

    /* Source box: Gather information from the previous state. */
    /* This function halts if it finds an error. */
    previous_state = context_state_pop();
    src_id = previous_state->src_id;
    src_sp = previous_state->src_sp;

    /* The source/destination box IDs can be the same (for example, in IRQs). */
    if (src_id != dst_id) {
        /* Store outgoing newlib reent pointer. */
        UvisorBoxIndex * index = (UvisorBoxIndex *) g_context_current_states[dst_id].bss;
        index->bss.address_of.newlib_reent = (uint32_t) *(__uvisor_config.newlib_impure_ptr);

        /* Update the ID of the currently active box. */
        g_active_box = src_id;
        /* Update the context pointer to the one of the source box. */
        index = (UvisorBoxIndex *) g_context_current_states[src_id].bss;
        *(__uvisor_config.uvisor_box_context) = (uint32_t *) index;

        /* Switch MPU configurations. */
        /* This function halts if it finds an error. */
        vmpu_switch(dst_id, src_id);

        /* Restore incoming newlib reent pointer. */
        *(__uvisor_config.newlib_impure_ptr) = (uint32_t *) index->bss.address_of.newlib_reent;
    }

    /* Set the stack pointer for the source box. This is only needed if the
     * context switch is tied to a function.
     * Unbound context switches require the host OS to set the correct stack
     * pointer before handing execution to the unprivileged code. */
    if (context_type == CONTEXT_SWITCH_FUNCTION_GATEWAY ||
        context_type == CONTEXT_SWITCH_FUNCTION_ISR     ||
        context_type == CONTEXT_SWITCH_FUNCTION_DEBUG) {
        __set_PSP(src_sp);
    }

    return previous_state;
}
Example 5
/** Pop the previous state from the state stack.
 *
 * @internal
 *
 * @warning This function trusts all the arguments that are passed to it. Input
 * verification should be performed by the caller.
 *
 * @returns the pointer to the previous box context state. */
static TContextPreviousState * context_state_pop(void)
{
    /* Check that the state stack does not underflow. */
    if (!g_context_p) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Context state stack underflow");
    }

    /* Pop the source box state and return it to the caller. */
    --g_context_p;
    return &g_context_previous_states[g_context_p];
}
Example 6
File: vmpu.c Project: kedars/uvisor
static void vmpu_sanity_check_box_namespace(int box_id, const char *const box_namespace)
{
    /* Verify that all characters of the box_namespace (including the trailing
     * NUL) are within flash and that the box_namespace is not too long. It is
     * also okay for the box namespace to be NULL. */
    size_t length = 0;

    if (box_namespace == NULL) {
        return;
    }

    do {
        /* Check that the address of the character is within public flash before
         * reading the character. */
        /* Note: The public flash section is assumed to be monolithic, so if
         * both the start and end address of an array are inside the public
         * flash, then the whole array is inside the public flash. */
        if (!vmpu_public_flash_addr((uint32_t) &box_namespace[0]) ||
            !vmpu_public_flash_addr((uint32_t) &box_namespace[length])) {
            HALT_ERROR(SANITY_CHECK_FAILED, "box[%i] @0x%08X - namespace not entirely in public flash\n",
                box_id, box_namespace, UVISOR_MAX_BOX_NAMESPACE_LENGTH);
        }

        if (box_namespace[length] == '\0') {
            /* If we reached the end of the string, which we now know is stored
             * in flash, then we are done. */
            break;
        }

        ++length;

        if (length >= UVISOR_MAX_BOX_NAMESPACE_LENGTH) {
            HALT_ERROR(SANITY_CHECK_FAILED,
                "box[%i] @0x%08X - namespace too long (length >= %u)\n",
                box_id, box_namespace, UVISOR_MAX_BOX_NAMESPACE_LENGTH);
        }
    } while (box_namespace[length]);
}
Example 7
bool OscHandler::InitReceiveConnection( int a_port_num )
{
	m_socket.bindTo( a_port_num );

	if ( !m_socket.isOk() )
	{
		MESSAGE_BOX("Socket Bind Failed", "Cannot bind to socket %d", a_port_num );
		LOG( "Cannot bind to socket %d", a_port_num );
		HALT_ERROR();
		return false;
	}

	return true;
}
Example 8
/** Push the previous state onto the state stack and update the current state.
 *
 * @internal
 *
 * @warning This function trusts all the arguments that are passed to it. Input
 * verification should be performed by the caller.
 *
 * @param[in] context_type  Type of context switch to perform
 * @param[in] src_id        ID of the box we are switching context from
 * @param[in] src_sp        Stack pointer of the box we are switching from */
static void context_state_push(TContextSwitchType context_type, uint8_t src_id, uint32_t src_sp)
{
    /* Check that the state stack does not overflow. */
    if (g_context_p >= UVISOR_CONTEXT_MAX_DEPTH) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Context state stack overflow");
    }

    /* Push the source box state to the state stack. */
    g_context_previous_states[g_context_p].type = context_type;
    g_context_previous_states[g_context_p].src_id = src_id;
    g_context_previous_states[g_context_p].src_sp = src_sp;
    ++g_context_p;

    /* Update the current state of the source box. */
    g_context_current_states[src_id].sp = src_sp;
}
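
context_state_push() and context_state_pop() (Example 5) operate on a small fixed-depth stack. A sketch of the backing declarations, inferred from the fields used above and not the verbatim uVisor definitions:

#include <stdint.h>

/* Placeholder depth for illustration; the real value comes from the
 * uVisor configuration headers. */
#define UVISOR_CONTEXT_MAX_DEPTH 16

typedef int TContextSwitchType;  /* Stand-in for the real enum. */

/* Inferred from the fields used in context_state_push()/_pop(). */
typedef struct {
    TContextSwitchType type;  /* Kind of switch that created this entry. */
    uint8_t src_id;           /* Box we switched away from. */
    uint32_t src_sp;          /* Its stack pointer at switch time. */
} TContextPreviousState;

static TContextPreviousState g_context_previous_states[UVISOR_CONTEXT_MAX_DEPTH];
static uint32_t g_context_p;  /* Number of entries currently on the stack. */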
Example 9
File: vmpu.c Project: kedars/uvisor
static int copy_box_namespace(const char *src, char *dst)
{
    int bytes_copied;

    /* Copy the box namespace to the client-provided destination. */
    for (bytes_copied = 0; bytes_copied < UVISOR_MAX_BOX_NAMESPACE_LENGTH; bytes_copied++) {
        vmpu_unpriv_uint8_write((uint32_t)&dst[bytes_copied], src[bytes_copied]);

        if (src[bytes_copied] == '\0') {
            /* We've reached the end of the box namespace. */
            ++bytes_copied; /* Include the terminating-null in bytes_copied. */
            goto done;
        }
    }

    /* We did not find a terminating null in the src. The src has been verified
     * in vmpu_box_namespace_from_id as being in the box config table. It is a
     * programmer error if the namespace in the box config table is not
     * null-terminated, so we halt. */
    HALT_ERROR(SANITY_CHECK_FAILED, "vmpu: Box namespace missing terminating-null\r\n");

done:
    return bytes_copied;
}
Example 10
/* Switch the context from the source box to the destination one, using the
 * stack pointers provided as input. */
void context_switch_in(TContextSwitchType context_type, uint8_t dst_id, uint32_t src_sp, uint32_t dst_sp)
{
    /* The source box is the currently active box. */
    uint8_t src_id = g_active_box;
    if (!vmpu_is_box_id_valid(src_id)) {
        /* Note: We accept that the source box ID is invalid if this is the very
         *       first context switch. */
        if (context_type == CONTEXT_SWITCH_UNBOUND_FIRST) {
            src_id = dst_id;
        } else {
            HALT_ERROR(SANITY_CHECK_FAILED, "Context switch: The source box ID is out of range (%u).\r\n", src_id);
        }
    }
    if (!vmpu_is_box_id_valid(dst_id)) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Context switch: The destination box ID is out of range (%u).\r\n", dst_id);
    }

    /* The source/destination box IDs can be the same (for example, in IRQs). */
    if (src_id != dst_id || context_type == CONTEXT_SWITCH_UNBOUND_FIRST) {
        /* Store outgoing newlib reent pointer. */
        UvisorBoxIndex * index = (UvisorBoxIndex *) g_context_current_states[src_id].bss;
        index->bss.address_of.newlib_reent = (uint32_t) *(__uvisor_config.newlib_impure_ptr);

        /* Update the context pointer to the one of the destination box. */
        index = (UvisorBoxIndex *) g_context_current_states[dst_id].bss;
        *(__uvisor_config.uvisor_box_context) = (uint32_t *) index;

        /* Update the ID of the currently active box. */
        g_active_box = dst_id;
        index->box_id_self = dst_id;

#if defined(ARCH_CORE_ARMv8M)
        /* Switch vIRQ configurations. */
        virq_switch(src_id, dst_id);
#endif /* defined(ARCH_CORE_ARMv8M) */

        /* Switch MPU configurations. */
        /* This function halts if it finds an error. */
        vmpu_switch(src_id, dst_id);

        /* Restore incoming newlib reent pointer. */
        *(__uvisor_config.newlib_impure_ptr) = (uint32_t *) index->bss.address_of.newlib_reent;
    }

    /* Push the state of the source box and set the stack pointer for the
     * destination box.
     * This is only needed if the context switch is tied to a function. Unbound
     * context switches require the host OS to set the correct stack pointer
     * before handing execution to the unprivileged code, and for the same
     * reason do not require state-keeping.  */
    /* This function halts if it finds an error. */
    if (context_type == CONTEXT_SWITCH_FUNCTION_GATEWAY ||
        context_type == CONTEXT_SWITCH_FUNCTION_ISR     ||
        context_type == CONTEXT_SWITCH_FUNCTION_DEBUG) {
        context_state_push(context_type, src_id, src_sp);
#if defined(ARCH_CORE_ARMv8M)
        /* FIXME: Set the right LR value depending on which NS SP is actually used. */
        __TZ_set_MSP_NS(dst_sp);
        __TZ_set_PSP_NS(dst_sp);
#else
        __set_PSP(dst_sp);
#endif
    }
}
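
The reent pointer handling above gives each box its own newlib state (errno, stdio buffers, and so on) by saving and restoring newlib's global reent pointer around the switch. Conceptually, and with illustrative names only (a sketch, not the uVisor implementation):

/* Conceptual sketch of the per-box newlib reent swap performed above. */
struct _reent;
extern struct _reent * _impure_ptr;  /* newlib's global reent pointer */

static void swap_reent(uint32_t * outgoing_slot, uint32_t incoming_slot)
{
    /* Save the outgoing box's reent pointer into its own index... */
    *outgoing_slot = (uint32_t) _impure_ptr;
    /* ...then install the pointer previously saved by the incoming box. */
    _impure_ptr = (struct _reent *) incoming_slot;
}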
Example 11
File: vmpu.c Project: kedars/uvisor
static int vmpu_sanity_checks(void)
{
    /* Verify the uVisor configuration structure. */
    if (__uvisor_config.magic != UVISOR_MAGIC) {
        HALT_ERROR(SANITY_CHECK_FAILED,
            "config magic mismatch: &0x%08X = 0x%08X - exptected 0x%08X\n",
            &__uvisor_config, __uvisor_config.magic, UVISOR_MAGIC);
    }

    /* Verify basic assumptions about vmpu_bits/__builtin_clz. */
    assert(__builtin_clz(0) == 32);
    assert(__builtin_clz(1UL << 31) == 0);
    assert(vmpu_bits(0) == 0);
    assert(vmpu_bits(1UL << 31) == 32);
    assert(vmpu_bits(0x8000UL) == 16);
    assert(vmpu_bits(0x8001UL) == 16);
    assert(vmpu_bits(1) == 1);

    /* Verify that the core version is the same as expected. */
    if (!CORE_VERSION_CHECK() || !CORE_REVISION_CHECK()) {
        HALT_ERROR(SANITY_CHECK_FAILED, "This core is unsupported or there is a mismatch between the uVisor "
                                        "configuration you are using and the core this configuration supports.\n\r");
    }

    /* Verify that the known hard-coded symbols are equal to the ones taken from
     * the host linker script. */
    assert((uint32_t) __uvisor_config.flash_start == FLASH_ORIGIN);
    assert((uint32_t) __uvisor_config.sram_start == SRAM_ORIGIN);

    /* Verify that the uVisor binary blob is positioned at the flash offset. */
    assert(((uint32_t) __uvisor_config.flash_start + FLASH_OFFSET) == (uint32_t) __uvisor_config.main_start);

    /* Verify that the uVisor mode configuration is inside the public flash. */
    assert(vmpu_public_flash_addr((uint32_t) __uvisor_config.mode));
    assert(*(__uvisor_config.mode) <= 2);
    DPRINTF("uVisor mode: %u\n", *(__uvisor_config.mode));

    /* Verify the SRAM relocation. */
    /* Note: SRAM_ORIGIN + SRAM_OFFSET is assumed to be aligned to 32 bytes. */
    assert((uint32_t) __uvisor_config.bss_start == (SRAM_ORIGIN + SRAM_OFFSET));

    DPRINTF("uvisor_ram : @0x%08X (%u bytes) [config]\n",
        __uvisor_config.bss_main_start,
        VMPU_REGION_SIZE(__uvisor_config.bss_main_start, __uvisor_config.bss_main_end));
    DPRINTF("             @0x%08X (%u bytes) [linker]\n",
        SRAM_ORIGIN + SRAM_OFFSET,
        UVISOR_SRAM_LENGTH_USED);

    /* Verify that the sections inside the BSS region are disjoint. */
    DPRINTF("bss_boxes  : @0x%08X (%u bytes) [config]\n",
        __uvisor_config.bss_boxes_start,
        VMPU_REGION_SIZE(__uvisor_config.bss_boxes_start, __uvisor_config.bss_boxes_end));
    assert(__uvisor_config.bss_end > __uvisor_config.bss_start);
    assert(__uvisor_config.bss_main_end > __uvisor_config.bss_main_start);
    assert(__uvisor_config.bss_boxes_end > __uvisor_config.bss_boxes_start);
    assert((__uvisor_config.bss_main_start >= __uvisor_config.bss_boxes_end) ||
           (__uvisor_config.bss_main_end <= __uvisor_config.bss_boxes_start));

    /* Verify the uVisor expectations regarding its own memories. */
    assert(VMPU_REGION_SIZE(__uvisor_config.bss_main_start, __uvisor_config.bss_main_end) == UVISOR_SRAM_LENGTH_USED);
    assert((uint32_t) __uvisor_config.bss_main_end == (SRAM_ORIGIN + SRAM_OFFSET + UVISOR_SRAM_LENGTH_USED));
    assert((uint32_t) __uvisor_config.bss_main_end == (SRAM_ORIGIN + UVISOR_SRAM_LENGTH_PROTECTED));

    /* Verify SRAM sections are within uVisor's own SRAM. */
    assert(&__bss_start__ >= __uvisor_config.bss_main_start);
    assert(&__bss_end__ <= __uvisor_config.bss_main_end);
    assert(&__data_start__ >= __uvisor_config.bss_main_start);
    assert(&__data_end__ <= __uvisor_config.bss_main_end);
    assert(&__stack_start__ >= __uvisor_config.bss_main_start);
    assert(&__stack_end__ <= __uvisor_config.bss_main_end);

    /* Verify that the secure flash area is accessible and after public code. */
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.secure_start));
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.secure_end));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.secure_start));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.secure_end));
    assert(__uvisor_config.secure_start <= __uvisor_config.secure_end);
    assert(__uvisor_config.secure_start >= __uvisor_config.main_end);

    /* Verify the configuration table. */
    assert(__uvisor_config.cfgtbl_ptr_start <= __uvisor_config.cfgtbl_ptr_end);
    assert(__uvisor_config.cfgtbl_ptr_start >= __uvisor_config.secure_start);
    assert(__uvisor_config.cfgtbl_ptr_end <= __uvisor_config.secure_end);
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.cfgtbl_ptr_start));
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.cfgtbl_ptr_end));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.cfgtbl_ptr_start));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.cfgtbl_ptr_end));

    /* Verify the register gateway pointers section. */
    assert(__uvisor_config.register_gateway_ptr_start <= __uvisor_config.register_gateway_ptr_end);
    assert(__uvisor_config.register_gateway_ptr_start >= __uvisor_config.secure_start);
    assert(__uvisor_config.register_gateway_ptr_end <= __uvisor_config.secure_end);
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.register_gateway_ptr_start));
    assert(!vmpu_public_flash_addr((uint32_t) __uvisor_config.register_gateway_ptr_end));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.register_gateway_ptr_start));
    assert(vmpu_flash_addr((uint32_t) __uvisor_config.register_gateway_ptr_end));

    /* Verify that every register gateway in memory is aligned to 4 bytes. */
    uint32_t * register_gateway = __uvisor_config.register_gateway_ptr_start;
    for (; register_gateway < __uvisor_config.register_gateway_ptr_end; register_gateway++) {
        if (*register_gateway & 0x3) {
            HALT_ERROR(SANITY_CHECK_FAILED, "Register gateway 0x%08X is not aligned to 4 bytes",
                (uint32_t) register_gateway);
        }
    }

    /* Return an error if uVisor is disabled. */
    if (!__uvisor_config.mode || (*__uvisor_config.mode == 0)) {
        return -1;
    } else {
        return 0;
    }
}
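
Several asserts above use VMPU_REGION_SIZE, which is not defined in this excerpt. Judging from how it is used (end minus start), a compatible sketch would be:

/* Assumed semantics, inferred from the asserts above: the byte length of
 * the [start, end) range. The actual uVisor macro may differ. */
#define VMPU_REGION_SIZE(start, end) \
    ((uint32_t) (end) - (uint32_t) (start))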
Example 12
File: vmpu.c Project: Nienzu/uvisor
static void vmpu_load_boxes(void)
{
    int i, count;
    const UvisorBoxAclItem *region;
    const UvisorBoxConfig **box_cfgtbl;
    uint8_t box_id;

    /* enumerate and initialize boxes */
    g_vmpu_box_count = 0;
    for(box_cfgtbl = (const UvisorBoxConfig**) __uvisor_config.cfgtbl_ptr_start;
        box_cfgtbl < (const UvisorBoxConfig**) __uvisor_config.cfgtbl_ptr_end;
        box_cfgtbl++
        )
    {
        /* ensure that configuration resides in flash */
        if(!(VMPU_FLASH_ADDR(*box_cfgtbl) &&
            VMPU_FLASH_ADDR(
                ((uint8_t*)(*box_cfgtbl)) + (sizeof(**box_cfgtbl)-1)
            )))
            HALT_ERROR(SANITY_CHECK_FAILED,
                "invalid address - \
                *box_cfgtbl must point to flash (0x%08X)\n", *box_cfgtbl);

        /* check for magic value in box configuration */
        if(((*box_cfgtbl)->magic)!=UVISOR_BOX_MAGIC)
            HALT_ERROR(SANITY_CHECK_FAILED,
                "box[%i] @0x%08X - invalid magic\n",
                g_vmpu_box_count,
                (uint32_t)(*box_cfgtbl)
            );

        /* check for version value in box configuration */
        if(((*box_cfgtbl)->version)!=UVISOR_BOX_VERSION)
            HALT_ERROR(SANITY_CHECK_FAILED,
                "box[%i] @0x%08X - invalid version (0x%04X!-0x%04X)\n",
                g_vmpu_box_count,
                *box_cfgtbl,
                (*box_cfgtbl)->version,
                UVISOR_BOX_VERSION
            );

        /* increment box counter */
        if((box_id = g_vmpu_box_count++)>=UVISOR_MAX_BOXES)
            HALT_ERROR(SANITY_CHECK_FAILED, "box number overflow\n");

        /* load box ACLs in table */
        DPRINTF("box[%i] ACL list:\n", box_id);

        /* add ACLs for all box stacks; the actual start addresses and
         * sizes are resolved later in vmpu_initialize_stacks */
        vmpu_acl_stack(
            box_id,
            (*box_cfgtbl)->context_size,
            (*box_cfgtbl)->stack_size
        );

        /* enumerate box ACLs */
        if( (region = (*box_cfgtbl)->acl_list)!=NULL )
        {
            count = (*box_cfgtbl)->acl_count;
            for(i=0; i<count; i++)
            {
                /* ensure that ACL resides in flash */
                if(!VMPU_FLASH_ADDR(region))
                    HALT_ERROR(SANITY_CHECK_FAILED,
                        "box[%i]:acl[%i] must be in code section (@0x%08X)\n",
                        box_id,
                        i,
                        *box_cfgtbl
                    );

                /* add ACL, and force entry as user-provided */
                if(region->acl & UVISOR_TACL_IRQ)
                    vmpu_acl_irq(box_id, region->param1, region->param2);
                else
                    vmpu_acl_add(
                        box_id,
                        region->param1,
                        region->param2,
                        region->acl | UVISOR_TACL_USER
                    );

                /* proceed to next ACL */
                region++;
            }
        }
    }

    /* load box 0 */
    vmpu_load_box(0);

    DPRINTF("vmpu_load_boxes [DONE]\n");
}
Example 13
void vmpu_arch_init_hw(void)
{
    /* Enable the public Flash. */
    vmpu_mpu_set_static_acl(
        0,
        FLASH_ORIGIN,
        ((uint32_t) __uvisor_config.secure_end) - FLASH_ORIGIN,
        UVISOR_TACLDEF_SECURE_CONST | UVISOR_TACL_EXECUTE,
        0
    );

    /* Enable the public SRAM:
     *
     * We use one region for this, which start at SRAM origin (which is always
     * aligned) and has a power-of-two size that is equal or _larger_ than
     * SRAM. This means the region may end _behind_ the end of SRAM!
     *
     * At the beginning of SRAM uVisor places its private BSS section and
     * behind that the page heap. In order to use only one region, we require
     * the end of the page heap to align with 1/8th of the region size, so that
     * we can use the subregion mask.
     * The page heap reduces the memory wastage to less than one page size, by
     * "growing" the page heap downwards from the subregion alignment towards
     * the uVisor bss.
     *
     * Note: The correct alignment needs to be done in the host linkerscript.
     *       Use `ALIGN( (1 << LOG2CEIL(LENGTH(SRAM)) / 8 )` for GNU linker.
     *
     *     2^n     <-- region end
     *     ...
     * .---------. <-- uvisor_config.sram_end
     * |  box 0  |
     * | public  |
     * | memory  |
     * +---------+ <-- uvisor_config.page_end: n/8th of _region_ size (not SRAM size)
     * |  page   |
     * |  heap   |
     * +---------+ <-- aligned to page size
     * | wastage | <-- wasted SRAM is less than 1 page size
     * +---------+ <-- uvisor_config.page_start
     * |  uVisor |
     * |   bss   |
     * '---------' <-- uvisor_config.sram_start, region start
     *
     * Example: The region size of a 96kB SRAM will be 128kB, and the page heap
     *          end will have to be aligned to 16kB, _not_ 12kB (= 96kB / 8).
     *
     * Note: In case the uVisor bss section is moved to another memory region
     *       (tightly-coupled memory for example), the page heap remains and
     *       the same considerations apply. Therefore the uVisor bss section
     *       location has no impact on this.
     */

    /* Calculate the region size by rounding up the SRAM size to the next power-of-two. */
    const uint32_t total_size = (1 << vmpu_region_bits((uint32_t) __uvisor_config.public_sram_end -
                                                       (uint32_t) __uvisor_config.public_sram_start));
    /* The alignment is 1/8th of the region size = rounded up SRAM size. */
    const uint32_t subregions_size = total_size / 8;
    const uint32_t protected_size = (uint32_t) __uvisor_config.page_end - (uint32_t) __uvisor_config.public_sram_start;
    /* The protected size must be aligned to the subregion size. */
    if (protected_size % subregions_size != 0) {
        HALT_ERROR(SANITY_CHECK_FAILED,
                   "The __uvisor_page_end symbol (0x%08X) is not aligned to an MPU subregion boundary.",
                   (uint32_t) __uvisor_config.page_end);
    }
    /* Note: It's called the subregion _disable_ mask, so setting one bit in it _disables_ the
     *       permissions in this subregion. Totally not confusing, amiright!? */
    const uint8_t subregions_disable_mask = (uint8_t) ((1UL << (protected_size / subregions_size)) - 1UL);

    /* Unlock the upper SRAM subregion only. */
    /* Note: We allow code execution for backwards compatibility. Both the user
     *       and superuser flags are set since the ARMv7-M MPU cannot
     *       distinguish between the two. */
    vmpu_mpu_set_static_acl(
        1,
        (uint32_t) __uvisor_config.public_sram_start,
        total_size,
        UVISOR_TACLDEF_DATA | UVISOR_TACL_EXECUTE,
        subregions_disable_mask
    );

    /* On page heap alignments:
     *
     * Individual pages in the page heap are protected by subregions. A page of
     * size 2^N must have its start address aligned to 2^N. However, for page
     * sizes > 1/8th region size, the start address is not guaranteed to be
     * aligned to 2^N.
     *
     * Example: 2^N = page size, 2^(N-1) = 1/8th SRAM (32kB page size in a 128kB SRAM).
     *
     * |           |
     * +-----------+ <-- uvisor_config.page_end: 0x30000 = 3/8th * 128kB is aligned to 16kB.
     * | 32kB page |
     * +-----------+ <-- page start address: 0x30000 - 32kB = 0x10000 is not aligned to 32kB!!
     * |           |
     *
     * Due to these contradicting alignment requirements, it is not possible to
     * have a page size larger than 1/8th region size.
     */
    if (subregions_size < *__uvisor_config.page_size) {
        HALT_ERROR(SANITY_CHECK_FAILED,
                   "The page size (%ukB) must not be larger than 1/8th of SRAM (%ukB).",
                   *__uvisor_config.page_size / 1024, subregions_size / 1024);
    }
}
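
Plugging the 96kB SRAM example from the comment into the mask computation: total_size rounds up to 128kB, each subregion covers 16kB, and a page heap ending 48kB into the region disables the three lowest subregions. A standalone sketch of that arithmetic (the 48kB protected size is a made-up example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Values from the comment above: 96kB SRAM rounds up to a 128kB MPU
     * region, so each of the 8 subregions covers 16kB. */
    uint32_t total_size = 128 * 1024;
    uint32_t subregions_size = total_size / 8;  /* 16kB */
    /* Hypothetical: uVisor bss + page heap occupy the first 48kB. */
    uint32_t protected_size = 48 * 1024;

    /* One disable bit per covered subregion: 48kB / 16kB = 3 low bits. */
    uint8_t mask = (uint8_t) ((1UL << (protected_size / subregions_size)) - 1UL);
    printf("subregion disable mask: 0x%02X\n", mask);  /* prints 0x07 */
    return 0;
}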
Example 14
uint32_t vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
{
    uint32_t pc;
    uint32_t fault_addr, fault_status;

    /* The IPSR enumerates interrupt numbers from 0 up, while *_IRQn numbers
     * are both positive (hardware IRQn) and negative (system IRQn); here we
     * convert the IPSR value to this latter encoding. */
    int ipsr = ((int) (__get_IPSR() & 0x1FF)) - NVIC_OFFSET;

    /* Determine the origin of the exception. */
    bool from_psp = EXC_FROM_PSP(lr);
    uint32_t sp = from_psp ? __get_PSP() : msp;

    switch (ipsr) {
        case NonMaskableInt_IRQn:
            HALT_ERROR(NOT_IMPLEMENTED, "No NonMaskableInt IRQ handler registered.");
            break;

        case HardFault_IRQn:
            DEBUG_FAULT(FAULT_HARD, lr, sp);
            HALT_ERROR(FAULT_HARD, "Cannot recover from a hard fault.");
            lr = debug_box_enter_from_priv(lr);
            break;

        case MemoryManagement_IRQn:
            fault_status = VMPU_SCB_MMFSR;

            /* If we hit a stacking or unstacking fault, we cannot read the
             * pc at fault. */
            if (fault_status & (SCB_CFSR_MSTKERR_Msk | SCB_CFSR_MUNSTKERR_Msk)) {
                /* Fake pc */
                pc = 0x0;

                /* The stack pointer is at fault. MMFAR doesn't contain a
                 * valid fault address. */
                fault_addr = sp;
            } else {
                /* pc at fault */
                if (from_psp) {
                    pc = vmpu_unpriv_uint32_read(sp + (6 * 4));
                } else {
                    /* We can be privileged here if we tried doing an ldrt or
                     * strt to a region not currently loaded in the MPU. In
                     * such cases, we are reading from the msp and shouldn't go
                     * through vmpu_unpriv_uint32_read. A box wouldn't have
                     * access to our stack. */
                    pc = *(uint32_t *) (msp + (6 * 4));
                }

                /* Backup fault address and status */
                fault_addr = SCB->MMFAR;
            }

            /* Check if the fault is an MPU fault. */
            if (vmpu_fault_recovery_mpu(pc, sp, fault_addr, fault_status)) {
                VMPU_SCB_MMFSR = fault_status;
                return lr;
            }

            /* If recovery was not successful, throw an error and halt. */
            DEBUG_FAULT(FAULT_MEMMANAGE, lr, sp);
            VMPU_SCB_MMFSR = fault_status;
            HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied.");
            lr = debug_box_enter_from_priv(lr);
            break;

        case BusFault_IRQn:
            /* Bus faults can be used in a "managed" way, triggered to let
             * uVisor handle some restricted registers.
             * Note: This feature will not be needed anymore when the
             * register-level will be implemented. */

            /* Note: All recovery functions update the stacked stack pointer so
             * that exception return points to the correct instruction. */

            /* Currently we only support recovery from unprivileged mode. */
            if (from_psp) {
                /* pc at fault */
                pc = vmpu_unpriv_uint32_read(sp + (6 * 4));

                /* Backup fault address and status */
                fault_addr = SCB->BFAR;
                fault_status = VMPU_SCB_BFSR;

                /* Check if the fault is the special register corner case. */
                if (!vmpu_fault_recovery_bus(pc, sp, fault_addr, fault_status)) {
                    VMPU_SCB_BFSR = fault_status;
                    return lr;
                }
            }

            /* If recovery was not successful, throw an error and halt. */
            DEBUG_FAULT(FAULT_BUS, lr, sp);
            HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied.");
            break;

        case UsageFault_IRQn:
            DEBUG_FAULT(FAULT_USAGE, lr, sp);
            HALT_ERROR(FAULT_USAGE, "Cannot recover from a usage fault.");
            break;

        case SVCall_IRQn:
            HALT_ERROR(NOT_IMPLEMENTED, "No SVCall IRQ handler registered.");
            break;

        case DebugMonitor_IRQn:
            DEBUG_FAULT(FAULT_DEBUG, lr, sp);
            HALT_ERROR(FAULT_DEBUG, "Cannot recover from a DebugMonitor fault.");
            break;

        case PendSV_IRQn:
            HALT_ERROR(NOT_IMPLEMENTED, "No PendSV IRQ handler registered.");
            break;

        case SysTick_IRQn:
            HALT_ERROR(NOT_IMPLEMENTED, "No SysTick IRQ handler registered.");
            break;

        default:
            HALT_ERROR(NOT_ALLOWED, "Active IRQn (%i) is not a system interrupt.", ipsr);
            break;
    }

    return lr;
}
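
The sp + (6 * 4) reads above follow from the ARMv7-M exception stack frame, where the stacked pc is the seventh word pushed by hardware. As a reminder (standard architectural layout, not uVisor code):

#include <stdint.h>

/* Basic ARMv7-M exception stack frame as pushed by hardware on entry.
 * The stacked pc sits at byte offset 6 * 4 = 24 from the stack pointer,
 * which is exactly the sp + (6 * 4) used above. */
typedef struct {
    uint32_t r0, r1, r2, r3;  /* Caller-saved argument registers. */
    uint32_t r12;
    uint32_t lr;              /* Link register of the interrupted code. */
    uint32_t pc;              /* Return address: sp + (6 * 4). */
    uint32_t xpsr;            /* Program status register. */
} TExceptionFrame;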
Example 15
File: vmpu.c Project: kedars/uvisor
static void vmpu_load_boxes(void)
{
    int i, count;
    const UvisorBoxAclItem *region;
    const UvisorBoxConfig **box_cfgtbl;
    uint8_t box_id;

    /* Check heap start and end addresses. */
    if (!__uvisor_config.heap_start || !vmpu_sram_addr((uint32_t) __uvisor_config.heap_start)) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Heap start pointer (0x%08x) is not in SRAM memory.\n",
            (uint32_t) __uvisor_config.heap_start);
    }
    if (!__uvisor_config.heap_end || !vmpu_sram_addr((uint32_t) __uvisor_config.heap_end)) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Heap end pointer (0x%08x) is not in SRAM memory.\n",
            (uint32_t) __uvisor_config.heap_end);
    }
    if (__uvisor_config.heap_end < __uvisor_config.heap_start) {
        HALT_ERROR(SANITY_CHECK_FAILED, "Heap end pointer (0x%08x) is smaller than heap start pointer (0x%08x).\n",
            (uint32_t) __uvisor_config.heap_end, (uint32_t) __uvisor_config.heap_start);
    }

    /* Enumerate boxes. */
    g_vmpu_box_count = (uint32_t) (__uvisor_config.cfgtbl_ptr_end - __uvisor_config.cfgtbl_ptr_start);
    if (g_vmpu_box_count >= UVISOR_MAX_BOXES) {
        HALT_ERROR(SANITY_CHECK_FAILED, "box number overflow\n");
    }
    g_vmpu_boxes_counted = TRUE;

    /* Initialize boxes. */
    box_id = 0;
    for (box_cfgtbl = (const UvisorBoxConfig * *) __uvisor_config.cfgtbl_ptr_start;
         box_cfgtbl < (const UvisorBoxConfig * *) __uvisor_config.cfgtbl_ptr_end;
         box_cfgtbl++) {
        /* Ensure that the configuration table resides in flash. */
        if (!(vmpu_flash_addr((uint32_t) *box_cfgtbl) &&
            vmpu_flash_addr((uint32_t) ((uint8_t *) (*box_cfgtbl)) + (sizeof(**box_cfgtbl) - 1)))) {
            HALT_ERROR(SANITY_CHECK_FAILED, "invalid address - *box_cfgtbl must point to flash (0x%08X)\n",
                *box_cfgtbl);
        }

        /* Check the magic value in the box configuration table. */
        if (((*box_cfgtbl)->magic) != UVISOR_BOX_MAGIC) {
            HALT_ERROR(SANITY_CHECK_FAILED, "box[%i] @0x%08X - invalid magic\n",
                box_id, (uint32_t)(*box_cfgtbl));
        }

        /* Check the box configuration table version. */
        if (((*box_cfgtbl)->version) != UVISOR_BOX_VERSION) {
            HALT_ERROR(SANITY_CHECK_FAILED, "box[%i] @0x%08X - invalid version (0x%04X!-0x%04X)\n",
                box_id, *box_cfgtbl, (*box_cfgtbl)->version, UVISOR_BOX_VERSION);
        }

        /* Confirm that the box index meets its minimal size. */
        if ((*box_cfgtbl)->index_size < sizeof(UvisorBoxIndex)) {
            HALT_ERROR(SANITY_CHECK_FAILED, "Box index size (%uB) must be large enough to hold UvisorBoxIndex (%uB).\n",
                (*box_cfgtbl)->index_size, sizeof(UvisorBoxIndex));
        }

        /* Check that the box namespace is not too long. */
        vmpu_sanity_check_box_namespace(box_id, (*box_cfgtbl)->box_namespace);

        /* Load the box ACLs. */
        DPRINTF("box[%i] ACL list:\n", box_id);

        /* Add ACLs for all box stacks. */
        vmpu_acl_stack(
            box_id,
            (*box_cfgtbl)->index_size + (*box_cfgtbl)->context_size + (*box_cfgtbl)->heap_size,
            (*box_cfgtbl)->stack_size
        );

        /* Initialize box index. */
        vmpu_box_index_init(
            box_id,
            *box_cfgtbl
        );

        /* Enumerate the box ACLs. */
        region = (*box_cfgtbl)->acl_list;
        if (region != NULL) {
            count = (*box_cfgtbl)->acl_count;
            for (i = 0; i < count; i++) {
                /* Ensure that the ACL resides in public flash. */
                if (!vmpu_public_flash_addr((uint32_t) region)) {
                    HALT_ERROR(SANITY_CHECK_FAILED, "box[%i]:acl[%i] must be in code section (@0x%08X)\n",
                        box_id, i, *box_cfgtbl);
                }

                /* Add the ACL and force the entry as user-provided. */
                if (region->acl & UVISOR_TACL_IRQ) {
                    vmpu_acl_irq(box_id, region->param1, region->param2);
                } else {
                    vmpu_acl_add(
                        box_id,
                        region->param1,
                        region->param2,
                        region->acl | UVISOR_TACL_USER
                    );
                }

                /* Proceed to the next ACL. */
                region++;
            }
        }

        /* Proceed to the next box. */
        box_id++;
    }

    /* Load box 0. */
    vmpu_load_box(0);
    *(__uvisor_config.uvisor_box_context) = (uint32_t *) g_context_current_states[0].bss;

    DPRINTF("vmpu_load_boxes [DONE]\n");
}
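
The loop above touches most fields of UvisorBoxConfig. The actual definition lives in the uVisor headers; below is a sketch inferred purely from the accesses above (field order and types are illustrative):

#include <stdint.h>

typedef struct UvisorBoxAclItem UvisorBoxAclItem;

/* Inferred from the field accesses above; not the verbatim uVisor header. */
typedef struct {
    uint32_t magic;                     /* Must equal UVISOR_BOX_MAGIC. */
    uint32_t version;                   /* Must equal UVISOR_BOX_VERSION. */
    uint32_t index_size;                /* At least sizeof(UvisorBoxIndex). */
    uint32_t context_size;
    uint32_t heap_size;
    uint32_t stack_size;
    const char * box_namespace;         /* NULL or NUL-terminated, in flash. */
    const UvisorBoxAclItem * acl_list;  /* NULL or acl_count entries. */
    uint32_t acl_count;
} UvisorBoxConfig;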
Example 16
File: vmpu.c Project: kedars/uvisor
int vmpu_fault_recovery_bus(uint32_t pc, uint32_t sp, uint32_t fault_addr, uint32_t fault_status)
{
    uint16_t opcode;
    uint32_t r0, r1;
    uint32_t cnt_max, cnt;
    int found;

    /* Check for attacks. */
    if (!vmpu_public_flash_addr(pc)) {
       HALT_ERROR(NOT_ALLOWED, "This is not the PC (0x%08X) your were searching for", pc);
    }

    /* Check fault register; the following two configurations are allowed:
     *   0x04 - imprecise data bus fault, no stacking/unstacking errors.
     *   0x82 - precise data bus fault, no stacking/unstacking errors. */
    /* Note: Currently the faulting address argument is not used, since it
     * is saved in r0 for managed bus faults. */
    switch (fault_status) {
        case 0x82:
            cnt_max = 0;
            break;
        case 0x04:
            cnt_max = UVISOR_NOP_CNT;
            break;
        default:
            return -1;
    }

    /* Parse the instruction opcode. */
    cnt = 0;
    do {
        /* Fetch the opcode from memory. */
        opcode = vmpu_unpriv_uint16_read(pc - (cnt << 1));

        /* Test the lower 8 bits for imm5 = 0, Rn = 0, Rt = 1. */
        found = TRUE;
        switch(opcode & 0xFF) {
            /* If using r0 and r1, we expect a strX instruction. */
            case VMPU_OPCODE16_LOWER_R0_R1_MASK:
                /* Fetch r0 and r1. */
                r0 = vmpu_unpriv_uint32_read(sp);
                r1 = vmpu_unpriv_uint32_read(sp+4);

                /* Check if there is an ACL mapping this access. */
                if ((vmpu_fault_find_acl(r0, sizeof(uint32_t)) & UVISOR_TACL_UWRITE) == 0) {
                    return -1;
                };

                /* Test the upper 8 bits for the desired opcode and imm5 = 0. */
                switch (opcode >> 8) {
                    case VMPU_OPCODE16_UPPER_STR_MASK:
                        *((uint32_t *) r0) = (uint32_t) r1;
                        break;
                    case VMPU_OPCODE16_UPPER_STRH_MASK:
                        *((uint16_t *) r0) = (uint16_t) r1;
                        break;
                    case VMPU_OPCODE16_UPPER_STRB_MASK:
                        *((uint8_t *) r0) = (uint8_t) r1;
                        break;
                    default:
                        found = FALSE;
                        break;
                }
                if (found) {
                    /* DPRINTF("Executed privileged access: 0x%08X written to 0x%08X\n\r", r1, r0); */
                }
                break;

            /* If using r0 only, we expect a ldrX instruction. */
            case VMPU_OPCODE16_LOWER_R0_R0_MASK:
                /* Fetch r0. */
                r0 = vmpu_unpriv_uint32_read(sp);

                /* Check if there is an ACL mapping this access. */
                if ((vmpu_fault_find_acl(r0, sizeof(uint32_t)) & UVISOR_TACL_UREAD) == 0) {
                    return -1;
                };

                /* Test the upper 8 bits for the desired opcode and imm5 = 0. */
                switch (opcode >> 8) {
                    case VMPU_OPCODE16_UPPER_LDR_MASK:
                        r1 = (uint32_t) *((uint32_t *) r0);
                        break;
                    case VMPU_OPCODE16_UPPER_LDRH_MASK:
                        r1 = (uint16_t) *((uint16_t *) r0);
                        break;
                    case VMPU_OPCODE16_UPPER_LDRB_MASK:
                        r1 = (uint8_t) *((uint8_t *) r0);
                        break;
                    default:
                        found = FALSE;
                        break;
                }
                if (found) {
                    /* The result is stored back to the stack (r0). */
                    vmpu_unpriv_uint32_write(sp, r1);
                    /* DPRINTF("Executed privileged access: read 0x%08X from 0x%08X\n\r", r1, r0); */
                }
                break;

            default:
                found = FALSE;
                break;
        }

        /* Parse the next opcode. */
        cnt++;
    } while (!found && cnt < cnt_max);

    /* Return an error if the opcode was not found. */
    if (!found) {
        return -1;
    }

    /* Otherwise execution continues from the instruction following the fault. */
    /* Note: We assume the instruction is 16 bits wide and skip possible NOPs. */
    vmpu_unpriv_uint32_write(sp + (6 << 2), pc + ((UVISOR_NOP_CNT + 2 - cnt) << 1));

    /* Success. */
    return 0;
}
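
The return-address fixup at the end is worth a worked example. For a precise fault (cnt_max = 0) the loop body runs exactly once, so cnt is 1 on exit and the stacked pc advances by (UVISOR_NOP_CNT + 1) halfwords: the faulting 16-bit instruction plus its NOP padding. A sketch of that arithmetic, assuming UVISOR_NOP_CNT is 3 (the real value is defined elsewhere in uVisor):

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration only. */
#define UVISOR_NOP_CNT 3

int main(void)
{
    uint32_t pc = 0x00001000;  /* Hypothetical address of the faulting str/ldr. */
    uint32_t cnt = 1;          /* Precise fault: the loop exits after one pass. */

    /* Skip the 16-bit instruction plus the NOPs that follow it. */
    uint32_t resume_pc = pc + ((UVISOR_NOP_CNT + 2 - cnt) << 1);
    printf("resume at 0x%08X\n", resume_pc);  /* pc + 8 with these values */
    return 0;
}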
Example 17
File: vmpu.c Project: Nienzu/uvisor
int vmpu_fault_recovery_bus(uint32_t pc, uint32_t sp, uint32_t fault_addr, uint32_t fault_status)
{
    uint16_t opcode;
    uint32_t r0, r1;
    uint32_t cnt_max, cnt;
    int found;

    /* check for attacks */
    if(!VMPU_FLASH_ADDR(pc))
       HALT_ERROR(NOT_ALLOWED, "This is not the PC (0x%08X) your were searching for", pc);

    /* check fault register; the following two configurations are allowed:
     *   0x04 - imprecise data bus fault, no stacking/unstacking errors
     *   0x82 - precise data bus fault, no stacking/unstacking errors */
    /* note: currently the faulting address argument is not used, since it
     * is saved in r0 for managed bus faults */
    switch(fault_status) {
        case 0x82:
            cnt_max = 0;
            break;
        case 0x04:
            cnt_max = UVISOR_NOP_CNT;
            break;
        default:
            return -1;
    }

    /* parse opcode */
    cnt = 0;
    do
    {
        /* fetch opcode from memory */
        opcode = vmpu_unpriv_uint16_read(pc - (cnt << 1));

        /* test lower 8 bits for (partial) imm5 == 0, Rn = 0, Rt = 1 */
        found = TRUE;
        switch(opcode & 0xFF)
        {
            /* if using r0 and r1, we expect a strX instruction */
            case VMPU_OPCODE16_LOWER_R0_R1_MASK:
                /* fetch r0 and r1 */
                r0 = vmpu_unpriv_uint32_read(sp);
                r1 = vmpu_unpriv_uint32_read(sp+4);

                /* check ACLs */
                if((vmpu_fault_find_acl(r0,sizeof(uint32_t)) & UVISOR_TACL_UWRITE) == 0) {
                    return -1;
                };

                /* test upper 8 bits for opcode and (partial) imm5 == 0 */
                switch(opcode >> 8)
                {
                    case VMPU_OPCODE16_UPPER_STR_MASK:
                        *((uint32_t *) r0) = (uint32_t) r1;
                        break;
                    case VMPU_OPCODE16_UPPER_STRH_MASK:
                        *((uint16_t *) r0) = (uint16_t) r1;
                        break;
                    case VMPU_OPCODE16_UPPER_STRB_MASK:
                        *((uint8_t *) r0) = (uint8_t) r1;
                        break;
                    default:
                        found = FALSE;
                        break;
                }
                if (found) {
                    /* DPRINTF("Executed privileged access: 0x%08X written to 0x%08X\n\r", r1, r0); */
                }
                break;

            /* if using r0 only, we expect a ldrX instruction */
            case VMPU_OPCODE16_LOWER_R0_R0_MASK:
                /* fetch r0 */
                r0 = vmpu_unpriv_uint32_read(sp);

                /* check ACLs */
                if((vmpu_fault_find_acl(r0,sizeof(uint32_t)) & UVISOR_TACL_UREAD) == 0) {
                    return -1;
                };

                /* test upper 8 bits for opcode and (partial) imm5 == 0 */
                switch(opcode >> 8)
                {
                    case VMPU_OPCODE16_UPPER_LDR_MASK:
                        r1 = (uint32_t) *((uint32_t *) r0);
                        break;
                    case VMPU_OPCODE16_UPPER_LDRH_MASK:
                        r1 = (uint16_t) *((uint16_t *) r0);
                        break;
                    case VMPU_OPCODE16_UPPER_LDRB_MASK:
                        r1 = (uint8_t) *((uint8_t *) r0);
                        break;
                    default:
                        found = FALSE;
                        break;
                }
                if(found)
                {
                    /* the result is stored back to the stack (r0) */
                    vmpu_unpriv_uint32_write(sp, r1);

                    /* DPRINTF("Executed privileged access: read 0x%08X from 0x%08X\n\r", r1, r0); */
                }
                break;

            default:
                found = FALSE;
                break;
        }

        /* parse next opcode */
        cnt++;
    }
    while(!found && cnt < cnt_max);

    /* return error if opcode was not found */
    if(!found)
        return -1;

    /* otherwise execution continues from the instruction following the fault */
    /* note: we assume the instruction is 16 bits wide and skip possible NOPs */
    vmpu_unpriv_uint32_write(sp + (6 << 2), pc + ((UVISOR_NOP_CNT + 2 - cnt) << 1));

    /* success */
    return 0;
}
Example 18
File: vmpu.c Project: Nienzu/uvisor
static int vmpu_sanity_checks(void)
{
    /* verify uvisor config structure */
    if(__uvisor_config.magic != UVISOR_MAGIC)
        HALT_ERROR(SANITY_CHECK_FAILED,
            "config magic mismatch: &0x%08X = 0x%08X \
                                  - exptected 0x%08X\n",
            &__uvisor_config,
            __uvisor_config.magic,
            UVISOR_MAGIC);

    /* verify basic assumptions about vmpu_bits/__builtin_clz */
    assert(__builtin_clz(0) == 32);
    assert(__builtin_clz(1UL << 31) == 0);
    assert(vmpu_bits(0) == 0);
    assert(vmpu_bits(1UL << 31) == 32);
    assert(vmpu_bits(0x8000UL) == 16);
    assert(vmpu_bits(0x8001UL) == 16);
    assert(vmpu_bits(1) == 1);

    /* verify that __uvisor_config is within valid flash */
    assert( ((uint32_t) &__uvisor_config) >= FLASH_ORIGIN );
    assert( ((((uint32_t) &__uvisor_config) + sizeof(__uvisor_config))
             <= (FLASH_ORIGIN + FLASH_LENGTH)) );

    /* verify if configuration mode is inside flash memory */
    assert((uint32_t)__uvisor_config.mode >= FLASH_ORIGIN);
    assert((uint32_t)__uvisor_config.mode <= (FLASH_ORIGIN + FLASH_LENGTH - 4));
    DPRINTF("uvisor_mode: %u\n", *__uvisor_config.mode);
    assert(*__uvisor_config.mode <= 2);

    /* verify SRAM relocation */
    DPRINTF("uvisor_ram : @0x%08X (%u bytes) [config]\n",
        __uvisor_config.bss_main_start,
        VMPU_REGION_SIZE(__uvisor_config.bss_main_start,
                         __uvisor_config.bss_main_end));
    DPRINTF("             (0x%08X (%u bytes) [linker]\n",
            RESERVED_SRAM_START, USE_SRAM_SIZE);
    assert( __uvisor_config.bss_main_end > __uvisor_config.bss_main_start );
    assert( VMPU_REGION_SIZE(__uvisor_config.bss_main_start,
                             __uvisor_config.bss_main_end) == USE_SRAM_SIZE );
    assert(&__stack_end__ <= __uvisor_config.bss_main_end);

    assert( (uint32_t) __uvisor_config.bss_main_start == RESERVED_SRAM_START);
    assert( (uint32_t) __uvisor_config.bss_main_end == (RESERVED_SRAM_START +
                                                        USE_SRAM_SIZE) );

    /* verify that secure flash area is accessible and after public code */
    assert( __uvisor_config.secure_start <= __uvisor_config.secure_end );
    assert( (uint32_t) __uvisor_config.secure_end <=
            (uint32_t) (FLASH_ORIGIN + FLASH_LENGTH) );
    assert( (uint32_t) __uvisor_config.secure_start >=
            (uint32_t) &vmpu_sanity_checks );

    /* verify configuration table */
    assert( __uvisor_config.cfgtbl_ptr_start <= __uvisor_config.cfgtbl_ptr_end );
    assert( __uvisor_config.cfgtbl_ptr_start >= __uvisor_config.secure_start );
    assert( (uint32_t) __uvisor_config.cfgtbl_ptr_end <=
            (uint32_t) (FLASH_ORIGIN + FLASH_LENGTH) );

    /* return error if uvisor is disabled */
    if(!__uvisor_config.mode || (*__uvisor_config.mode == 0))
        return -1;
    else
        return 0;
}
Example 19
/* Perform a register gateway operation. */
void register_gateway_perform_operation(uint32_t svc_sp, uint32_t svc_pc)
{
    /* Check if the SVCall points to a register gateway. */
    TRegisterGateway const * const register_gateway = (TRegisterGateway const * const) svc_pc;
    int status = register_gateway_check(register_gateway);
    if (status != REGISTER_GATEWAY_STATUS_OK) {
        HALT_ERROR(PERMISSION_DENIED, "Register gateway 0x%08X not allowed. Error: %d.",
                   (uint32_t) register_gateway, status);
        return;
    }

    /* From now on we can assume the register_gateway structure and the address
     * are valid. */

    /* Fetch the value from the user stack.
     * This is only needed for write operations. */
    uint32_t value = vmpu_unpriv_uint32_read(svc_sp);

    /* De-reference the address.
     * The value at *address is always needed for every operation. */
    uint32_t address = register_gateway->address;
    uint32_t width = (register_gateway->operation & __UVISOR_RGW_OP_WIDTH_MASK) >> __UVISOR_RGW_OP_WIDTH_POS;
    uint32_t result = 0;
    switch(width) {
    case 4:
        result = (uint32_t) *((uint32_t *) address);
        break;
    case 2:
        result = (uint32_t) *((uint16_t *) address);
        break;
    case 1:
        result = (uint32_t) *((uint8_t *) address);
        break;
    default:
        HALT_ERROR(NOT_ALLOWED, "Register level gateway: Width %d not allowed.", width);
        break;
    }

    /* Perform the actual operation.
     * Read operations store the return value onto the user stack. */
    uint32_t operation = (register_gateway->operation & __UVISOR_RGW_OP_TYPE_MASK) >> __UVISOR_RGW_OP_TYPE_POS;
    switch (operation) {
    case UVISOR_RGW_OP_READ:
        return vmpu_unpriv_uint32_write(svc_sp, result);
    case UVISOR_RGW_OP_READ_AND:
        result &= register_gateway->mask;
        return vmpu_unpriv_uint32_write(svc_sp, result);
    case UVISOR_RGW_OP_WRITE:
        result = value;
        break;
    case UVISOR_RGW_OP_WRITE_AND:
        result &= (value | ~(register_gateway->mask));
        break;
    case UVISOR_RGW_OP_WRITE_OR:
        result |= (value & register_gateway->mask);
        break;
    case UVISOR_RGW_OP_WRITE_XOR:
        result ^= (value & register_gateway->mask);
        break;
    case UVISOR_RGW_OP_WRITE_REPLACE:
        result = (result & ~(register_gateway->mask)) | (value & register_gateway->mask);
        break;
    default:
        HALT_ERROR(NOT_ALLOWED, "Register level gateway: Operation 0x%08X not recognised.", operation);
        break;
    }

    /* Store the result at the target address.
     * The code runs here only if the register gateway performs a write
     * operation. */
    switch(width) {
    case 4:
        *((uint32_t *) address) = (uint32_t) result;
        break;
    case 2:
        *((uint16_t *) address) = (uint16_t) result;
        break;
    case 1:
        *((uint8_t *) address) = (uint8_t) result;
        break;
    default:
        HALT_ERROR(NOT_ALLOWED, "Register level gateway: Width %d not allowed.", width);
        break;
    }
}
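
The masked write operations are easiest to follow with concrete numbers. A minimal sketch of UVISOR_RGW_OP_WRITE_REPLACE, which keeps the unmasked bits of the register and takes the masked bits from the caller's value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t result = 0xAABBCCDD;  /* Hypothetical current register value. */
    uint32_t value  = 0x11223344;  /* Caller-provided value from the stack. */
    uint32_t mask   = 0x0000FFFF;  /* Gateway mask: replace the low halfword. */

    /* UVISOR_RGW_OP_WRITE_REPLACE, exactly as in the switch above. */
    result = (result & ~mask) | (value & mask);
    printf("0x%08X\n", result);    /* prints 0xAABB3344 */
    return 0;
}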