/* FIXME: We've added very simple MPU region switching. - Optimize! */
void vmpu_switch(uint8_t src_box, uint8_t dst_box)
{
    uint32_t dst_count;
    const MpuRegion * region;

    /* DPRINTF("switching from %i to %i\n\r", src_box, dst_box); */
    vmpu_mpu_invalidate();

    /* Update the target box first to make the target stack available. */
    vmpu_region_get_for_box(dst_box, &region, &dst_count);

    /* Only write the stack and context ACL for secure boxes. */
    if (dst_box) {
        assert(dst_count);
        /* Push the stack and context protection ACL into ARMv7M_MPU_REGIONS_STATIC. */
        vmpu_mpu_push(region, 255);
        region++;
        dst_count--;
    }

    /* Push one ACL for the page heap into place. */
    page_allocator_iterate_active_page_masks(vmpu_mem_push_page_acl_iterator,
                                             PAGE_ALLOCATOR_ITERATOR_DIRECTION_BACKWARD);

    /* g_mpu_slot may now have been incremented by one, if the page heap is
     * used by this box. */
    while (dst_count-- && vmpu_mpu_push(region++, 2));

    if (!dst_box) {
        /* Handle the public box ACLs last. */
        vmpu_region_get_for_box(0, &region, &dst_count);

        while (dst_count-- && vmpu_mpu_push(region++, 1));
    }
}
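/*
 * Aside: the second argument to vmpu_mpu_push() (255 for stack/context ACLs,
 * 100 for page heap ACLs, 2 and 1 for box and public box ACLs) acts as a
 * slot priority. The following is a minimal, self-contained sketch of how a
 * priority-aware MPU slot allocator of this kind could behave; slot_push(),
 * slot_invalidate_all(), Slot, and SLOT_COUNT are hypothetical names, not
 * uVisor API, and the eviction policy is an assumption inferred from the
 * loops above.
 */
#include <stdbool.h>
#include <stdint.h>

#define SLOT_COUNT 8 /* assumed number of dynamic MPU slots */

typedef struct {
    uint32_t start;
    uint32_t end;
    uint8_t priority; /* 0 marks a free slot */
} Slot;

static Slot g_slot[SLOT_COUNT];
static uint8_t g_slot_cursor;

/* Reset the walker, analogous to vmpu_mpu_invalidate() before a box switch. */
static void slot_invalidate_all(void)
{
    g_slot_cursor = 0;
    for (uint8_t i = 0; i < SLOT_COUNT; i++) {
        g_slot[i].priority = 0;
    }
}

/* Claim the next slot round-robin, but never evict a slot holding a
 * higher-priority region. Returning false once the cursor wraps is what
 * terminates the `while (dst_count-- && vmpu_mpu_push(...))` loops above. */
static bool slot_push(uint32_t start, uint32_t end, uint8_t priority)
{
    for (uint8_t tries = 0; tries < SLOT_COUNT; tries++) {
        Slot *s = &g_slot[g_slot_cursor];
        g_slot_cursor = (uint8_t) ((g_slot_cursor + 1) % SLOT_COUNT);
        if (s->priority <= priority) {
            s->start = start;
            s->end = end;
            s->priority = priority;
            return g_slot_cursor != 0;
        }
    }
    return false; /* every slot holds a higher-priority region */
}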
static int vmpu_fault_recovery_mpu(uint32_t pc, uint32_t sp, uint32_t fault_addr, uint32_t fault_status)
{
    const MpuRegion *region;
    uint8_t mask, index, page;

    /* Recovery is only possible for a data access violation with a valid
     * MPU syndrome register, or for a stacking fault (where the syndrome
     * register is not valid, but we can still recover). */
    if (!((fault_status == (SCB_CFSR_MMARVALID_Msk | SCB_CFSR_DACCVIOL_Msk)) ||
          (fault_status & (SCB_CFSR_MSTKERR_Msk | SCB_CFSR_MUNSTKERR_Msk)))) {
        return 0;
    }

    if (page_allocator_get_active_mask_for_address(fault_addr, &mask, &index, &page) == UVISOR_ERROR_PAGE_OK) {
        /* Remember this fault. */
        page_allocator_register_fault(page);
        vmpu_mem_push_page_acl_iterator(mask, UVISOR_PAGE_MAP_COUNT * 4 - 1 - index);
    } else {
        /* Find the region for the faulting address. */
        if ((region = vmpu_fault_find_region(fault_addr)) == NULL) {
            return 0;
        }
        vmpu_mpu_push(region, 3);
    }
    return 1;
}
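/*
 * Aside: the fault_status value tested above is the MemManage half of the
 * CFSR. A sketch of how an exception handler could gather the arguments for
 * vmpu_fault_recovery_mpu() follows; it assumes a CMSIS device header for
 * SCB, and the handler name and escalation path are illustrative, not
 * uVisor's actual exception glue.
 */
#include <stdint.h>

void memmanage_recovery_sketch(uint32_t pc, uint32_t sp)
{
    /* The MemManage status flags live in the low byte of the CFSR; MMFAR
     * holds the faulting address, valid only while MMARVALID is set. */
    uint32_t fault_status = SCB->CFSR & 0xFF;
    uint32_t fault_addr = SCB->MMFAR;

    if (vmpu_fault_recovery_mpu(pc, sp, fault_addr, fault_status)) {
        /* Recovered: clear the sticky status bits (write-1-to-clear) and
         * return to retry the faulting access. */
        SCB->CFSR = fault_status;
        return;
    }
    /* Not recoverable: escalate (e.g. halt the offending box). */
}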
/* This is the iterator callback for inserting all page heap ACLs into the
 * MPU during `vmpu_mem_switch()`. */
static int vmpu_mem_push_page_acl_iterator(uint32_t start_addr, uint32_t end_addr, uint8_t page)
{
    (void) page;
    MpuRegion region = {.start = start_addr, .end = end_addr, .config = 0x1E};
    /* We only continue if we have not wrapped around the end of the MPU regions yet. */
    return vmpu_mpu_push(&region, 100);
}

int vmpu_mem_push_page_acl(uint32_t start_addr, uint32_t end_addr)
{
    /* Check that the start and end addresses are 32-byte aligned. */
    if (start_addr & 0x1F || end_addr & 0x1F) {
        return -1;
    }
    vmpu_mem_push_page_acl_iterator(start_addr, end_addr, g_active_box);
    return 0;
}

void vmpu_mem_switch(uint8_t src_box, uint8_t dst_box)
{
    uint32_t dst_count;
    const MpuRegion * region;

    vmpu_mpu_invalidate();

    if (dst_box) {
        /* Update the target box first to make the target stack available. */
        vmpu_region_get_for_box(dst_box, &region, &dst_count);
        while (dst_count-- && vmpu_mpu_push(region++, 255));
    }

    page_allocator_iterate_active_pages(vmpu_mem_push_page_acl_iterator,
                                        PAGE_ALLOCATOR_ITERATOR_DIRECTION_FORWARD);
}
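/*
 * Aside: a usage sketch for vmpu_mem_push_page_acl(). Both bounds must be
 * 32-byte aligned or the call fails with -1. The buffer and the function
 * wrapping the call are hypothetical.
 */
#include <stdint.h>

static uint8_t shared_buffer[256] __attribute__((aligned(32)));

void box_expose_buffer(void)
{
    uint32_t start = (uint32_t) &shared_buffer[0];
    uint32_t end = start + sizeof(shared_buffer); /* 256 is a multiple of 32 */

    if (vmpu_mem_push_page_acl(start, end) != 0) {
        /* Rejected: one of the addresses was not 32-byte aligned. */
    }
}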
static int vmpu_mem_push_page_acl_iterator(uint8_t mask, uint8_t index)
{
    MpuRegion region;
    uint32_t size = g_page_size * 8;

    vmpu_region_translate_acl(
        &region,
        (g_page_head_end_rounded - size * (index + 1)),
        size,
        UVISOR_TACLDEF_DATA | UVISOR_TACL_EXECUTE,
        ~mask
    );
    vmpu_mpu_push(&region, 100);

    /* We do not add more than one region for the page heap. */
    return 0;
}
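/*
 * Aside: a worked example of the address arithmetic above, with assumed
 * values (4 KiB pages, page heap ending at 0x20008000). Each region spans
 * eight pages, and the inverted active-page mask is passed as the
 * hardware-specific field; the assumption here is that it maps to the
 * ARMv7-M subregion disable (SRD) bits, where a set bit turns a subregion
 * off, hence the inversion.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t g_page_size = 0x1000;                 /* assumed: 4 KiB pages */
    uint32_t g_page_head_end_rounded = 0x20008000; /* assumed heap top */
    uint8_t index = 0;                             /* topmost 8-page block */
    uint8_t mask = 0x05;                           /* pages 0 and 2 active */

    uint32_t size = g_page_size * 8;                       /* 0x8000 */
    uint32_t start = g_page_head_end_rounded - size * (index + 1);
    uint8_t srd = (uint8_t) ~mask;                         /* 0xFA */

    /* Prints: region 0x20000000-0x20008000, SRD 0xfa */
    printf("region 0x%08x-0x%08x, SRD 0x%02x\n",
           (unsigned) start, (unsigned) (start + size), (unsigned) srd);
    return 0;
}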