/* Initialise our runtime environment: bootstrap an allocator from the
 * static pool, derive a vka and a vspace from it, then hand the allocator
 * a reserved region of virtual memory to grow into.
 *
 * env: process environment whose simple/vka/vspace members are filled in.
 */
static void init_env(env_t env)
{
    allocman_t *allocman;
    UNUSED reservation_t virtual_reservation;
    UNUSED int error;

    /* create an allocator */
    allocman = bootstrap_use_current_simple(&env->simple, ALLOCATOR_STATIC_POOL_SIZE,
                                            allocator_mem_pool);
    assert(allocman);

    /* create a vka (interface for interacting with the underlying allocator) */
    allocman_make_vka(&env->vka, allocman);

    /* create a vspace (virtual memory management interface). We pass
     * boot info not because it will use capabilities from it, but so
     * it knows the address and will add it as a reserved region */
    error = sel4utils_bootstrap_vspace_with_bootinfo_leaky(&env->vspace,
                                                           &data, simple_get_pd(&env->simple),
                                                           &env->vka, seL4_GetBootInfo());
    /* Fix: this result was previously ignored; fail loudly on bootstrap
     * failure, consistent with the other init paths in this file. */
    assert(error == 0);

    /* fill the allocator with virtual memory */
    void *vaddr;
    virtual_reservation = vspace_reserve_range(&env->vspace,
                                               ALLOCATOR_VIRTUAL_POOL_SIZE, seL4_AllRights, 1, &vaddr);
    assert(virtual_reservation.res);
    bootstrap_configure_virtual_pool(allocman, vaddr,
                                     ALLOCATOR_VIRTUAL_POOL_SIZE, simple_get_pd(&env->simple));
}
/* * Initialize all main data structures. * * The code to initialize simple, allocman, vka, and vspace is modeled * after the "sel4test-driver" app: * https://github.com/seL4/sel4test/blob/master/apps/sel4test-driver/src/main.c */ static void setup_system() { /* initialize boot information */ bootinfo = seL4_GetBootInfo(); /* initialize simple interface */ simple_stable_init_bootinfo(&simple, bootinfo); //simple_default_init_bootinfo(simple, bootinfo); /* create an allocator */ allocman_t *allocman; allocman = bootstrap_use_current_simple(&simple, POOL_SIZE, memPool); assert(allocman); /* create a VKA */ allocman_make_vka(&vka, allocman); /* create a vspace */ UNUSED int err; err = sel4utils_bootstrap_vspace_with_bootinfo_leaky(&vspace, &allocData, seL4_CapInitThreadPD, &vka, bootinfo); assert(err == 0); /* fill allocator with virtual memory */ void *vaddr; UNUSED reservation_t vres; vres = vspace_reserve_range(&vspace, VIRT_POOL_SIZE, seL4_AllRights, 1, &vaddr); assert(vres.res); bootstrap_configure_virtual_pool(allocman, vaddr, VIRT_POOL_SIZE, seL4_CapInitThreadPD); }
/* Initialise the allocator and vspace for a test-client process from the
 * resources handed over by the driver in init_data: a single-level cspace
 * of free slots, a list of untyped capabilities, and the frames already
 * backing the client's stack and IPC buffer.
 *
 * env:       client environment whose vka/vspace members are filled in.
 * init_data: bootstrap resources received from the test driver.
 */
static void init_allocator(env_t env, test_init_data_t *init_data)
{
    UNUSED int error;
    UNUSED reservation_t virtual_reservation;

    /* initialise allocator */
    allocman_t *allocator = bootstrap_use_current_1level(init_data->root_cnode,
                                                         init_data->cspace_size_bits,
                                                         init_data->free_slots.start,
                                                         init_data->free_slots.end,
                                                         ALLOCATOR_STATIC_POOL_SIZE,
                                                         allocator_mem_pool);
    if (allocator == NULL) {
        ZF_LOGF("Failed to bootstrap allocator");
    }
    allocman_make_vka(&env->vka, allocator);

    /* fill the allocator with untypeds */
    seL4_CPtr slot;
    unsigned int size_bits_index;
    for (slot = init_data->untypeds.start, size_bits_index = 0;
         slot <= init_data->untypeds.end;
         slot++, size_bits_index++) {
        cspacepath_t path;
        vka_cspace_make_path(&env->vka, slot, &path);
        /* allocman doesn't require the paddr unless we need to ask for phys addresses,
         * which we don't. */
        uintptr_t fake_paddr = 0;
        size_t size_bits = init_data->untyped_size_bits_list[size_bits_index];
        error = allocman_utspace_add_uts(allocator, 1, &path, &size_bits, &fake_paddr);
        if (error) {
            ZF_LOGF("Failed to add untyped objects to allocator");
        }
    }

    /* create a vspace; tell it which frames are already mapped (init_data
     * page, IPC buffer, and the stack pages) so it reserves them */
    void *existing_frames[init_data->stack_pages + 2];
    existing_frames[0] = (void *) init_data;
    existing_frames[1] = seL4_GetIPCBuffer();
    assert(init_data->stack_pages > 0);
    for (int i = 0; i < init_data->stack_pages; i++) {
        existing_frames[i + 2] = init_data->stack + (i * PAGE_SIZE_4K);
    }

    error = sel4utils_bootstrap_vspace(&env->vspace, &alloc_data,
                                       init_data->page_directory, &env->vka, NULL, NULL,
                                       existing_frames);
    /* Fix: this result was previously ignored; every other failure path in
     * this function dies with ZF_LOGF, so do the same here. */
    if (error) {
        ZF_LOGF("Failed to bootstrap vspace");
    }

    /* switch the allocator to a virtual memory pool */
    void *vaddr;
    virtual_reservation = vspace_reserve_range(&env->vspace, ALLOCATOR_VIRTUAL_POOL_SIZE,
                                               seL4_AllRights, 1, &vaddr);
    if (virtual_reservation.res == 0) {
        ZF_LOGF("Failed to switch allocator to virtual memory pool");
    }

    bootstrap_configure_virtual_pool(allocator, vaddr, ALLOCATOR_VIRTUAL_POOL_SIZE,
                                     env->page_directory);
}
/* Exercise the ARM page cache-maintenance invocations by mapping the SAME
 * physical frame twice: once uncached (ptr) and once cached (ptrc), then
 * checking that Clean / CleanInvalidate / Invalidate make writes visible
 * across the two mappings as the architecture requires. */
static int test_page_flush(env_t env)
{
    seL4_CPtr frame, framec;
    uintptr_t vstart, vstartc;
    volatile uint32_t *ptr, *ptrc;
    vka_t *vka;
    int err;

    vka = &env->vka;

    void *vaddr;
    void *vaddrc;
    reservation_t reservation, reservationc;

    /* reserve one page uncached (cacheable flag 0) ... */
    reservation = vspace_reserve_range(&env->vspace,
                                       PAGE_SIZE_4K, seL4_AllRights, 0, &vaddr);
    assert(reservation.res);
    /* ... and one page cached (cacheable flag 1) */
    reservationc = vspace_reserve_range(&env->vspace,
                                        PAGE_SIZE_4K, seL4_AllRights, 1, &vaddrc);
    assert(reservationc.res);

    vstart = (uintptr_t)vaddr;
    assert(IS_ALIGNED(vstart, seL4_PageBits));
    vstartc = (uintptr_t)vaddrc;
    assert(IS_ALIGNED(vstartc, seL4_PageBits));

    ptr = (volatile uint32_t*)vstart;
    ptrc = (volatile uint32_t*)vstartc;

    /* Create a frame */
    frame = vka_alloc_frame_leaky(vka, PAGE_BITS_4K);
    test_assert(frame != seL4_CapNull);

    /* Duplicate the cap so the one frame can be mapped at two vaddrs */
    framec = get_free_slot(env);
    test_assert(framec != seL4_CapNull);
    err = cnode_copy(env, frame, framec, seL4_AllRights);
    test_assert(!err);

    /* map in a cap with cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &framec, NULL, vaddrc, 1, seL4_PageBits, reservationc);
    test_assert(!err);
    /* map in a cap without cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &frame, NULL, vaddr, 1, seL4_PageBits, reservation);
    test_assert(!err);

    /* Clean makes data observable to non-cached page: write through both
     * mappings (cached write last), clean the cached line, and the
     * uncached view must now see the cached value */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_Clean_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*ptr == 0xDEADBEEF);
    test_assert(*ptrc == 0xDEADBEEF);

    /* Clean/Invalidate makes data observable to non-cached page */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_CleanInvalidate_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*ptr == 0xDEADBEEF);
    test_assert(*ptrc == 0xDEADBEEF);

    /* Invalidate makes RAM data observable to cached page */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_Invalidate_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);

    /* In case the invalidation performs an implicit clean, write a new
       value to RAM and make sure the cached read retrieves it
       Remember to drain any store buffer! */
    *ptr = 0xBEEFCAFE;
#ifdef CONFIG_ARCH_ARM_V7A
    /* data memory barrier: the uncached store must reach RAM before the
     * cached read below */
    asm volatile ("dmb" ::: "memory");
#endif
    test_assert(*ptrc == 0xBEEFCAFE);
    test_assert(*ptr == 0xBEEFCAFE);

    return sel4test_get_result();
}
/* Map one frame of every supported size (from frame_types[]) into a large
 * aligned virtual window and verify that all page- and page-directory-level
 * cache maintenance invocations succeed on each of them.
 *
 * Fix: frame and page-table allocation results were checked with bare
 * assert() (compiled out under NDEBUG) or not at all; they now use
 * test_assert like the rest of this function. */
static int test_large_page_flush_operation(env_t env)
{
    int num_frame_types = ARRAY_SIZE(frame_types);
    seL4_CPtr frames[num_frame_types];
    int error;
    vka_t *vka = &env->vka;
    bool pt_mapped = false;

    /* Grab some free vspace big enough to hold all the tests. */
    seL4_Word vstart;
    reservation_t reserve = vspace_reserve_range(&env->vspace, 2 * (1 << 25),
                                                 seL4_AllRights, 1, (void **) &vstart);
    test_assert(reserve.res != 0);
    /* reserved twice the needed span so we can align the start upward */
    vstart = ALIGN_UP(vstart, (1 << 25));

    /* Create us some frames to play with. */
    for (int i = 0; i < num_frame_types; i++) {
        frames[i] = vka_alloc_frame_leaky(vka, CTZ(frame_types[i].size));
        test_assert(frames[i] != seL4_CapNull);
    }

    /* Also create a pagetable to map the pages into. */
    seL4_CPtr pt = vka_alloc_page_table_leaky(vka);
    test_assert(pt != seL4_CapNull);

    /* Map the pages in. */
    for (int i = 0; i < num_frame_types; i++) {
        if (frame_types[i].need_pt && !pt_mapped) {
            /* Map the pagetable in. */
            error = seL4_ARCH_PageTable_Map(pt, env->page_directory,
                                            vstart + frame_types[i].vaddr_offset,
                                            seL4_ARCH_Default_VMAttributes);
            test_assert(error == 0);
            pt_mapped = true;
        }

        error = seL4_ARCH_Page_Map(frames[i], env->page_directory,
                                   vstart + frame_types[i].vaddr_offset, seL4_AllRights,
                                   seL4_ARCH_Default_VMAttributes);
        test_assert(error == 0);
    }

    /* See if we can invoke page flush on each of them */
    for (int i = 0; i < num_frame_types; i++) {
        error = seL4_ARM_Page_Invalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Clean_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_CleanInvalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Unify_Instruction(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Invalidate_Data(env->page_directory,
                                                       vstart + frame_types[i].vaddr_offset,
                                                       vstart + frame_types[i].vaddr_offset +
                                                       frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Clean_Data(env->page_directory,
                                                  vstart + frame_types[i].vaddr_offset,
                                                  vstart + frame_types[i].vaddr_offset +
                                                  frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_CleanInvalidate_Data(env->page_directory,
                                                            vstart + frame_types[i].vaddr_offset,
                                                            vstart + frame_types[i].vaddr_offset +
                                                            frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Unify_Instruction(env->page_directory,
                                                         vstart + frame_types[i].vaddr_offset,
                                                         vstart + frame_types[i].vaddr_offset +
                                                         frame_types[i].size);
        test_assert(error == 0);
    }

    return sel4test_get_result();
}