/* Unmapping an IO page table and mapping it back in at the same IO
 * address must preserve its previous frame mappings, so a second frame
 * mapped at that address must be rejected. */
static int test_iopt_map_remap_pt(env_t env)
{
    iopt_cptrs_t tables;
    seL4_CPtr space;
    seL4_CPtr test_frame;
    int err;

    err = map_iopt_set(env, &space, &tables, &test_frame);
    test_eq(err, seL4_NoError);

    /* Pull the deepest page table out of the IO address space. */
    err = seL4_X86_IOPageTable_Unmap(tables.pts[tables.depth - 1]);
    test_eq(err, seL4_NoError);

    /* Reinstate it at the same IO virtual address. */
    err = seL4_X86_IOPageTable_Map(tables.pts[tables.depth - 1], space, IOPT_MAP_BASE);
    test_eq(err, seL4_NoError);

    /* It should retain its old mappings, so mapping in a fresh frame at
     * the same address should fail. */
    test_frame = vka_alloc_frame_leaky(&env->vka, seL4_PageBits);
    test_assert(test_frame);
    err = seL4_X86_Page_MapIO(test_frame, space, seL4_AllRights, IOPT_MAP_BASE);
    test_assert(err != seL4_NoError);

    delete_iospace(env, space);
    return sel4test_get_result();
}
static int _dma_morecore(size_t min_size, int cached, struct dma_mem_descriptor* dma_desc) { static uint32_t _vaddr = DMA_VSTART; struct seL4_ARM_Page_GetAddress getaddr_ret; seL4_CPtr frame; seL4_CPtr pd; vka_t* vka; int err; pd = simple_get_pd(&_simple); vka = &_vka; /* Create a frame */ frame = vka_alloc_frame_leaky(vka, 12); assert(frame); if (!frame) { return -1; } /* Try to map the page */ err = seL4_ARM_Page_Map(frame, pd, _vaddr, seL4_AllRights, 0); if (err) { seL4_CPtr pt; /* Allocate a page table */ pt = vka_alloc_page_table_leaky(vka); if (!pt) { printf("Failed to create page table\n"); return -1; } /* Map the page table */ err = seL4_ARM_PageTable_Map(pt, pd, _vaddr, 0); if (err) { printf("Failed to map page table\n"); return -1; } /* Try to map the page again */ err = seL4_ARM_Page_Map(frame, pd, _vaddr, seL4_AllRights, 0); if (err) { printf("Failed to map page\n"); return -1; } } /* Find the physical address of the page */ getaddr_ret = seL4_ARM_Page_GetAddress(frame); assert(!getaddr_ret.error); /* Setup dma memory description */ dma_desc->vaddr = _vaddr; dma_desc->paddr = getaddr_ret.paddr; dma_desc->cached = 0; dma_desc->size_bits = 12; dma_desc->alloc_cookie = (void*)frame; dma_desc->cookie = NULL; /* Advance the virtual address marker */ _vaddr += BIT(12); return 0; }
/* Once a 4K frame is mapped into an IO address space, mapping a second
 * frame at the same IO virtual address must fail. */
static int test_iopt_no_overlapping_4k(env_t env)
{
    iopt_cptrs_t tables;
    seL4_CPtr space;
    seL4_CPtr clash_frame;
    int err;

    err = map_iopt_set(env, &space, &tables, &clash_frame);
    test_eq(err, seL4_NoError);

    clash_frame = vka_alloc_frame_leaky(&env->vka, seL4_PageBits);
    test_assert(clash_frame);
    err = seL4_X86_Page_MapIO(clash_frame, space, seL4_AllRights, IOPT_MAP_BASE);
    test_assert(err != seL4_NoError);

    delete_iospace(env, space);
    return sel4test_get_result();
}
/* Map a fresh 4K frame at IOPT_MAP_BASE in the given ARM IO address
 * space. If the lookup fails, allocate and map an IO page table first,
 * then retry. Returns the final mapping error (expected seL4_NoError). */
static int map_iopt_from_iospace(env_t env, seL4_CPtr iospace, seL4_CPtr *iopt, seL4_CPtr *frame)
{
    int err;

    *frame = vka_alloc_frame_leaky(&env->vka, seL4_PageBits);
    test_assert(*frame);

    err = seL4_ARM_Page_MapIO(*frame, iospace, seL4_AllRights, IOPT_MAP_BASE);
    if (err == seL4_FailedLookup) {
        /* No IO page table covers IOPT_MAP_BASE yet: install one. */
        *iopt = vka_alloc_io_page_table_leaky(&env->vka);
        test_assert(*iopt);
        err = seL4_ARM_IOPageTable_Map(*iopt, iospace, IOPT_MAP_BASE);
        test_eq(err, seL4_NoError);
        /* Retry the frame mapping now that the table is in place. */
        err = seL4_ARM_Page_MapIO(*frame, iospace, seL4_AllRights, IOPT_MAP_BASE);
        test_eq(err, seL4_NoError);
    }
    test_eq(err, seL4_NoError);
    return err;
}
/* Build a chain of x86 IO page tables under 'iospace' until a 4K frame
 * can be mapped at IOPT_MAP_BASE, recording each table in 'pts'.
 * Returns the final mapping error (expected seL4_NoError). */
static int map_iopt_from_iospace(env_t env, seL4_CPtr iospace, iopt_cptrs_t *pts, seL4_CPtr *frame)
{
    int error;

    pts->depth = 0;
    /* Allocate and map page tables until we can map a frame */
    *frame = vka_alloc_frame_leaky(&env->vka, seL4_PageBits);
    test_assert(*frame);
    /* BUGFIX: capture the result of the frame mapping itself. Previously
     * the MapIO return value was discarded, so a failure with any code
     * other than seL4_FailedLookup slipped past the final check. */
    while ((error = seL4_X86_Page_MapIO(*frame, iospace, seL4_AllRights, IOPT_MAP_BASE))
           == seL4_FailedLookup) {
        test_assert(pts->depth < MAX_IOPT_DEPTH);
        pts->pts[pts->depth] = vka_alloc_io_page_table_leaky(&env->vka);
        test_assert(pts->pts[pts->depth]);
        error = seL4_X86_IOPageTable_Map(pts->pts[pts->depth], iospace, IOPT_MAP_BASE);
        test_eq(error, seL4_NoError);
        pts->depth++;
    }
    test_eq(error, seL4_NoError);
    return error;
}
/* For every IO space cap the environment provides: map a frame, then
 * check that a second 4K frame at the same IO address is refused. */
static int test_iopt_no_overlapping_4k(env_t env)
{
    seL4_SlotRegion spaces = env->io_space_caps;
    /* NOTE(review): this treats spaces.end as inclusive; seL4_SlotRegion
     * 'end' is conventionally one-past-the-last — confirm the intended
     * count (behavior preserved from the original here). */
    int space_count = spaces.end - spaces.start + 1;

    for (int i = 0; i < space_count; i++) {
        seL4_CPtr space = spaces.start + i;
        seL4_CPtr pt;
        seL4_CPtr mapped;
        int err = map_iopt_set(env, space, &pt, &mapped);
        test_eq(err, seL4_NoError);

        seL4_CPtr clash = vka_alloc_frame_leaky(&env->vka, seL4_PageBits);
        test_assert(clash);
        /* mapping in a new frame should fail */
        err = seL4_ARM_Page_MapIO(clash, space, seL4_AllRights, IOPT_MAP_BASE);
        test_assert(err != seL4_NoError);

        delete_iospace(env, space);
    }
    return sel4test_get_result();
}
/* Map the same frame twice — once cached, once uncached — and verify
 * that explicit cache maintenance (Clean / CleanInvalidate / Invalidate)
 * makes writes through one alias visible through the other. */
static int test_page_flush(env_t env)
{
    seL4_CPtr frame_cap, frame_cap_cached;
    uintptr_t base, base_cached;
    volatile uint32_t *uncached, *cached;
    vka_t *vka = &env->vka;
    void *vaddr;
    void *vaddr_cached;
    reservation_t res, res_cached;
    int err;

    /* Reserve one page of vspace for each alias: uncached then cached. */
    res = vspace_reserve_range(&env->vspace, PAGE_SIZE_4K, seL4_AllRights, 0, &vaddr);
    assert(res.res);
    res_cached = vspace_reserve_range(&env->vspace, PAGE_SIZE_4K, seL4_AllRights, 1, &vaddr_cached);
    assert(res_cached.res);

    base = (uintptr_t)vaddr;
    assert(IS_ALIGNED(base, seL4_PageBits));
    base_cached = (uintptr_t)vaddr_cached;
    assert(IS_ALIGNED(base_cached, seL4_PageBits));
    uncached = (volatile uint32_t *)base;
    cached = (volatile uint32_t *)base_cached;

    /* One frame, two caps: the duplicate gets the cached mapping. */
    frame_cap = vka_alloc_frame_leaky(vka, PAGE_BITS_4K);
    test_assert(frame_cap != seL4_CapNull);
    frame_cap_cached = get_free_slot(env);
    test_assert(frame_cap_cached != seL4_CapNull);
    err = cnode_copy(env, frame_cap, frame_cap_cached, seL4_AllRights);
    test_assert(!err);

    /* map in a cap with cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &frame_cap_cached, NULL, vaddr_cached, 1, seL4_PageBits, res_cached);
    test_assert(!err);
    /* map in a cap without cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &frame_cap, NULL, vaddr, 1, seL4_PageBits, res);
    test_assert(!err);

    /* Clean makes data observable to non-cached page */
    *uncached = 0xC0FFEE;
    *cached = 0xDEADBEEF;
    test_assert(*uncached == 0xC0FFEE);
    test_assert(*cached == 0xDEADBEEF);
    err = seL4_ARM_Page_Clean_Data(frame_cap_cached, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*uncached == 0xDEADBEEF);
    test_assert(*cached == 0xDEADBEEF);

    /* Clean/Invalidate makes data observable to non-cached page */
    *uncached = 0xC0FFEE;
    *cached = 0xDEADBEEF;
    test_assert(*uncached == 0xC0FFEE);
    test_assert(*cached == 0xDEADBEEF);
    err = seL4_ARM_Page_CleanInvalidate_Data(frame_cap_cached, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*uncached == 0xDEADBEEF);
    test_assert(*cached == 0xDEADBEEF);

    /* Invalidate makes RAM data observable to cached page */
    *uncached = 0xC0FFEE;
    *cached = 0xDEADBEEF;
    test_assert(*uncached == 0xC0FFEE);
    test_assert(*cached == 0xDEADBEEF);
    err = seL4_ARM_Page_Invalidate_Data(frame_cap_cached, 0, PAGE_SIZE_4K);
    assert(!err);
    /* In case the invalidation performs an implicit clean, write a new
       value to RAM and make sure the cached read retrieves it.
       Remember to drain any store buffer! */
    *uncached = 0xBEEFCAFE;
#ifdef CONFIG_ARCH_ARM_V7A
    asm volatile ("dmb" ::: "memory");
#endif
    test_assert(*cached == 0xBEEFCAFE);
    test_assert(*uncached == 0xBEEFCAFE);
    return sel4test_get_result();
}
/* Map one frame of every supported size and check that page- and
 * directory-level cache maintenance operations succeed on each. */
static int test_large_page_flush_operation(env_t env)
{
    int num_frame_types = ARRAY_SIZE(frame_types);
    seL4_CPtr frames[num_frame_types];
    int error;
    vka_t *vka = &env->vka;
    bool pt_mapped = false;

    /* Grab some free vspace big enough to hold all the tests. */
    seL4_Word vstart;
    reservation_t reserve = vspace_reserve_range(&env->vspace, 2 * (1 << 25), seL4_AllRights,
                                                 1, (void **) &vstart);
    test_assert(reserve.res != 0);
    vstart = ALIGN_UP(vstart, (1 << 25));

    /* Create us some frames to play with. */
    for (int i = 0; i < num_frame_types; i++) {
        frames[i] = vka_alloc_frame_leaky(vka, CTZ(frame_types[i].size));
        /* Use test_assert for consistency with the rest of the test. */
        test_assert(frames[i]);
    }

    /* Also create a pagetable to map the pages into.
     * BUGFIX: the allocation result was previously unchecked; a failed
     * allocation would have been passed straight to PageTable_Map. */
    seL4_CPtr pt = vka_alloc_page_table_leaky(vka);
    test_assert(pt);

    /* Map the pages in. */
    for (int i = 0; i < num_frame_types; i++) {
        if (frame_types[i].need_pt && !pt_mapped) {
            /* Map the pagetable in. */
            error = seL4_ARCH_PageTable_Map(pt, env->page_directory,
                                            vstart + frame_types[i].vaddr_offset,
                                            seL4_ARCH_Default_VMAttributes);
            test_assert(error == 0);
            pt_mapped = true;
        }
        error = seL4_ARCH_Page_Map(frames[i], env->page_directory,
                                   vstart + frame_types[i].vaddr_offset,
                                   seL4_AllRights, seL4_ARCH_Default_VMAttributes);
        test_assert(error == 0);
    }

    /* See if we can invoke page flush on each of them */
    for (int i = 0; i < num_frame_types; i++) {
        error = seL4_ARM_Page_Invalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Clean_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_CleanInvalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Unify_Instruction(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Invalidate_Data(env->page_directory,
                                                       vstart + frame_types[i].vaddr_offset,
                                                       vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Clean_Data(env->page_directory,
                                                  vstart + frame_types[i].vaddr_offset,
                                                  vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_CleanInvalidate_Data(env->page_directory,
                                                            vstart + frame_types[i].vaddr_offset,
                                                            vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Unify_Instruction(env->page_directory,
                                                         vstart + frame_types[i].vaddr_offset,
                                                         vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
    }
    return sel4test_get_result();
}