static int _dma_morecore(size_t min_size, int cached, struct dma_mem_descriptor* dma_desc) { static uint32_t _vaddr = DMA_VSTART; struct seL4_ARM_Page_GetAddress getaddr_ret; seL4_CPtr frame; seL4_CPtr pd; vka_t* vka; int err; pd = simple_get_pd(&_simple); vka = &_vka; /* Create a frame */ frame = vka_alloc_frame_leaky(vka, 12); assert(frame); if (!frame) { return -1; } /* Try to map the page */ err = seL4_ARM_Page_Map(frame, pd, _vaddr, seL4_AllRights, 0); if (err) { seL4_CPtr pt; /* Allocate a page table */ pt = vka_alloc_page_table_leaky(vka); if (!pt) { printf("Failed to create page table\n"); return -1; } /* Map the page table */ err = seL4_ARM_PageTable_Map(pt, pd, _vaddr, 0); if (err) { printf("Failed to map page table\n"); return -1; } /* Try to map the page again */ err = seL4_ARM_Page_Map(frame, pd, _vaddr, seL4_AllRights, 0); if (err) { printf("Failed to map page\n"); return -1; } } /* Find the physical address of the page */ getaddr_ret = seL4_ARM_Page_GetAddress(frame); assert(!getaddr_ret.error); /* Setup dma memory description */ dma_desc->vaddr = _vaddr; dma_desc->paddr = getaddr_ret.paddr; dma_desc->cached = 0; dma_desc->size_bits = 12; dma_desc->alloc_cookie = (void*)frame; dma_desc->cookie = NULL; /* Advance the virtual address marker */ _vaddr += BIT(12); return 0; }
/*
 * Map a frame of every supported size into a fresh 32 MiB-aligned region,
 * then exercise each ARM cache-maintenance invocation on it -- both through
 * the page capability (offset/size) and through the page directory
 * (start/end virtual address range) -- asserting that every invocation
 * succeeds.
 *
 * env - the sel4test environment providing the vka, vspace and page
 *       directory used for the mappings.
 *
 * Returns the accumulated sel4test result.
 */
static int test_large_page_flush_operation(env_t env)
{
    int num_frame_types = ARRAY_SIZE(frame_types);
    seL4_CPtr frames[num_frame_types];
    int error;
    vka_t *vka = &env->vka;
    bool pt_mapped = false;

    /* Grab some free vspace big enough to hold all the tests.  We reserve
     * twice the 32 MiB alignment so that ALIGN_UP below still lands inside
     * the reservation. */
    seL4_Word vstart;
    reservation_t reserve = vspace_reserve_range(&env->vspace, 2 * (1 << 25),
                                                 seL4_AllRights, 1, (void **) &vstart);
    test_assert(reserve.res != 0);
    vstart = ALIGN_UP(vstart, (1 << 25));

    /* Create us some frames to play with. */
    for (int i = 0; i < num_frame_types; i++) {
        frames[i] = vka_alloc_frame_leaky(vka, CTZ(frame_types[i].size));
        assert(frames[i]);
    }

    /* Also create a pagetable to map the small pages into.  The original
     * code never checked this allocation before using the cap. */
    seL4_CPtr pt = vka_alloc_page_table_leaky(vka);
    test_assert(pt != 0);

    /* Map the pages in; the page table is mapped lazily the first time a
     * frame type actually needs one. */
    for (int i = 0; i < num_frame_types; i++) {
        if (frame_types[i].need_pt && !pt_mapped) {
            /* Map the pagetable in. */
            error = seL4_ARCH_PageTable_Map(pt, env->page_directory,
                                            vstart + frame_types[i].vaddr_offset,
                                            seL4_ARCH_Default_VMAttributes);
            test_assert(error == 0);
            pt_mapped = true;
        }
        error = seL4_ARCH_Page_Map(frames[i], env->page_directory,
                                   vstart + frame_types[i].vaddr_offset,
                                   seL4_AllRights, seL4_ARCH_Default_VMAttributes);
        test_assert(error == 0);
    }

    /* See if we can invoke every flush operation on each frame. */
    for (int i = 0; i < num_frame_types; i++) {
        /* Page invocations take (offset, size) within the frame; page
         * directory invocations take a [start, end) virtual address range. */
        seL4_Word start = vstart + frame_types[i].vaddr_offset;
        seL4_Word end = start + frame_types[i].size;

        error = seL4_ARM_Page_Invalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Clean_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_CleanInvalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Unify_Instruction(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);

        error = seL4_ARM_PageDirectory_Invalidate_Data(env->page_directory, start, end);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Clean_Data(env->page_directory, start, end);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_CleanInvalidate_Data(env->page_directory, start, end);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Unify_Instruction(env->page_directory, start, end);
        test_assert(error == 0);
    }

    return sel4test_get_result();
}