static void map_unity_ram(vm_t* vm)
{
    /* Dimensions of physical memory that we'll use. Note that we do not
     * map the entirety of RAM. */
    static const uintptr_t paddr_start = RAM_BASE;
    static const uintptr_t paddr_end = 0x60000000;

    int err;
    uintptr_t start;
    reservation_t res;
    unsigned int bits = 21; /* map in 2 MiB chunks */

    res = vspace_reserve_range_at(&vm->vm_vspace, (void*)paddr_start,
                                  paddr_end - paddr_start, seL4_AllRights, 1);
    assert(res.res);
    /* Walk the physical range, fetching the frame cap for each 2 MiB chunk
     * and mapping it 1:1 into the VM's vspace. The loop ends when simple
     * has no frame cap for the next address. */
    for (start = paddr_start;; start += BIT(bits)) {
        cspacepath_t frame;
        err = vka_cspace_alloc_path(vm->vka, &frame);
        assert(!err);
        err = simple_get_frame_cap(vm->simple, (void*)start, bits, &frame);
        if (err) {
            /* No more frames: free the unused slot and stop. */
            vka_cspace_free(vm->vka, frame.capPtr);
            break;
        }
        err = vspace_map_pages_at_vaddr(&vm->vm_vspace, &frame.capPtr, &bits,
                                        (void*)start, 1, bits, res);
        assert(!err);
    }
}
static void map_unity_ram(vm_t* vm)
{
    int err;
    uintptr_t start;
    reservation_t res;
    unsigned int bits = 21;

    res = vspace_reserve_range_at(&vm->vm_vspace, (void*)RAM_START,
                                  RAM_END - RAM_START, seL4_AllRights, 1);
    assert(res.res);
    for (start = RAM_START;; start += BIT(bits)) {
        cspacepath_t frame;
        err = vka_cspace_alloc_path(vm->vka, &frame);
        assert(!err);
        err = simple_get_frame_cap(vm->simple, (void*)start, bits, &frame);
        if (err) {
            vka_cspace_free(vm->vka, frame.capPtr);
            break;
        }
        err = vspace_map_pages_at_vaddr(&vm->vm_vspace, &frame.capPtr, &bits,
                                        (void*)start, 1, bits, res);
        assert(!err);
    }
}
int proxy_vka_utspace_alloc(void *data, const cspacepath_t *dest, seL4_Word type,
                            seL4_Word size_bits, uint32_t *res)
{
    proxy_vka_t *vka = (proxy_vka_t*)data;
    int error;
    uint32_t cookie;
    ut_node_t *node = allocman_mspace_alloc(vka->allocman, sizeof(*node), &error);
    if (!node) {
        return -1;
    }
    if (type == seL4_IA32_4K && vka->have_mem && vka->vspace.map_pages_at_vaddr && !vka->recurse) {
        /* Try to satisfy 4K frame requests from the dedicated RAM untyped pool. */
        cookie = _utspace_trickle_alloc(vka->allocman, &vka->ram_ut_manager,
                                        seL4_PageBits, seL4_IA32_4K, dest, &error);
        if (error != 0) {
            /* Pool exhausted; fall back to the regular vka from now on. */
            vka->have_mem = 0;
        } else {
            node->frame = 1;
            node->cookie = cookie;
            *res = (uint32_t)node;
            /* Briefly map this frame in so we can zero it. Avoid recursively
             * allocating for book keeping. */
            assert(!vka->recurse);
            vka->recurse = 1;
            error = vspace_map_pages_at_vaddr(&vka->vspace, (seL4_CPtr*)&dest->capPtr,
                                              NULL, vka->temp_map_address, 1,
                                              PAGE_BITS_4K, vka->temp_map_reservation);
            assert(!error);
            memset(vka->temp_map_address, 0, PAGE_SIZE_4K);
            vspace_unmap_pages(&vka->vspace, vka->temp_map_address, 1,
                               PAGE_BITS_4K, VSPACE_PRESERVE);
            vka->recurse = 0;
            return 0;
        }
    }
    /* Everything else goes to the wrapped vka. */
    error = vka_utspace_alloc(&vka->regular_vka, dest, type, size_bits, &cookie);
    if (!error) {
        node->frame = 0;
        node->cookie = cookie;
        *res = (uint32_t)node;
        return 0;
    }
    allocman_mspace_free(vka->allocman, node, sizeof(*node));
    return error;
}
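/* A minimal sketch, not from the original source, of how the proxy
 * allocator above could be installed into a libsel4vka interface struct.
 * It assumes a proxy_vka_t that has already been initialised elsewhere;
 * `data` and `utspace_alloc` are standard vka_t members, but
 * proxy_vka_install itself is a hypothetical helper. */
static void proxy_vka_install(proxy_vka_t *proxy, vka_t *vka)
{
    vka->data = proxy;
    vka->utspace_alloc = proxy_vka_utspace_alloc;
    /* The remaining vka_t function pointers (cspace_alloc, cspace_free,
     * utspace_free, ...) would forward to proxy->regular_vka in the same
     * fashion. */
}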
static int test_page_flush(env_t env)
{
    seL4_CPtr frame, framec;
    uintptr_t vstart, vstartc;
    volatile uint32_t *ptr, *ptrc;
    vka_t *vka;
    int err;

    vka = &env->vka;

    void *vaddr;
    void *vaddrc;
    reservation_t reservation, reservationc;

    reservation = vspace_reserve_range(&env->vspace, PAGE_SIZE_4K, seL4_AllRights, 0, &vaddr);
    assert(reservation.res);
    reservationc = vspace_reserve_range(&env->vspace, PAGE_SIZE_4K, seL4_AllRights, 1, &vaddrc);
    assert(reservationc.res);

    vstart = (uintptr_t)vaddr;
    assert(IS_ALIGNED(vstart, seL4_PageBits));
    vstartc = (uintptr_t)vaddrc;
    assert(IS_ALIGNED(vstartc, seL4_PageBits));

    ptr = (volatile uint32_t*)vstart;
    ptrc = (volatile uint32_t*)vstartc;

    /* Create a frame */
    frame = vka_alloc_frame_leaky(vka, PAGE_BITS_4K);
    test_assert(frame != seL4_CapNull);

    /* Duplicate the cap */
    framec = get_free_slot(env);
    test_assert(framec != seL4_CapNull);
    err = cnode_copy(env, frame, framec, seL4_AllRights);
    test_assert(!err);

    /* map in a cap with cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &framec, NULL, vaddrc, 1, seL4_PageBits, reservationc);
    test_assert(!err);
    /* map in a cap without cacheability */
    err = vspace_map_pages_at_vaddr(&env->vspace, &frame, NULL, vaddr, 1, seL4_PageBits, reservation);
    test_assert(!err);

    /* Clean makes data observable to non-cached page */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_Clean_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*ptr == 0xDEADBEEF);
    test_assert(*ptrc == 0xDEADBEEF);

    /* Clean/Invalidate makes data observable to non-cached page */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_CleanInvalidate_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);
    test_assert(*ptr == 0xDEADBEEF);
    test_assert(*ptrc == 0xDEADBEEF);

    /* Invalidate makes RAM data observable to cached page */
    *ptr = 0xC0FFEE;
    *ptrc = 0xDEADBEEF;
    test_assert(*ptr == 0xC0FFEE);
    test_assert(*ptrc == 0xDEADBEEF);
    err = seL4_ARM_Page_Invalidate_Data(framec, 0, PAGE_SIZE_4K);
    assert(!err);
    /* In case the invalidation performs an implicit clean, write a new value
     * to RAM and make sure the cached read retrieves it.
     * Remember to drain any store buffer! */
    *ptr = 0xBEEFCAFE;
#ifdef CONFIG_ARCH_ARM_V7A
    asm volatile ("dmb" ::: "memory");
#endif
    test_assert(*ptrc == 0xBEEFCAFE);
    test_assert(*ptr == 0xBEEFCAFE);
    return sel4test_get_result();
}
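/* Hedged sketch: sel4test discovers tests via its DEFINE_TEST registration
 * macro. The test name and description below are illustrative, not taken
 * from the original source, and the macro's exact arity varies across
 * sel4test versions. */
DEFINE_TEST(CACHEFLUSH0001, "Test cache maintenance on cached and uncached mappings of a frame", test_page_flush)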
int vm_copyout_atags(vm_t* vm, struct atag_list* atags, uint32_t addr)
{
    vspace_t *vm_vspace, *vmm_vspace;
    void* vm_addr, *vmm_addr, *buf;
    reservation_t res;
    vka_t* vka;
    vka_object_t frame;
    size_t size;
    struct atag_list* atag_cur;
    int err;

    vka = vm->vka;
    vm_addr = (void*)(addr & ~0xfff);
    vm_vspace = vm_get_vspace(vm);
    vmm_vspace = vm->vmm_vspace;

    /* Make sure we don't cross a page boundary
     * NOTE: the next page will usually be used by linux for PT! */
    for (size = 0, atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        size += atags_size_bytes(atag_cur);
    }
    size += 8; /* NULL tag */
    assert((addr & 0xfff) + size < 0x1000);

    /* Create a frame (and a copy for the VMM) */
    err = vka_alloc_frame(vka, 12, &frame);
    assert(!err);
    if (err) {
        return -1;
    }

    /* Map the frame to the VMM */
    vmm_addr = vspace_map_pages(vmm_vspace, &frame.cptr, NULL, seL4_AllRights, 1, 12, 0);
    assert(vmm_addr);

    /* Copy in the atags */
    buf = vmm_addr + (addr & 0xfff);
    for (atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        int tag_size = atags_size_bytes(atag_cur);
        DVM("ATAG copy 0x%x<-0x%x %d\n", (uint32_t)buf, (uint32_t)atag_cur->hdr, tag_size);
        memcpy(buf, atag_cur->hdr, tag_size);
        buf += tag_size;
    }

    /* NULL tag terminator */
    memset(buf, 0, 8);

    /* Unmap the page and map it into the VM */
    vspace_unmap_pages(vmm_vspace, vmm_addr, 1, 12, NULL);
    res = vspace_reserve_range_at(vm_vspace, vm_addr, 0x1000, seL4_AllRights, 0);
    assert(res.res);
    if (!res.res) {
        vka_free_object(vka, &frame);
        return -1;
    }
    err = vspace_map_pages_at_vaddr(vm_vspace, &frame.cptr, NULL, vm_addr, 1, 12, res);
    vspace_free_reservation(vm_vspace, res);
    assert(!err);
    if (err) {
        printf("Failed to provide memory\n");
        vka_free_object(vka, &frame);
        return -1;
    }

    return 0;
}
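/* A minimal usage sketch, not from the original source: ARM Linux
 * conventionally expects its ATAG list a small fixed offset (0x100) above
 * the base of guest RAM. `install_atags` and `ram_base` are hypothetical
 * names introduced here for illustration. */
static int install_atags(vm_t *vm, struct atag_list *atags, uint32_t ram_base)
{
    /* The list fits well within one page at this offset, satisfying the
     * page-boundary assertion in vm_copyout_atags. */
    return vm_copyout_atags(vm, atags, ram_base + 0x100);
}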