/* Release every kernel object in the given array back to the allocator. */
static void free_objects(vka_object_t *objects, unsigned int num)
{
    for (unsigned int idx = 0; idx < num; idx++) {
        vka_free_object(&env.vka, &objects[idx]);
    }
}
static int test_kalloc(void) { test_start("kalloc"); /* Test that malloc works and the process server can allocate from static heap properly. */ for (int repeats = 0; repeats < 100; repeats++) { int *a = kmalloc(sizeof(int) * 10240); assert(a); for (int i = 0; i < 10240; i++) a[i] = i; for (int i = 0; i < 10240; i++) test_assert(a[i] == i); kfree(a); } /* Test that kernel obj allocation works and that the VKA allocator has been bootstrapped properly. */ vka_object_t obj[100]; int error = -1; for (int repeats = 0; repeats < 100; repeats++) { for (int i = 0; i < 100; i++) { error = vka_alloc_endpoint(&procServ.vka, &obj[i]); test_assert(!error); test_assert(obj[i].cptr != 0); } for (int i = 0; i < 100; i++) { vka_free_object(&procServ.vka, &obj[i]); } for (int i = 0; i < 100; i++) { error = vka_alloc_frame(&procServ.vka, seL4_PageBits, &obj[i]); test_assert(!error); test_assert(obj[i].cptr != 0); } for (int i = 0; i < 100; i++) { vka_free_object(&procServ.vka, &obj[i]); } } return test_success(); }
/* Stress the allocator: repeatedly allocate and release an endpoint. */
int test_allocator(env_t env)
{
    /* Perform a bunch of allocations and frees */
    for (int count = 0; count < MIN_EXPECTED_ALLOCATIONS; count++) {
        vka_object_t endpoint;
        int error = vka_alloc_endpoint(&env->vka, &endpoint);
        test_assert(error == 0);
        test_assert(endpoint.cptr != 0);
        vka_free_object(&env->vka, &endpoint);
    }
    return sel4test_get_result();
}
/**
 * Clean up after a thrd has finished.
 *
 * Frees the thread's local endpoint, then tears down either the process
 * (including its reserved regions) or the plain thread, as appropriate.
 */
void thrd_cleanup(thrd_env_t *env, thrd_t *thread)
{
    vka_free_object(&env->vka, &thread->local_endpoint);

    if (!thread->is_process) {
        sel4utils_clean_up_thread(&env->vka, &env->vspace, &thread->thread);
        return;
    }

    /* free the regions (no need to unmap, as the
     * entry address space / cspace is being destroyed */
    for (int r = 0; r < thread->num_regions; r++) {
        vspace_free_reservation(&thread->process.vspace, thread->regions[r].reservation);
    }

    thread->process.fault_endpoint.cptr = 0;
    sel4utils_destroy_process(&thread->process, &env->vka);
}
/**
 * Allocate frames and map them as fresh pages starting at a given vaddr.
 *
 * @param vspace     target vspace
 * @param vaddr      base virtual address to start mapping at
 * @param num_pages  number of pages to allocate and map
 * @param size_bits  size of each page in bits
 * @param rights     access rights for the mappings
 * @param cacheable  non-zero to map the pages cacheable
 *
 * @return seL4_NoError on success. On failure, any pages already mapped by
 *         this call are unmapped (and their frames freed) before returning.
 */
static int new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits,
                              seL4_CapRights rights, int cacheable)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    /* size_t avoids the signed/unsigned comparison against num_pages */
    size_t i;
    int error = seL4_NoError;
    void *start_vaddr = vaddr;

    for (i = 0; i < num_pages; i++) {
        vka_object_t object;
        if (vka_alloc_frame(data->vka, size_bits, &object) != 0) {
            /* abort! */
            ZF_LOGE("Failed to allocate page");
            error = seL4_NotEnoughMemory;
            break;
        }

        error = map_page(vspace, object.cptr, vaddr, rights, cacheable, size_bits);

        if (error == seL4_NoError) {
            error = update_entries(vspace, (uintptr_t) vaddr, object.cptr, size_bits, object.ut);
            /* Fix: "1 << size_bits" shifts a signed int, which is undefined
             * behaviour for size_bits >= 31; do the shift in uintptr_t. */
            vaddr = (void *) ((uintptr_t) vaddr + ((uintptr_t) 1 << size_bits));
        } else {
            /* mapping failed: the unmapped frame must be freed here, as the
             * cleanup below only handles pages that were mapped */
            vka_free_object(data->vka, &object);
            break;
        }
    }

    if (i < num_pages) {
        /* we failed, clean up successfully allocated pages */
        sel4utils_unmap_pages(vspace, start_vaddr, i, size_bits, data->vka);
    }

    return error;
}
/**
 * Copy a linked list of ATAGs into a guest VM's address space at 'addr'.
 *
 * A fresh 4K frame is allocated, temporarily mapped into the VMM's vspace so
 * the tags can be written, then unmapped and mapped into the VM at the page
 * containing 'addr'.
 *
 * @param vm    the guest VM
 * @param atags head of the ATAG list to copy
 * @param addr  address in the VM for the ATAGs; the whole list (plus the
 *              8-byte NULL terminator) must fit within the page containing it
 * @return 0 on success, -1 on failure
 */
int vm_copyout_atags(vm_t* vm, struct atag_list* atags, uint32_t addr)
{
    vspace_t *vm_vspace, *vmm_vspace;
    void* vm_addr, *vmm_addr, *buf;
    reservation_t res;
    vka_t* vka;
    vka_object_t frame;
    size_t size;
    struct atag_list* atag_cur;
    int err;

    vka = vm->vka;
    vm_addr = (void*)(addr & ~0xfff);
    vm_vspace = vm_get_vspace(vm);
    vmm_vspace = vm->vmm_vspace;

    /* Make sure we don't cross a page boundary
     * NOTE: the next page will usually be used by linux for PT! */
    for (size = 0, atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        size += atags_size_bytes(atag_cur);
    }
    size += 8; /* NULL tag */
    assert((addr & 0xfff) + size < 0x1000);

    /* Create a frame (and a copy for the VMM) */
    err = vka_alloc_frame(vka, 12, &frame);
    assert(!err);
    if (err) {
        return -1;
    }

    /* Map the frame to the VMM */
    vmm_addr = vspace_map_pages(vmm_vspace, &frame.cptr, NULL, seL4_AllRights, 1, 12, 0);
    assert(vmm_addr);
    if (vmm_addr == NULL) {
        /* Fix: previously only an assert() guarded this, so an NDEBUG build
         * would dereference NULL below and leak the frame. */
        vka_free_object(vka, &frame);
        return -1;
    }

    /* Copy in the atags */
    buf = vmm_addr + (addr & 0xfff);
    for (atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        int tag_size = atags_size_bytes(atag_cur);
        DVM("ATAG copy 0x%x<-0x%x %d\n", (uint32_t)buf, (uint32_t)atag_cur->hdr, tag_size);
        memcpy(buf, atag_cur->hdr, tag_size);
        buf += tag_size;
    }

    /* NULL tag terminator */
    memset(buf, 0, 8);

    /* Unmap the page and map it into the VM */
    vspace_unmap_pages(vmm_vspace, vmm_addr, 1, 12, NULL);
    res = vspace_reserve_range_at(vm_vspace, vm_addr, 0x1000, seL4_AllRights, 0);
    assert(res.res);
    if (!res.res) {
        vka_free_object(vka, &frame);
        return -1;
    }
    err = vspace_map_pages_at_vaddr(vm_vspace, &frame.cptr, NULL, vm_addr, 1, 12, res);
    vspace_free_reservation(vm_vspace, res);
    assert(!err);
    if (err) {
        printf("Failed to provide memory\n");
        vka_free_object(vka, &frame);
        return -1;
    }

    return 0;
}
/**
 * Map a frame capability into a page directory at the given virtual address,
 * allocating any intermediate paging structures (page table, and on some
 * configurations a page directory / PDPT) that the mapping requires.
 *
 * @param vka          allocator used for intermediate paging objects
 * @param pd           page directory (root paging structure) cap
 * @param frame        frame cap to map
 * @param vaddr        virtual address to map at
 * @param rights       access rights for the mapping
 * @param cacheable    non-zero to request a cacheable mapping (arch-dependent)
 * @param objects      out-array that receives paging objects allocated on the
 *                     caller's behalf; capacity given via *num_objects
 * @param num_objects  in: capacity of 'objects'; out: number actually stored
 *
 * @return seL4_NoError on success, or the seL4 error from the failing call.
 */
int sel4utils_map_page(vka_t *vka, seL4_CPtr pd, seL4_CPtr frame, void *vaddr,
                       seL4_CapRights rights, int cacheable, vka_object_t *objects, int *num_objects)
{
    assert(vka != NULL);
    assert(pd != 0);
    assert(frame != 0);
    assert(vaddr != 0);
    assert(rights != 0);
    assert(num_objects);

    seL4_ARCH_VMAttributes attr = 0;
    /* count of intermediate paging objects handed back to the caller */
    int num = 0;

#ifdef CONFIG_ARCH_IA32
    if (!cacheable) {
        attr = seL4_IA32_CacheDisabled;
    }
#elif CONFIG_ARCH_ARM /* CONFIG_ARCH_IA32 */
    if (cacheable) {
        attr = seL4_ARM_PageCacheable;
    }
#endif /* CONFIG_ARCH_ARM */
    /* First mapping attempt; the specific failure code below tells us which
     * paging level is missing. */
    int error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word) vaddr, rights, attr);
#ifdef CONFIG_X86_64
    /* x86_64 has a deeper paging hierarchy: fill in a missing PDPT or PD and
     * retry the frame mapping until it succeeds or a hard error occurs.
     * NOTE(review): 'pagetable' is used here without a visible declaration in
     * this chunk — presumably declared elsewhere; confirm this path compiles
     * under CONFIG_X86_64. */
page_map_retry:
    if (error == seL4_FailedLookupPDPT) {
        error = vka_alloc_page_directory_pointer_table(vka, pagetable);
        if (!error) {
            error = seL4_ARCH_PageDirectoryPointerTable_Map(pagetable->cptr, pd, (seL4_Word)vaddr,
                                                            seL4_ARCH_Default_VMAttributes);
        } else {
            LOG_ERROR("Page directory pointer table allocation failed %d", error);
        }
        if (!error) {
            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word)vaddr, rights, attr);
            if (error != seL4_NoError) {
                /* the next missing level (e.g. the PD) is handled on retry */
                goto page_map_retry;
            }
        } else {
            LOG_ERROR("Page directory pointer table mapping failed %d\n", error);
        }
    }
    if (error == seL4_FailedLookupPD) {
        error = vka_alloc_page_directory(vka, pagetable);
        if (!error) {
            error = seL4_ARCH_PageDirectory_Map(pagetable->cptr, pd, (seL4_Word)vaddr,
                                                seL4_ARCH_Default_VMAttributes);
        } else {
            LOG_ERROR("Page direcotry allocation failed %d\n", error);
        }
        if (!error) {
            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word)vaddr, rights, attr);
            if (error != seL4_NoError) {
                goto page_map_retry;
            }
        } else {
            LOG_ERROR("Page directory mapping failed %d\n", error);
        }
    }
#endif
    if (error == seL4_FailedLookup) {
        /* need a page table, allocate one */
        assert(objects != NULL);
        assert(*num_objects > 0);
        error = vka_alloc_page_table(vka, &objects[0]);
        /* map in the page table */
        if (!error) {
            error = seL4_ARCH_PageTable_Map(objects[0].cptr, pd, (seL4_Word) vaddr,
                                            seL4_ARCH_Default_VMAttributes);
        } else {
            LOG_ERROR("Page table allocation failed, %d", error);
        }
        if (error == seL4_DeleteFirst) {
            /* It's possible that in allocated the page table, we needed to allocate/map
             * in some memory, which caused a page table to get mapped in at the
             * same location we are wanting one. If this has happened then we can just
             * delete this page table and try the frame mapping again */
            vka_free_object(vka, &objects[0]);
            error = seL4_NoError;
        } else {
            num = 1;
        }
#ifdef CONFIG_PAE_PAGING
        if (error == seL4_FailedLookup) {
            /* need a page directory, allocate one */
            assert(*num_objects > 1);
            error = vka_alloc_page_directory(vka, &objects[1]);
            if (!error) {
                error = seL4_IA32_PageDirectory_Map(objects[1].cptr, pd, (seL4_Word) vaddr,
                                                    seL4_ARCH_Default_VMAttributes);
            } else {
                LOG_ERROR("Page directory allocation failed, %d", error);
            }
            if (error == seL4_DeleteFirst) {
                /* same benign race as above: a PD appeared underneath us */
                vka_free_object(vka, &objects[1]);
                error = seL4_NoError;
            } else {
                num = 2;
            }
            if (!error) {
                error = seL4_ARCH_PageTable_Map(objects[0].cptr, pd, (seL4_Word) vaddr,
                                                seL4_ARCH_Default_VMAttributes);
            } else {
                LOG_ERROR("Page directory mapping failed, %d", error);
            }
        }
#endif
        /* now try mapping the frame in again if nothing else went wrong */
        if (!error) {
            error = seL4_ARCH_Page_Map(frame, pd, (seL4_Word) vaddr, rights, attr);
        } else {
            LOG_ERROR("Page table mapping failed, %d", error);
        }
    }

    if (error != seL4_NoError) {
        LOG_ERROR("Failed to map page at address %p with cap %"PRIuPTR", error: %d",
                  vaddr, frame, error);
    }
    *num_objects = num;
    return error;
}
/**
 * Spawn the serial server as a thread in the parent's vspace/cspace.
 *
 * Allocates the server's listening endpoint, a badged copy for the parent,
 * and cnode slots for receiving client shmem frame caps; then configures and
 * starts the server thread and blocks until the server replies to say
 * whether it bound to the platform serial device.
 *
 * @param parent_simple  parent's simple interface
 * @param parent_vka     parent's allocator (source of all server resources)
 * @param parent_vspace  parent's vspace the server thread will run in
 * @param priority       priority for the server thread
 * @return 0 on success, otherwise a seL4_Error value; on failure all
 *         partially-acquired resources are released via the 'out' path.
 */
seL4_Error serial_server_parent_spawn_thread(simple_t *parent_simple, vka_t *parent_vka,
                                             vspace_t *parent_vspace, uint8_t priority)
{
    const size_t shmem_max_size = SERIAL_SERVER_SHMEM_MAX_SIZE;
    seL4_Error error;
    size_t shmem_max_n_pages;
    cspacepath_t parent_cspace_cspath;
    seL4_MessageInfo_t tag;

    if (parent_simple == NULL || parent_vka == NULL || parent_vspace == NULL) {
        return seL4_InvalidArgument;
    }

    /* Start from a zeroed server context so the cleanup path can tell which
     * resources were actually acquired. */
    memset(get_serial_server(), 0, sizeof(serial_server_context_t));

    /* Get a CPtr to the parent's root cnode. */
    shmem_max_n_pages = BYTES_TO_4K_PAGES(shmem_max_size);
    vka_cspace_make_path(parent_vka, 0, &parent_cspace_cspath);
    get_serial_server()->server_vka = parent_vka;
    get_serial_server()->server_vspace = parent_vspace;
    get_serial_server()->server_cspace = parent_cspace_cspath.root;
    get_serial_server()->server_simple = parent_simple;

    /* Allocate the Endpoint that the server will be listening on. */
    error = vka_alloc_endpoint(parent_vka, &get_serial_server()->server_ep_obj);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: failed to alloc endpoint, err=%d.", error);
        return error;
    }

    /* And also allocate a badged copy of the Server's endpoint that the Parent
     * can use to send to the Server. This is used to allow the Server to report
     * back to the Parent on whether or not the Server successfully bound to a
     * platform serial driver.
     *
     * This badged endpoint will be reused by the library as the Parent's badged
     * Endpoint cap, if the Parent itself ever chooses to connect() to the
     * Server later on.
     */
    get_serial_server()->parent_badge_value = serial_server_badge_value_alloc();
    if (get_serial_server()->parent_badge_value == SERIAL_SERVER_BADGE_VALUE_EMPTY) {
        error = seL4_NotEnoughMemory;
        goto out;
    }
    error = vka_mint_object(parent_vka, &get_serial_server()->server_ep_obj,
                            &get_serial_server()->_badged_server_ep_cspath,
                            seL4_AllRights,
                            seL4_CapData_Badge_new(get_serial_server()->parent_badge_value));
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: Failed to mint badged Endpoint cap to "
                "server.\n"
                "\tParent cannot confirm Server thread successfully spawned.");
        goto out;
    }

    /* Allocate enough Cnode slots in our CSpace to enable us to receive
     * frame caps from our clients, sufficient to cover "shmem_max_size".
     * The problem here is that we're sort of forced to assume that we get
     * these slots contiguously. If they're not, we have a problem.
     *
     * If a client tries to send us too many frames, we respond with an error,
     * and indicate our shmem_max_size in the SSMSGREG_RESPONSE
     * message register.
     */
    get_serial_server()->frame_cap_recv_cspaths = calloc(shmem_max_n_pages,
                                                         sizeof(cspacepath_t));
    if (get_serial_server()->frame_cap_recv_cspaths == NULL) {
        error = seL4_NotEnoughMemory;
        goto out;
    }

    for (size_t i = 0; i < shmem_max_n_pages; i++) {
        error = vka_cspace_alloc_path(parent_vka,
                                      &get_serial_server()->frame_cap_recv_cspaths[i]);
        if (error != 0) {
            ZF_LOGE(SERSERVP"spawn_thread: Failed to alloc enough cnode slots "
                    "to receive shmem frame caps equal to %d bytes.",
                    shmem_max_size);
            goto out;
        }
    }

    error = sel4utils_configure_thread(parent_vka, parent_vspace, parent_vspace,
                                       get_serial_server()->server_ep_obj.cptr,
                                       priority,
                                       parent_cspace_cspath.root, seL4_NilData,
                                       &get_serial_server()->server_thread);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: sel4utils_configure_thread failed "
                "with %d.", error);
        goto out;
    }

    error = sel4utils_start_thread(&get_serial_server()->server_thread,
                                   &serial_server_main,
                                   NULL, NULL, 1);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: sel4utils_start_thread failed with "
                "%d.", error);
        goto out;
    }

    /* When the Server is spawned, it will reply to tell us whether or not it
     * successfully bound itself to the platform serial device. Block here
     * and wait for that reply.
     */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_SERVER_SPAWN_SYNC_REQ);
    tag = seL4_MessageInfo_new(0, 0, 0, SSMSGREG_SPAWN_SYNC_REQ_END);
    tag = seL4_Call(get_serial_server()->_badged_server_ep_cspath.capPtr, tag);

    /* Did all go well with the server?
     */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_SERVER_SPAWN_SYNC_ACK) {
        ZF_LOGE(SERSERVP"spawn_thread: Server thread sync message after spawn "
                "was not a SYNC_ACK as expected.");
        error = seL4_InvalidArgument;
        goto out;
    }
    /* The message label carries the server's bind result (0 = bound OK). */
    error = seL4_MessageInfo_get_label(tag);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: Server thread failed to bind to the "
                "platform serial device.");
        goto out;
    }

    get_serial_server()->shmem_max_size = shmem_max_size;
    get_serial_server()->shmem_max_n_pages = shmem_max_n_pages;
    return 0;

out:
    /* Error path: release everything acquired so far. */
    if (get_serial_server()->frame_cap_recv_cspaths != NULL) {
        for (size_t i = 0; i < shmem_max_n_pages; i++) {
            /* Since the array was allocated with calloc(), it was zero'd out. So
             * those indexes that didn't get allocated will have NULL in them.
             * Break early on the first index that has NULL.
             */
            if (get_serial_server()->frame_cap_recv_cspaths[i].capPtr == 0) {
                break;
            }
            vka_cspace_free_path(parent_vka,
                                 get_serial_server()->frame_cap_recv_cspaths[i]);
        }
    }
    free(get_serial_server()->frame_cap_recv_cspaths);
    if (get_serial_server()->_badged_server_ep_cspath.capPtr != 0) {
        vka_cspace_free_path(parent_vka, get_serial_server()->_badged_server_ep_cspath);
    }
    if (get_serial_server()->parent_badge_value != SERIAL_SERVER_BADGE_VALUE_EMPTY) {
        serial_server_badge_value_free(get_serial_server()->parent_badge_value);
    }
    vka_free_object(parent_vka, &get_serial_server()->server_ep_obj);
    return error;
}
/**
 * Test vspace window creation, frame mapping and unmapping via the vs_*
 * interface: invalid-address rejection, double-map detection, and full
 * map/unmap/remap cycles across a window, followed by teardown.
 */
int test_vspace_mapping(void)
{
    test_start("vspace mapping");

    /* Create a vspace for testing mapping. */
    struct vs_vspace vs;
    int error = vs_initialise(&vs, 31337);
    test_assert(error == ESUCCESS);
    test_assert(vs.magic == REFOS_VSPACE_MAGIC);

    /* Create a memory segment window. */
    const vaddr_t window = 0x10000;
    const vaddr_t windowSize = 0x8000;
    int windowID;
    error = vs_create_window(&vs, window, windowSize, W_PERMISSION_WRITE | W_PERMISSION_READ,
                             true, &windowID);
    test_assert(error == ESUCCESS);
    test_assert(windowID != W_INVALID_WINID);

    /* Allocate a frame to map. */
    vka_object_t frame;
    error = vka_alloc_frame(&procServ.vka, seL4_PageBits, &frame);
    test_assert(error == ESUCCESS);
    test_assert(frame.cptr != 0);

    /* Try to map in some invalid spots (outside / past the window). */
    tvprintf("trying mapping into invalid spots...\n");
    error = vs_map(&vs, 0x9A0, &frame.cptr, 1);
    test_assert(error == EINVALIDWINDOW);
    error = vs_map(&vs, window - 0x9A0, &frame.cptr, 1);
    test_assert(error == EINVALIDWINDOW);
    error = vs_map(&vs, window + windowSize + 0x1, &frame.cptr, 1);
    test_assert(error == EINVALIDWINDOW);
    error = vs_map(&vs, window + windowSize + 0x5123, &frame.cptr, 1);
    test_assert(error == EINVALIDWINDOW);

    /* Try to unmap from some invalid spots. */
    tvprintf("trying unmapping from invalid spots...\n");
    error = vs_unmap(&vs, window - 0x9A0, 1);
    test_assert(error == EINVALIDWINDOW);
    error = vs_unmap(&vs, window + windowSize + 0x423, 5);
    test_assert(error == EINVALIDWINDOW);
    error = vs_unmap(&vs, window, windowSize + 1);
    test_assert(error == EINVALIDWINDOW);

    /* Map the frame many times in all the valid spots. */
    for (vaddr_t waddr = window; waddr < window + windowSize; waddr += (1 << seL4_PageBits)) {
        tvprintf("trying mapping into valid spot 0x%x...\n", (uint32_t) waddr);

        /* Map the frame. */
        error = vs_map(&vs, waddr, &frame.cptr, 1);
        test_assert(error == ESUCCESS);

        /* Try to map frame here again. Should complain.
         */
        error = vs_map(&vs, waddr, &frame.cptr, 1);
        test_assert(error == EUNMAPFIRST);
    }

    /* Unmap and remap the frame many times in all the valid spots. */
    for (vaddr_t waddr = window; waddr < window + windowSize; waddr += (1 << seL4_PageBits)) {
        tvprintf("trying remapping into valid spot 0x%x...\n", (uint32_t) waddr);

        /* Unmap the frame. */
        error = vs_unmap(&vs, waddr, 1);
        test_assert(error == ESUCCESS);

        /* Remap the frame. */
        error = vs_map(&vs, waddr, &frame.cptr, 1);
        test_assert(error == ESUCCESS);
    }

    /* Clean up. Note that deleting the vspace should delete the created window. */
    tvprintf("cleaning up everything in vspace...\n");
    vs_unref(&vs);
    test_assert(vs.magic != REFOS_VSPACE_MAGIC);
    vka_free_object(&procServ.vka, &frame);

    return test_success();
}