/*
 * Guest syscall handler: translate an intermediate physical address (IPA),
 * passed in r0, into a real physical address, returned in r0.
 *
 * If nothing is mapped at the IPA yet, the page is faulted in on demand via
 * map_vm_ram(); on mapping failure an error is printed and r0 is left
 * unchanged.
 */
static void sys_ipa_to_pa(vm_t* vm, seL4_UserContext* regs)
{
    uint32_t ipa = regs->r0;
    seL4_CPtr frame_cap = vspace_get_cap(vm_get_vspace(vm), (void*)ipa);

    if (frame_cap == seL4_CapNull) {
        /* No frame backs this IPA yet — map fresh RAM there now. */
        void* mapping = map_vm_ram(vm, ipa);
        if (mapping == NULL) {
            printf("Could not map address for IPA translation\n");
            return;
        }
        /* The mapping must have installed a cap for this IPA. */
        frame_cap = vspace_get_cap(vm_get_vspace(vm), (void*)ipa);
        assert(frame_cap != seL4_CapNull);
    }

    /* Ask the kernel for the frame's physical address. */
    seL4_ARM_Page_GetAddress_t res = seL4_ARM_Page_GetAddress(frame_cap);
    assert(!res.error);
    DSTRACE("IPA translation syscall from [%s]: 0x%08x->0x%08x\n",
            vm->name, ipa, res.paddr);
    regs->r0 = res.paddr;
}
/*
 * Unmap a duplicated mapping and destroy the frame cap that backed it.
 *
 * @param vka        allocator used to resolve and free the cap slot
 * @param vspace     vspace the mapping lives in
 * @param mapping    virtual address of the mapping to remove
 * @param size_bits  log2 size of the mapped frame
 *
 * The page is unmapped with VSPACE_PRESERVE so the underlying frame object
 * itself is not freed — only this duplicate cap is deleted and its cslot
 * returned to the allocator.
 */
void sel4utils_unmap_dup(vka_t *vka, vspace_t *vspace, void *mapping, size_t size_bits)
{
    /* Grab a copy of the cap backing this mapping. */
    seL4_CPtr copy = vspace_get_cap(vspace, mapping);
    cspacepath_t copy_path;
    assert(copy);
    /* Now free the mapping (preserving the reservation/frame). */
    vspace_unmap_pages(vspace, mapping, 1, size_bits, VSPACE_PRESERVE);
    /* Delete the duplicate cap and free its slot.
     * (Fixed: "&copy_path" had been mangled into the "©" HTML entity,
     * which does not compile.) */
    vka_cspace_make_path(vka, copy, &copy_path);
    vka_cnode_delete(&copy_path);
    vka_cspace_free(vka, copy);
}
/*
 * Translate a guest IPA range [ipa_base, ipa_base + size) to the physical
 * address backing ipa_base.
 *
 * Walks the range mapping-by-mapping and verifies that the whole range is
 * mapped and physically contiguous. Returns the physical address
 * corresponding to ipa_base, or 0 if any part of the range is unmapped,
 * fails translation, or is not contiguous with the first mapping.
 */
uintptr_t vm_ipa_to_pa(vm_t* vm, uintptr_t ipa_base, size_t size)
{
    seL4_ARM_Page_GetAddress_t ret;
    uintptr_t pa_base = 0;
    uintptr_t ipa;
    vspace_t *vspace;
    vspace = vm_get_vspace(vm);
    ipa = ipa_base;
    do {
        seL4_CPtr cap;
        int bits;
        /* Find the cap */
        cap = vspace_get_cap(vspace, (void*)ipa);
        if (cap == seL4_CapNull) {
            return 0;
        }
        /* Find mapping size */
        bits = vspace_get_cookie(vspace, (void*)ipa);
        /* Only 4K small pages (12) and 2M large pages (21) are expected here. */
        assert(bits == 12 || bits == 21);
        /* Find the physical address */
        ret = seL4_ARM_Page_GetAddress(cap);
        if (ret.error) {
            return 0;
        }
        if (ipa == ipa_base) {
            /* Record the result: ret.paddr is frame-aligned, so add the
             * offset of ipa_base within its frame. */
            pa_base = ret.paddr + (ipa & MASK(bits));
            /* From here on, ipa and ret.paddr will be aligned.
             * (pa_base keeps the offset, which cancels out of the
             * contiguity check below.) */
            ipa &= ~MASK(bits);
        } else {
            /* Check for a contiguous mapping: the PA delta must match the
             * IPA delta from the start of the range. */
            if (ret.paddr - pa_base != ipa - ipa_base) {
                return 0;
            }
        }
        /* Advance by one mapping. */
        ipa += BIT(bits);
    } while (ipa - ipa_base < size);
    return pa_base;
}
/*
 * Establish a connection to the serial server over the badged endpoint.
 *
 * Allocates SERIAL_SERVER_SHMEM_MAX_SIZE of shared memory in the client's
 * vspace, sends the backing frame caps to the server in a CONNECT_REQ IPC,
 * and waits for a CONNECT_ACK. On success, fills in *conn (shmem pointer,
 * shmem size, badged endpoint cspath) and returns seL4_NoError; on failure,
 * unmaps/frees the shmem pages and returns an seL4_Error-style code.
 *
 * @param badged_server_ep_cap badged endpoint cap to the serial server
 * @param client_vka           client's cap/object allocator
 * @param client_vspace        client's vspace to place the shmem in
 * @param conn                 out-parameter: connection context to populate
 */
int serial_server_client_connect(seL4_CPtr badged_server_ep_cap,
                                 vka_t *client_vka,
                                 vspace_t *client_vspace,
                                 serial_client_context_t *conn)
{
    seL4_Error error;
    int shmem_n_pages;
    uintptr_t shmem_tmp_vaddr;
    seL4_MessageInfo_t tag;
    cspacepath_t frame_cspath;

    /* Reject null/invalid arguments up front. */
    if (badged_server_ep_cap == 0 || client_vka == NULL || client_vspace == NULL
        || conn == NULL) {
        return seL4_InvalidArgument;
    }

    memset(conn, 0, sizeof(serial_client_context_t));
    shmem_n_pages = BYTES_TO_4K_PAGES(SERIAL_SERVER_SHMEM_MAX_SIZE);
    /* All frame caps are sent in a single IPC, so the page count cannot
     * exceed the number of extra caps one message can carry. */
    if (shmem_n_pages > seL4_MsgMaxExtraCaps) {
        ZF_LOGE(SERSERVC"connect: Currently unsupported shared memory size: "
                "IPC cap transfer capability is inadequate.");
        return seL4_RangeError;
    }

    /* Allocate the shared-memory window in the client's vspace. */
    conn->shmem = vspace_new_pages(client_vspace, seL4_AllRights,
                                   shmem_n_pages,
                                   seL4_PageBits);
    if (conn->shmem == NULL) {
        ZF_LOGE(SERSERVC"connect: Failed to alloc shmem.");
        return seL4_NotEnoughMemory;
    }
    assert(IS_ALIGNED((uintptr_t)conn->shmem, seL4_PageBits));

    /* Look up the Frame cap behind each page in the shmem range, and marshal
     * all of those Frame caps to the parent. The parent will then map those
     * Frames into its VSpace and establish a shmem link.
     */
    shmem_tmp_vaddr = (uintptr_t)conn->shmem;
    for (int i = 0; i < shmem_n_pages; i++) {
        vka_cspace_make_path(client_vka,
                             vspace_get_cap(client_vspace,
                                            (void *)shmem_tmp_vaddr),
                             &frame_cspath);
        seL4_SetCap(i, frame_cspath.capPtr);
        shmem_tmp_vaddr += BIT(seL4_PageBits);
    }

    /* Call the server asking it to establish the shmem mapping with us, and
     * get us connected up.
     */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_CONNECT_REQ);
    seL4_SetMR(SSMSGREG_CONNECT_REQ_SHMEM_SIZE, SERIAL_SERVER_SHMEM_MAX_SIZE);
    /* extraCaps doubles up as the number of shmem pages. */
    tag = seL4_MessageInfo_new(0, 0, shmem_n_pages,
                               SSMSGREG_CONNECT_REQ_END);
    tag = seL4_Call(badged_server_ep_cap, tag);

    /* It makes sense to verify that the message we're getting back is an
     * ACK response to our request message.
     */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_CONNECT_ACK) {
        error = seL4_IllegalOperation;
        ZF_LOGE(SERSERVC"connect: Reply message was not a CONNECT_ACK as "
                "expected.");
        goto out;
    }
    /* When the parent replies, we check to see if it was successful, etc. */
    error = seL4_MessageInfo_get_label(tag);
    if (error != (int)SERIAL_SERVER_NOERROR) {
        ZF_LOGE(SERSERVC"connect ERR %d: Failed to connect to the server.",
                error);
        if (error == (int)SERIAL_SERVER_ERROR_SHMEM_TOO_LARGE) {
            ZF_LOGE(SERSERVC"connect: Your requested shmem mapping size is too "
                    "large.\n\tServer's max shmem size is %luB.",
                    (long)seL4_GetMR(SSMSGREG_CONNECT_ACK_MAX_SHMEM_SIZE));
        }
        goto out;
    }

    /* Success: record the connection details for later send/recv calls. */
    conn->shmem_size = SERIAL_SERVER_SHMEM_MAX_SIZE;
    vka_cspace_make_path(client_vka, badged_server_ep_cap,
                         &conn->badged_server_ep_cspath);
    return seL4_NoError;

out:
    /* Failure path: release the shmem pages (VSPACE_FREE also frees the
     * underlying frames). */
    if (conn->shmem != NULL) {
        vspace_unmap_pages(client_vspace, (void *)conn->shmem,
                           shmem_n_pages, seL4_PageBits, VSPACE_FREE);
    }
    return error;
}
/* NOTE(review): this is the interior of a test-driver setup routine whose
 * definition begins outside this chunk — code left untouched, comments only. */
printf("\n");
printf("seL4 Test\n");
printf("=========\n");
printf("\n");

/* allocate lots of untyped memory for tests to use */
num_untypeds = populate_untypeds(untypeds);

/* create a frame that will act as the init data, we can then map that
 * in to target processes */
env.init = (test_init_data_t *) vspace_new_pages(&env.vspace, seL4_AllRights, 1, PAGE_BITS_4K);
assert(env.init != NULL);

/* copy the cap to map into the remote process */
cspacepath_t src, dest;
vka_cspace_make_path(&env.vka, vspace_get_cap(&env.vspace, env.init), &src);
/* allocate a fresh cslot for the duplicate, then copy the frame cap into it */
UNUSED int error = vka_cspace_alloc(&env.vka, &env.init_frame_cap_copy);
assert(error == 0);
vka_cspace_make_path(&env.vka, env.init_frame_cap_copy, &dest);
error = vka_cnode_copy(&dest, &src, seL4_AllRights);
assert(error == 0);

/* copy the untyped size bits list across to the init frame */
memcpy(env.init->untyped_size_bits_list, untyped_size_bits_list,
       sizeof(uint8_t) * num_untypeds);

/* parse elf region data about the test image to pass to the tests app */
num_elf_regions = sel4utils_elf_num_regions(TESTS_APP);
assert(num_elf_regions < MAX_REGIONS);
sel4utils_elf_reserve(NULL, TESTS_APP, elf_regions);