// Kludge to push changes in VFS memobj back out to disk errval_t memobj_flush_vfs(struct memobj *memobj, struct vregion *vregion) { errval_t err; assert(memobj->type == MEMOBJ_VFS); struct memobj_vfs *mv = (struct memobj_vfs *)memobj; struct vspace *vspace = vregion_get_vspace(vregion); struct pmap *pmap = vspace_get_pmap(vspace); genvaddr_t vregion_base = vregion_get_base_addr(vregion); lvaddr_t vregion_lbase = vspace_genvaddr_to_lvaddr(vregion_base); genvaddr_t vregion_off = vregion_get_offset(vregion); assert(vregion_off == 0); // not sure if we handle this correctly /* TODO: mv->size instead of BASE_PAGE_SIZE?*/ for (genvaddr_t off = 0; off < mv->filesize ; off += BASE_PAGE_SIZE){ genvaddr_t retvaddr; size_t retsize; vregion_flags_t retflags; // For each page check if it's in memory err = pmap->f.lookup(pmap, vregion_base + off, &retvaddr, &retsize, NULL, NULL, &retflags); if (err_is_fail(err)) { continue; // Page not in memory #if 0 /* this optimisation may not be correct if flags were changed -AB */ } else if ((retflags & VREGION_FLAGS_WRITE) == 0) { continue; // Not writable #endif } //TRACE("Flushing page at address: %lx\n", vregion_base + off); // seek file handle err = vfs_seek(mv->vh, VFS_SEEK_SET, off + mv->offset); if (err_is_fail(err)) { return err; } // write contents to file size_t rsize, pos = 0; size_t nbytes = mv->filesize - off; if (nbytes > BASE_PAGE_SIZE) { nbytes = BASE_PAGE_SIZE; } do { err = vfs_write(mv->vh, (char *)vregion_lbase + off + pos, nbytes - pos, &rsize); if (err_is_fail(err)) { return err; } pos += rsize; } while(rsize > 0 && pos < nbytes); assert(pos==nbytes); } return SYS_ERR_OK; }
/** * \brief Allocate some slabs * * \param retbuf Pointer to return the allocated memory * \param slab_type Type of slab the memory is allocated for * * Since this region is used for backing specific slabs, * only those types of slabs can be allocated. */ errval_t vspace_pinned_alloc(void **retbuf, enum slab_type slab_type) { errval_t err; struct pinned_state *state = get_current_pinned_state(); // Select slab type struct slab_allocator *slab; switch(slab_type) { case VREGION_LIST: slab = &state->vregion_list_slab; break; case FRAME_LIST: slab = &state->frame_list_slab; break; default: return LIB_ERR_VSPACE_PINNED_INVALID_TYPE; } thread_mutex_lock(&state->mutex); // Try allocating void *buf = slab_alloc(slab); if (buf == NULL) { // Out of memory, grow struct capref frame; err = frame_alloc(&frame, BASE_PAGE_SIZE, NULL); if (err_is_fail(err)) { thread_mutex_unlock(&state->mutex); DEBUG_ERR(err, "frame_alloc in vspace_pinned_alloc"); return err_push(err, LIB_ERR_FRAME_ALLOC); } err = state->memobj.m.f.fill((struct memobj*)&state->memobj, state->offset, frame, BASE_PAGE_SIZE); if (err_is_fail(err)) { thread_mutex_unlock(&state->mutex); DEBUG_ERR(err, "memobj_fill in vspace_pinned_alloc"); return err_push(err, LIB_ERR_MEMOBJ_FILL); } genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset; void *slab_buf = (void*)vspace_genvaddr_to_lvaddr(gvaddr); slab_grow(slab, slab_buf, BASE_PAGE_SIZE); state->offset += BASE_PAGE_SIZE; // Try again buf = slab_alloc(slab); } thread_mutex_unlock(&state->mutex); if (buf == NULL) { return LIB_ERR_SLAB_ALLOC_FAIL; } else { *retbuf = buf; return SYS_ERR_OK; } }
/**
 * \brief Shrink an MMU-aware region from the top.
 *
 * \param state  MMU-aware region state
 * \param base   Local vaddr of the start of the range to free; the range
 *               must end exactly at the current end of the consumed area
 * \param bytes  Number of bytes to free
 *
 * Reduces the consumed/offset accounting immediately, but only returns
 * frames to the system once at least MIN_MEM_FOR_FREE bytes of mapped
 * but unused space have accumulated (to amortise unmap cost).
 */
errval_t vspace_mmu_aware_unmap(struct vspace_mmu_aware *state,
                                lvaddr_t base, size_t bytes)
{
    errval_t err;
    struct capref frame;
    genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset;
    lvaddr_t eaddr = vspace_genvaddr_to_lvaddr(gvaddr);
    genvaddr_t offset;
    genvaddr_t gen_base = vspace_lvaddr_to_genvaddr(base)
        - vregion_get_base_addr(&state->vregion);
    genvaddr_t min_offset = 0;
    bool success = false;

    // The freed range must lie inside the region and end exactly at the
    // current end of the consumed area
    assert(vspace_lvaddr_to_genvaddr(base) >= vregion_get_base_addr(&state->vregion));
    assert(base + bytes == (lvaddr_t)eaddr);

    assert(bytes <= state->consumed);
    assert(bytes <= state->offset);

    // Reduce offset
    state->offset -= bytes;
    state->consumed -= bytes;

    // Free only in bigger blocks
    if (state->mapoffset - state->offset > MIN_MEM_FOR_FREE) {
        do {
            // Unmap and return (via unfill) frames from base
            err = state->memobj.m.f.unfill(&state->memobj.m, gen_base,
                                           &frame, &offset);
            if (err_is_fail(err) &&
                err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
                return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
            }

            // Delete frame cap
            if (err_is_ok(err)) {
                success = true;
                // Track the lowest offset we managed to unfill; it becomes
                // the new mapoffset below.
                // NOTE(review): offset 0 doubles as the "unset" sentinel —
                // confirm an unfill can never legitimately return offset 0.
                if (min_offset == 0 || min_offset > offset) {
                    min_offset = offset;
                }
                err = cap_destroy(frame);
                if (err_is_fail(err)) {
                    return err;
                }
            }
        // BUGFIX: compare the error *number*, not the raw errval_t.  The
        // raw value may carry a pushed error stack, in which case it would
        // never equal the bare error code and the loop could spin forever.
        // This also matches the err_no() check inside the loop body.
        } while (err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET);

        // state->consumed -= bytes;
        if (success) {
            state->mapoffset = min_offset;
        }
    }

    return SYS_ERR_OK;
}
/// Ensure the pmap's slab allocator holds at least \p request free slabs,
/// growing it by allocating and mapping fresh frames as needed.
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
{
    errval_t err;

    /* Keep looping till we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Bytes needed to back the remaining slab shortfall
        size_t shortfall = request - slab_freecount(&pmap->slab);
        size_t bytes = SLAB_STATIC_SIZE(shortfall, sizeof(struct vnode));

        /* Get a frame of that size (frame_alloc may round it up) */
        struct capref frame_cap;
        err = frame_alloc(&frame_cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }

        /* Mapping the frame itself consumes slabs; recurse if we are short */
        size_t needed = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < needed) {
            // If we recurse, we require more slabs than to map a single page
            assert(needed > 4);
            err = refill_slabs(pmap, needed);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Carve virtual address space out of the reserved metadata vregion */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, frame_cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Hand the freshly mapped memory to the slab allocator */
        lvaddr_t slab_mem = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void *)slab_mem, bytes);
    }

    return SYS_ERR_OK;
}
/// Map with an alignment constraint errval_t vspace_map_anon_nomalloc(void **retaddr, struct memobj_anon *memobj, struct vregion *vregion, size_t size, size_t *retsize, vregion_flags_t flags, size_t alignment) { errval_t err1, err2; size = ROUND_UP(size, BASE_PAGE_SIZE); if (retsize) { *retsize = size; } // Create a memobj and vregion err1 = memobj_create_anon(memobj, size, 0); if (err_is_fail(err1)) { err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_ANON); goto error; } err1 = vregion_map_aligned(vregion, get_current_vspace(), (struct memobj *)memobj, 0, size, flags, alignment); if (err_is_fail(err1)) { err1 = err_push(err1, LIB_ERR_VREGION_MAP); goto error; } *retaddr = (void*)vspace_genvaddr_to_lvaddr(vregion_get_base_addr(vregion)); return SYS_ERR_OK; error: if (err_no(err1) != LIB_ERR_MEMOBJ_CREATE_ANON) { err2 = memobj_destroy_anon((struct memobj *)memobj); if (err_is_fail(err2)) { DEBUG_ERR(err2, "memobj_destroy_anon failed"); } } return err1; }
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size, uint32_t flags, void **retbase) { errval_t err; struct spawninfo *si = state; // Increase size by space wasted on first page due to page-alignment size_t base_offset = BASE_PAGE_OFFSET(base); size += base_offset; base -= base_offset; // Page-align size = ROUND_UP(size, BASE_PAGE_SIZE); cslot_t vspace_slot = si->elfload_slot; // Allocate the frames size_t sz = 0; for (lpaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = frame_create(frame, sz, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_CREATE); } } cslot_t spawn_vspace_slot = si->elfload_slot; cslot_t new_slot_count = si->elfload_slot - vspace_slot; // create copies of the frame capabilities for spawn vspace for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) { struct capref frame = { .cnode = si->segcn, .slot = vspace_slot + copy_idx, }; struct capref spawn_frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = cap_copy(spawn_frame, frame); if (err_is_fail(err)) { // TODO: make debug printf printf("cap_copy failed for src_slot = %"PRIuCSLOT", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot); return err_push(err, LIB_ERR_CAP_COPY); } } /* Map into my vspace */ struct memobj *memobj = malloc(sizeof(struct memobj_anon)); if (!memobj) { return LIB_ERR_MALLOC_FAIL; } struct vregion *vregion = malloc(sizeof(struct vregion)); if (!vregion) { return LIB_ERR_MALLOC_FAIL; } // Create the objects err = memobj_create_anon((struct memobj_anon*)memobj, size, 0); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON); } err = vregion_map(vregion, get_current_vspace(), memobj, 0, size, VREGION_FLAGS_READ_WRITE); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); 
struct capref frame = { .cnode = si->segcn, .slot = vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = memobj->f.pagefault(memobj, vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } /* Map into spawn vspace */ struct memobj *spawn_memobj = NULL; struct vregion *spawn_vregion = NULL; err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion, &spawn_memobj, elf_to_vregion_flags(flags)); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = spawn_vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(spawn_memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset; *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr); return SYS_ERR_OK; } /** * \brief Load the elf image */ errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary, size_t binary_size, genvaddr_t *entry, void** arch_info) { errval_t err; // Reset the elfloader_slot si->elfload_slot = 0; struct capref cnode_cap = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_SEGCN, }; err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SEGCN); } // TLS is NYI si->tls_init_base = 0; si->tls_init_len = si->tls_total_len = 0; // Load the binary err = 
elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry); if (err_is_fail(err)) { return err; } struct Elf32_Shdr* got_shdr = elf32_find_section_header_name(binary, binary_size, ".got"); if (got_shdr) { *arch_info = (void*)got_shdr->sh_addr; } else { return SPAWN_ERR_LOAD; } return SYS_ERR_OK; } void spawn_arch_set_registers(void *arch_load_info, dispatcher_handle_t handle, arch_registers_state_t *enabled_area, arch_registers_state_t *disabled_area) { assert(arch_load_info != NULL); uintptr_t got_base = (uintptr_t)arch_load_info; struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle); disp_arm->got_base = got_base; enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base; disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base; #ifndef __ARM_ARCH_7M__ //armv7-m does not support these flags enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR; disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR; #endif }
/**
 * \brief Setup arguments and environment
 *
 * \param argv   Command-line arguments, NULL-terminated
 * \param envp   Environment, NULL-terminated
 *
 * Builds the arguments page shared with the child: creates the frame,
 * maps it both into the child's vspace and our own, copies argv/envp
 * strings into it (rewriting the pointers to child-vspace addresses),
 * serialises the parent's vspace/pmap state after the strings, and
 * finally passes the child-vspace address of the page as the first
 * dispatcher parameter.
 */
static errval_t spawn_setup_env(struct spawninfo *si,
                                char *const argv[], char *const envp[])
{
    errval_t err;

    // Create frame (actually multiple pages) for arguments
    si->argspg.cnode = si->taskcn;
    si->argspg.slot = TASKCN_SLOT_ARGSPAGE;
    struct capref spawn_argspg = {
        .cnode = si->taskcn,
        .slot = TASKCN_SLOT_ARGSPAGE2,
    };
    err = frame_create(si->argspg, ARGS_SIZE, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ARGSPG);
    }
    err = cap_copy(spawn_argspg, si->argspg);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_ARGSPG);
    }

    /* Map in args frame */
    // spawn_args_base: where the page lives in the CHILD's vspace
    genvaddr_t spawn_args_base;
    err = spawn_vspace_map_one_frame(si, &spawn_args_base, spawn_argspg,
                                     ARGS_SIZE);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_ARGSPG_TO_NEW);
    }

    // argspg: the same page mapped locally so we can write into it
    void *argspg;
    err = vspace_map_one_frame(&argspg, ARGS_SIZE, si->argspg, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_ARGSPG_TO_SELF);
    }

    /* Layout of arguments page:
     *   struct spawn_domain_params; // contains pointers to other fields
     *   char buf[]; // NUL-terminated strings for arguments and environment
     *   vspace layout data follows the string data
     */
    struct spawn_domain_params *params = argspg;
    char *buf = (char *)(params + 1);
    size_t buflen = ARGS_SIZE - (buf - (char *)argspg);

    /* Copy command-line arguments */
    int i;
    size_t len;
    for (i = 0; argv[i] != NULL; i++) {
        len = strlen(argv[i]) + 1;
        if (len > buflen) {
            return SPAWN_ERR_ARGSPG_OVERFLOW;
        }
        strcpy(buf, argv[i]);
        // Rebase the pointer: local offset of the string + child-vspace base
        params->argv[i] = buf - (char *)argspg
                          + (char *)(lvaddr_t)spawn_args_base;
        buf += len;
        buflen -= len;
    }
    // NOTE(review): the bound is only asserted after the loop has written
    // params->argv[i] — an over-long argv would overflow first; verify
    // callers guarantee argc <= MAX_CMDLINE_ARGS.
    assert(i <= MAX_CMDLINE_ARGS);
    int argc = i;
    params->argv[i] = NULL;

    /* Copy environment strings */
    for (i = 0; envp[i] != NULL; i++) {
        len = strlen(envp[i]) + 1;
        if (len > buflen) {
            return SPAWN_ERR_ARGSPG_OVERFLOW;
        }
        strcpy(buf, envp[i]);
        // Same rebasing as argv above
        params->envp[i] = buf - (char *)argspg
                          + (char *)(lvaddr_t)spawn_args_base;
        buf += len;
        buflen -= len;
    }
    // NOTE(review): same post-hoc bounds check as for argv above.
    assert(i <= MAX_ENVIRON_VARS);
    params->envp[i] = NULL;

    /* Serialise vspace data */
    // XXX: align buf to next word
    char *vspace_buf = (char *)ROUND_UP((lvaddr_t)buf, sizeof(uintptr_t));
    buflen -= vspace_buf - buf;

    // FIXME: currently just the pmap is serialised
    err = si->vspace->pmap->f.serialise(si->vspace->pmap, vspace_buf, buflen);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SERIALISE_VSPACE);
    }

    /* Setup environment pointer and vspace pointer */
    params->argc = argc;
    // vspace_buf pointer rebased into the child's vspace, like argv/envp
    params->vspace_buf = (char *)vspace_buf - (char *)argspg
                         + (char *)(lvaddr_t)spawn_args_base;
    params->vspace_buf_len = buflen;

    // Setup TLS data
    params->tls_init_base = (void *)vspace_genvaddr_to_lvaddr(si->tls_init_base);
    params->tls_init_len = si->tls_init_len;
    params->tls_total_len = si->tls_total_len;

    // First dispatcher parameter = child-vspace address of the args page
    arch_registers_state_t *enabled_area =
        dispatcher_get_enabled_save_area(si->handle);
    registers_set_param(enabled_area, (uintptr_t)spawn_args_base);

    return SYS_ERR_OK;
}

/**
 * Copies caps from inheritcnode into destination cnode,
 * ignores caps that to not exist.
 *
 * \param  inheritcn    Source cnode
 * \param  inherit_slot Source cnode slot
 * \param  destcn       Target cnode
 * \param  destcn_slot  Target cnode slot
 *
 * \retval SYS_ERR_OK Copy to target was successful or source cap
 * did not exist.
 * \retval SPAWN_ERR_COPY_INHERITCN_CAP Error in cap_copy
 */
static errval_t spawn_setup_inherited_cap(struct cnoderef inheritcn,
                                          capaddr_t inherit_slot,
                                          struct cnoderef destcn,
                                          capaddr_t destcn_slot)
{
    errval_t err;

    struct capref src;
    src.cnode = inheritcn;
    src.slot  = inherit_slot;;

    // Create frame (actually multiple pages) for fds
    struct capref dest;
    dest.cnode = destcn;
    dest.slot  = destcn_slot;

    err = cap_copy(dest, src);
    if (err_no(err) == SYS_ERR_SOURCE_CAP_LOOKUP) {
        // there was no fdcap to inherit, continue
        return SYS_ERR_OK;
    } else if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_INHERITCN_CAP);
    }

    return SYS_ERR_OK;
}
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size, uint32_t flags, void **retbase) { errval_t err; struct spawninfo *si = state; // Increase size by space wasted on first page due to page-alignment size_t base_offset = BASE_PAGE_OFFSET(base); size += base_offset; base -= base_offset; // Page-align size = ROUND_UP(size, BASE_PAGE_SIZE); cslot_t vspace_slot = si->elfload_slot; // Allocate the frames size_t sz = 0; for (lpaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = frame_create(frame, sz, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_CREATE); } } cslot_t spawn_vspace_slot = si->elfload_slot; cslot_t new_slot_count = si->elfload_slot - vspace_slot; // create copies of the frame capabilities for spawn vspace for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) { struct capref frame = { .cnode = si->segcn, .slot = vspace_slot + copy_idx, }; struct capref spawn_frame = { .cnode = si->segcn, .slot = si->elfload_slot++, }; err = cap_copy(spawn_frame, frame); if (err_is_fail(err)) { // TODO: make debug printf printf("cap_copy failed for src_slot = %"PRIuCSLOT", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot); return err_push(err, LIB_ERR_CAP_COPY); } } /* Map into my vspace */ struct memobj *memobj = malloc(sizeof(struct memobj_anon)); if (!memobj) { return LIB_ERR_MALLOC_FAIL; } struct vregion *vregion = malloc(sizeof(struct vregion)); if (!vregion) { return LIB_ERR_MALLOC_FAIL; } // Create the objects err = memobj_create_anon((struct memobj_anon*)memobj, size, 0); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON); } err = vregion_map(vregion, get_current_vspace(), memobj, 0, size, VREGION_FLAGS_READ_WRITE); if (err_is_fail(err)) { return err_push(err, LIB_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); 
struct capref frame = { .cnode = si->segcn, .slot = vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(memobj, genvaddr, frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = memobj->f.pagefault(memobj, vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } /* Map into spawn vspace */ struct memobj *spawn_memobj = NULL; struct vregion *spawn_vregion = NULL; err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion, &spawn_memobj, elf_to_vregion_flags(flags)); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_VSPACE_MAP); } for (lvaddr_t offset = 0; offset < size; offset += sz) { sz = 1UL << log2floor(size - offset); struct capref spawn_frame = { .cnode = si->segcn, .slot = spawn_vspace_slot++, }; genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset); err = memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz); if (err_is_fail(err)) { return err_push(err, LIB_ERR_MEMOBJ_FILL); } err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0); if (err_is_fail(err)) { DEBUG_ERR(err, "lib_err_memobj_pagefault_handler"); return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER); } } si->vregion[si->vregions] = vregion; si->base[si->vregions++] = base; genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset; *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr); return SYS_ERR_OK; } /** * \brief Load the elf image */ errval_t spawn_arch_load(struct spawninfo *si, lvaddr_t binary, size_t binary_size, genvaddr_t *entry, void** arch_load_info) { errval_t err; // Reset the elfloader_slot si->elfload_slot = 0; si->vregions = 0; struct capref cnode_cap = { .cnode = si->rootcn, .slot = ROOTCN_SLOT_SEGCN, }; // XXX: this code assumes that elf_load never needs more than 32 slots for // text frame capabilities. 
err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL); if (err_is_fail(err)) { return err_push(err, SPAWN_ERR_CREATE_SEGCN); } // Load the binary si->tls_init_base = 0; si->tls_init_len = si->tls_total_len = 0; err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry, &si->tls_init_base, &si->tls_init_len, &si->tls_total_len); if (err_is_fail(err)) { return err; } return SYS_ERR_OK; } void spawn_arch_set_registers(void *arch_load_info, dispatcher_handle_t handle, arch_registers_state_t *enabled_area, arch_registers_state_t *disabled_area) { #if defined(__x86_64__) /* XXX: 1st argument to _start is the dispatcher pointer * see lib/crt/arch/x86_64/crt0.s */ disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp; #elif defined(__i386__) /* XXX: 1st argument to _start is the dispatcher pointer * see lib/crt/arch/x86_32/crt0.s */ disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp; #endif }
/**
 * \brief Wrapper for creating and mapping a memory object
 * of type one frame with specific flags and a specific alignment
 *
 * \param retaddr    Returns the mapped address
 * \param size       Size to map (rounded up to whole pages)
 * \param frame      Frame capability to back the mapping
 * \param flags      Mapping flags
 * \param alignment  Required alignment of the mapping
 * \param retmemobj  Optionally returns the allocated memobj (caller owns)
 * \param retvregion Optionally returns the allocated vregion (caller owns)
 */
errval_t vspace_map_one_frame_attr_aligned(void **retaddr, size_t size,
                                           struct capref frame,
                                           vregion_flags_t flags,
                                           size_t alignment,
                                           struct memobj **retmemobj,
                                           struct vregion **retvregion)
{
    errval_t err1, err2;
    struct memobj *memobj = NULL;
    struct vregion *vregion = NULL;

    size = ROUND_UP(size, BASE_PAGE_SIZE);

    // Allocate space
    memobj = calloc(1, sizeof(struct memobj_one_frame));
    if (!memobj) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }
    vregion = calloc(1, sizeof(struct vregion));
    if (!vregion) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }

    // Create mappings
    err1 = memobj_create_one_frame((struct memobj_one_frame*)memobj, size, 0);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_ONE_FRAME);
        goto error;
    }

    err1 = memobj->f.fill(memobj, 0, frame, size);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_FILL);
        goto error;
    }

    err1 = vregion_map_aligned(vregion, get_current_vspace(), memobj, 0,
                               size, flags, alignment);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_VREGION_MAP);
        goto error;
    }

    err1 = memobj->f.pagefault(memobj, vregion, 0, 0);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        goto error;
    }

    *retaddr = (void*)vspace_genvaddr_to_lvaddr(vregion_get_base_addr(vregion));
    if (retmemobj) {
        *retmemobj = memobj;
    }
    if (retvregion) {
        *retvregion = vregion;
    }

    return SYS_ERR_OK;

 error:
    if (memobj) {
        err2 = memobj_destroy_one_frame(memobj);
        if (err_is_fail(err2)) {
            // BUGFIX: message previously said "memobj_destroy_anon failed"
            // (copy-paste from the anon variant)
            DEBUG_ERR(err2, "memobj_destroy_one_frame failed");
        }
    }
    if (vregion) {
        err2 = vregion_destroy(vregion);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "vregion_destroy failed");
        }
    }
    // BUGFIX: the structs are heap-allocated here and on failure never
    // returned to the caller — free them to avoid leaking (the destroy
    // calls above tear down internal state, not the structs themselves).
    free(memobj);
    free(vregion);
    return err1;
}
/**
 * \brief Create mappings
 *
 * \param state     The object metadata
 * \param req_size  The required amount by the application
 * \param retbuf    Pointer to return the mapped buffer
 * \param retsize   The actual size returned
 *
 * Extends the MMU-aware region by at least \p req_size bytes, reusing
 * any already-mapped-but-unconsumed space first.  Opportunistically uses
 * 2M (and on x86-64, 1G) pages when the region's flags permit and the
 * current map offset is suitably aligned.
 *
 * This function will returns a special error code if frame_create
 * fails due to the constrains to the memory server (amount of memory
 * or region of memory). This is to facilitate retrying with different
 * constraints.
 */
errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state, size_t req_size,
                              void **retbuf, size_t *retsize)
{
    errval_t err;
    struct capref frame;

    // Calculate how much still to map in
    // (mapoffset - offset is already-mapped slack we can hand out for free)
    size_t origsize = req_size;
    assert(state->mapoffset >= state->offset);
    if(state->mapoffset - state->offset > req_size) {
        req_size = 0;
    } else {
        req_size -= state->mapoffset - state->offset;
    }
    size_t alloc_size = ROUND_UP(req_size, BASE_PAGE_SIZE);
    size_t ret_size = 0;

    if (req_size > 0) {
#if __x86_64__
        if ((state->vregion.flags & VREGION_FLAGS_HUGE) &&
            (state->mapoffset & HUGE_PAGE_MASK) == 0)
        {
            // this is an opportunity to switch to 1G pages if requested.
            // we know that we can use large pages without jumping through hoops
            // if state->vregion.flags has VREGION_FLAGS_HUGE set and
            // mapoffset is aligned to at least HUGE_PAGE_SIZE.
            alloc_size = ROUND_UP(req_size, HUGE_PAGE_SIZE);
            // goto allocation directly so we can avoid nasty code interaction
            // between #if __x86_64__ and the size checks, we want to be able
            // to use 2M pages on x86_64 also. -SG, 2015-04-30.
            goto allocate;
        }
#endif
        if ((state->vregion.flags & VREGION_FLAGS_LARGE) &&
            (state->mapoffset & LARGE_PAGE_MASK) == 0) {
            // this is an opportunity to switch to 2M pages if requested.
            // we know that we can use large pages without jumping through hoops
            // if state->vregion.flags has VREGION_FLAGS_LARGE set and
            // mapoffset is aligned to at least LARGE_PAGE_SIZE.
            alloc_size = ROUND_UP(req_size, LARGE_PAGE_SIZE);
        }

        // Create frame of appropriate size
allocate:
        err = state->slot_alloc->alloc(state->slot_alloc, &frame);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_SLOT_ALLOC_NO_SPACE);
        }
        err = frame_create(frame, alloc_size, &ret_size);
        if (err_is_fail(err)) {
            if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
                // we can only get 4k frames for now; retry with 4k
                // NOTE(review): this retry jumps back to allocate and grabs
                // a second slot without freeing the first — looks like a
                // slot leak on the retry path; verify.
                if (alloc_size > BASE_PAGE_SIZE && req_size <= BASE_PAGE_SIZE) {
                    alloc_size = BASE_PAGE_SIZE;
                    goto allocate;
                }
                return err_push(err, LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS);
            }
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
        // frame_create may round the allocation up; credit the surplus to
        // the caller-visible size so accounting stays consistent
        assert(ret_size >= req_size);
        origsize += ret_size - req_size;
        req_size = ret_size;

        // Refuse to grow past the region's total size; undo the frame
        if (state->consumed + req_size > state->size) {
            err = cap_delete(frame);
            if (err_is_fail(err)) {
                debug_err(__FILE__, __func__, __LINE__, err,
                          "cap_delete failed");
            }
            state->slot_alloc->free(state->slot_alloc, frame);
            return LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE;
        }

        // Map it in
        err = state->memobj.m.f.fill(&state->memobj.m, state->mapoffset,
                                     frame, req_size);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion,
                                          state->mapoffset, 0);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    // Return buffer: the caller's memory starts at the old consumption point
    genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset;
    *retbuf = (void*)vspace_genvaddr_to_lvaddr(gvaddr);
    *retsize = origsize;
    state->mapoffset += req_size;
    state->offset += origsize;
    state->consumed += origsize;

    return SYS_ERR_OK;
}