/**
 * \brief Initialize an MMU-aware virtual address region with an alignment
 *        constraint, backed by an anonymous memory object.
 */
errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
                                       struct slot_allocator *slot_allocator,
                                       size_t size, size_t alignment,
                                       vregion_flags_t flags)
{
    state->size = size;
    state->consumed = 0;
    state->alignment = alignment;
    vspace_mmu_aware_set_slot_alloc(state, slot_allocator);

    errval_t err;

    size = ROUND_UP(size, BASE_PAGE_SIZE);
    err = memobj_create_anon(&state->memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }

    err = vregion_map_aligned(&state->vregion, get_current_vspace(),
                              &state->memobj.m, 0, size, flags, alignment);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VREGION_MAP);
    }

    state->offset = state->mapoffset = 0;

    return SYS_ERR_OK;
}
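/*
 * Usage sketch (illustrative only, not part of the library): initialize a
 * 512 MiB MMU-aware reservation with 2 MiB alignment so it can later be
 * backed by large pages. The slot allocator, size, and flag choices here
 * are example values, not requirements of the API.
 */
static struct vspace_mmu_aware example_va_state;

static errval_t example_mmu_aware_init(void)
{
    return vspace_mmu_aware_init_aligned(&example_va_state,
                                         get_default_slot_allocator(),
                                         512UL * 1024 * 1024, /* reservation size */
                                         LARGE_PAGE_SIZE,     /* 2 MiB alignment */
                                         VREGION_FLAGS_READ_WRITE);
}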
/// Map with an alignment constraint
errval_t vspace_map_anon_nomalloc(void **retaddr, struct memobj_anon *memobj,
                                  struct vregion *vregion, size_t size,
                                  size_t *retsize, vregion_flags_t flags,
                                  size_t alignment)
{
    errval_t err1, err2;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    if (retsize) {
        *retsize = size;
    }

    // Create a memobj and vregion
    err1 = memobj_create_anon(memobj, size, 0);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_ANON);
        goto error;
    }
    err1 = vregion_map_aligned(vregion, get_current_vspace(),
                               (struct memobj *)memobj, 0, size, flags,
                               alignment);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_VREGION_MAP);
        goto error;
    }

    *retaddr = (void *)vspace_genvaddr_to_lvaddr(vregion_get_base_addr(vregion));

    return SYS_ERR_OK;

error:
    if (err_no(err1) != LIB_ERR_MEMOBJ_CREATE_ANON) {
        err2 = memobj_destroy_anon((struct memobj *)memobj);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "memobj_destroy_anon failed");
        }
    }
    return err1;
}
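/*
 * Usage sketch (illustrative): vspace_map_anon_nomalloc() takes caller-
 * provided storage for the memobj and vregion, so it can be used on paths
 * where malloc is not available. Sizes and flags below are example values.
 */
static struct memobj_anon example_anon_memobj;
static struct vregion     example_anon_vregion;

static errval_t example_map_anon_nomalloc(void **buf, size_t *retsize)
{
    return vspace_map_anon_nomalloc(buf, &example_anon_memobj,
                                    &example_anon_vregion,
                                    4 * BASE_PAGE_SIZE, retsize,
                                    VREGION_FLAGS_READ_WRITE,
                                    BASE_PAGE_SIZE);
}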
/**
 * \brief allocates size bytes of memory, page-interleaved across the nodes
 *        specified in the nodemask.
 *
 * \param size      size of the memory region in bytes
 * \param pagesize  preferred page size to be used
 * \param nodemask  subset of nodes to consider for allocation
 *
 * \returns pointer to the mapped memory region
 *
 * Should only be used for large areas consisting of multiple pages.
 * The memory must be freed with numa_free(). On errors NULL is returned.
 */
void *numa_alloc_interleaved_subset(size_t size, size_t pagesize,
                                    struct bitmap *nodemask)
{
    errval_t err;

    /* clear out invalid bits */
    bitmap_clear_range(nodemask, numa_num_configured_nodes(),
                       bitmap_get_nbits(nodemask));

    /* get the number of nodes */
    nodeid_t nodes = bitmap_get_weight(nodemask);
    if (nodes == 0) {
        return NULL;
    }

    NUMA_DEBUG_ALLOC("allocating interleaved using %" PRIuNODEID " nodes\n",
                     nodes);

    assert(nodes <= numa_num_configured_nodes());

    vregion_flags_t flags;
    validate_page_size(&pagesize, &flags);
    size_t stride = pagesize;

    size_t node_size = size / nodes;
    node_size = (node_size + pagesize - 1) & ~(pagesize - 1);

    /* update total size as this may change due to rounding of node sizes */
    size = nodes * node_size;

    /*
     * XXX: we may want to keep track of numa alloced frames
     */

    struct memobj_numa *memobj = calloc(1, sizeof(struct memobj_numa));
    if (memobj == NULL) {
        return NULL;
    }
    err = memobj_create_numa(memobj, size, 0, numa_num_configured_nodes(),
                             stride);
    if (err_is_fail(err)) {
        return NULL;
    }

    bitmap_bit_t node = bitmap_get_first(nodemask);
    nodeid_t node_idx = 0;
    while (node != BITMAP_BIT_NONE) {
        struct capref frame;
        err = numa_frame_alloc_on_node(&frame, node_size, (nodeid_t)node, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "numa_frame_alloc_on_node");
            goto out_err;
        }
        memobj->m.f.fill(&memobj->m, node_idx, frame, 0);
        ++node_idx;
        node = bitmap_get_next(nodemask, node);
    }

    struct vregion *vreg = calloc(1, sizeof(struct vregion));
    if (vreg == NULL) {
        goto out_err;
    }
    err = vregion_map_aligned(vreg, get_current_vspace(), &memobj->m, 0, size,
                              flags, pagesize);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vregion_map_aligned");
        goto out_err;
    }

    err = memobj->m.f.pagefault(&memobj->m, vreg, 0, 0);
    if (err_is_fail(err)) {
        vregion_destroy(vreg);
        free(vreg);
        DEBUG_ERR(err, "memobj.m.f.pagefault");
        goto out_err;
    }

    // XXX - Is this right?
    return (void *)(uintptr_t)vregion_get_base_addr(vreg);

out_err:
    /* release the frames that were already filled into the memobj */
    for (int i = 0; i < node_idx; ++i) {
        struct capref frame;
        memobj->m.f.unfill(&memobj->m, i, &frame, NULL);
        cap_delete(frame);
    }

    return NULL;
}
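/*
 * Usage sketch (illustrative): interleave a 16 MiB buffer across nodes 0
 * and 1 at base-page granularity. numa_allocate_nodemask(),
 * numa_free_nodemask() and bitmap_set_bit() are assumed to be available,
 * mirroring the rest of the libnuma bitmap interface.
 */
static void *example_interleaved_alloc(void)
{
    struct bitmap *mask = numa_allocate_nodemask();
    if (mask == NULL) {
        return NULL;
    }
    bitmap_set_bit(mask, 0);
    bitmap_set_bit(mask, 1);

    void *buf = numa_alloc_interleaved_subset(16UL * 1024 * 1024,
                                              BASE_PAGE_SIZE, mask);
    numa_free_nodemask(mask);

    /* release with numa_free() when done */
    return buf;
}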
/**
 * \brief Wrapper for creating and mapping a memory object
 *        of type one frame with specific flags and a specific alignment
 */
errval_t vspace_map_one_frame_attr_aligned(void **retaddr, size_t size,
                                           struct capref frame,
                                           vregion_flags_t flags,
                                           size_t alignment,
                                           struct memobj **retmemobj,
                                           struct vregion **retvregion)
{
    errval_t err1, err2;
    struct memobj *memobj = NULL;
    struct vregion *vregion = NULL;

    size = ROUND_UP(size, BASE_PAGE_SIZE);

    // Allocate space
    memobj = calloc(1, sizeof(struct memobj_one_frame));
    if (!memobj) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }
    vregion = calloc(1, sizeof(struct vregion));
    if (!vregion) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }

    // Create mappings
    err1 = memobj_create_one_frame((struct memobj_one_frame *)memobj, size, 0);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_ONE_FRAME);
        goto error;
    }

    err1 = memobj->f.fill(memobj, 0, frame, size);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_FILL);
        goto error;
    }

    err1 = vregion_map_aligned(vregion, get_current_vspace(), memobj, 0, size,
                               flags, alignment);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_VREGION_MAP);
        goto error;
    }

    err1 = memobj->f.pagefault(memobj, vregion, 0, 0);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        goto error;
    }

    *retaddr = (void *)vspace_genvaddr_to_lvaddr(vregion_get_base_addr(vregion));
    if (retmemobj) {
        *retmemobj = memobj;
    }
    if (retvregion) {
        *retvregion = vregion;
    }

    return SYS_ERR_OK;

error:
    if (memobj) {
        err2 = memobj_destroy_one_frame(memobj);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "memobj_destroy_one_frame failed");
        }
        free(memobj);
    }
    if (vregion) {
        err2 = vregion_destroy(vregion);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "vregion_destroy failed");
        }
        free(vregion);
    }
    return err1;
}
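/*
 * Usage sketch (illustrative): allocate a frame with frame_alloc() and map
 * it read/write with large-page alignment. The flag and size choices are
 * example values only.
 */
static errval_t example_map_frame_aligned(void **buf)
{
    struct capref frame;
    size_t retbytes;

    errval_t err = frame_alloc(&frame, LARGE_PAGE_SIZE, &retbytes);
    if (err_is_fail(err)) {
        return err;
    }

    struct memobj *memobj;
    struct vregion *vregion;
    return vspace_map_one_frame_attr_aligned(buf, retbytes, frame,
                                             VREGION_FLAGS_READ_WRITE,
                                             LARGE_PAGE_SIZE,
                                             &memobj, &vregion);
}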
/**
 * \brief Wrapper to create and map a file object, optionally at a fixed address
 *
 * The memory object and vregion are returned so the user can call fill and
 * pagefault on it to create actual mappings.
 */
static errval_t vspace_map_file_internal(genvaddr_t opt_base,
                                         size_t opt_alignment,
                                         size_t size, vregion_flags_t flags,
                                         vfs_handle_t file, off_t offset,
                                         size_t filesize,
                                         struct vregion **ret_vregion,
                                         struct memobj **ret_memobj)
{
    errval_t err1, err2;
    struct memobj *memobj = NULL;
    struct vregion *vregion = NULL;

    // Allocate space
    memobj = malloc(sizeof(struct memobj_vfs));
    if (!memobj) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }
    vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        err1 = LIB_ERR_MALLOC_FAIL;
        goto error;
    }

    // Create a memobj and vregion
    err1 = memobj_create_vfs((struct memobj_vfs *)memobj, size, 0, file,
                             offset, filesize);
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_MEMOBJ_CREATE_VFS);
        goto error;
    }

    if (opt_base != 0) {
        err1 = vregion_map_fixed(vregion, get_current_vspace(), memobj, 0,
                                 size, opt_base, flags);
    } else if (opt_alignment != 0) {
        err1 = vregion_map_aligned(vregion, get_current_vspace(), memobj, 0,
                                   size, flags, opt_alignment);
    } else {
        err1 = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                           flags);
    }
    if (err_is_fail(err1)) {
        err1 = err_push(err1, LIB_ERR_VREGION_MAP);
        goto error;
    }

    *ret_vregion = vregion;
    *ret_memobj = memobj;

    return SYS_ERR_OK;

error:
    if (memobj) {
        err2 = memobj_destroy_vfs(memobj);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "memobj_destroy_vfs failed");
        }
        free(memobj);
    }
    if (vregion) {
        err2 = vregion_destroy(vregion);
        if (err_is_fail(err2)) {
            DEBUG_ERR(err2, "vregion_destroy failed");
        }
        free(vregion);
    }
    return err1;
}
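/*
 * Hypothetical wrapper (the name is illustrative, not the library API):
 * the internal helper above takes both an optional base and an optional
 * alignment, so thin entry points only need to pick which constraint to
 * forward. Passing 0 for both yields default placement.
 */
static errval_t example_map_file_fixed(genvaddr_t base, size_t size,
                                       vregion_flags_t flags,
                                       vfs_handle_t file, off_t offset,
                                       size_t filesize,
                                       struct vregion **ret_vregion,
                                       struct memobj **ret_memobj)
{
    /* fixed placement at 'base'; no alignment constraint */
    return vspace_map_file_internal(base, 0, size, flags, file, offset,
                                    filesize, ret_vregion, ret_memobj);
}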
/**
 * \brief Setup a new vregion anywhere in the address space
 *
 * \param vregion   The vregion
 * \param vspace    The vspace to associate with the vregion
 * \param memobj    The memory object to associate with the region
 * \param offset    Offset into the memory object
 * \param size      Size of the memory object to use
 * \param flags     Vregion specific flags
 */
errval_t vregion_map(struct vregion *vregion, struct vspace *vspace,
                     struct memobj *memobj, size_t offset, size_t size,
                     vregion_flags_t flags)
{
    return vregion_map_aligned(vregion, vspace, memobj, offset, size, flags, 0);
}
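/*
 * Usage sketch (illustrative): back a fresh vregion with an anonymous
 * memory object and let the vspace pick the placement; vregion_map()
 * forwards alignment 0, i.e. no caller-imposed alignment constraint.
 * Storage and flags are example values.
 */
static errval_t example_vregion_map(struct memobj_anon *memobj,
                                    struct vregion *vregion, size_t size)
{
    errval_t err = memobj_create_anon(memobj, size, 0);
    if (err_is_fail(err)) {
        return err;
    }
    return vregion_map(vregion, get_current_vspace(), &memobj->m,
                       0, size, VREGION_FLAGS_READ_WRITE);
}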