/**
 * \brief Allocate RAM from the memory server's own allocator.
 *
 * Installed via ram_alloc_set() as the process-wide RAM allocation handler
 * (see initialize_mem_serv), so any frame_alloc/frame_create performed inside
 * this function can recurse back into it.
 *
 * \param ret      Filled in with the cap for the allocated RAM region.
 * \param bits     log2 size of the requested region (must be >= MINSIZEBITS).
 * \param minbase  Lower bound of acceptable physical range (ignored if
 *                 maxlimit == 0).
 * \param maxlimit Upper bound of acceptable physical range; 0 means
 *                 "anywhere".
 *
 * \return SYS_ERR_OK on success, or the error from mm_alloc/mm_alloc_range.
 */
errval_t memserv_alloc(struct capref *ret, uint8_t bits, genpaddr_t minbase,
                       genpaddr_t maxlimit)
{
    errval_t err;
    assert(bits >= MINSIZEBITS);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* Refill the slab allocator backing mm_ram if it is running low.
     * NOTE(review): `refilling` presumably guards against recursive re-entry,
     * since frame_create/paging_map_frame below allocate RAM through this
     * very function — TODO confirm. Because `refilling` is set on the first
     * iteration and re-checked in the loop condition, the loop body runs at
     * most once per outermost call; one 8-page grow is apparently expected to
     * push freecount above the threshold. */
    size_t freecount = slab_freecount(&mm_ram.slabs);
    while (!refilling && (freecount <= MINSPARENODES)) {
        refilling = true;
        struct capref frame;
        err = msa.a.alloc(&msa.a, &frame);
        assert(err_is_ok(err));
        size_t retsize;
        err = frame_create(frame, BASE_PAGE_SIZE * 8, &retsize);
        assert(err_is_ok(err));
        /* frame_create may round up; the returned size must be whole pages */
        assert(retsize % BASE_PAGE_SIZE == 0);
        assert(retsize >= BASE_PAGE_SIZE);
        void *buf;
        err = paging_map_frame(get_current_paging_state(), &buf, retsize,
                               frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "paging_map_frame failed");
            /* NOTE(review): asserting buf after a failed map looks
             * questionable — on failure buf may be garbage; kept as-is. */
            assert(buf);
        }
        /* hand the freshly mapped memory to the slab allocator */
        slab_grow(&mm_ram.slabs, buf, retsize);
        freecount = slab_freecount(&mm_ram.slabs);
    }
    /* only clear the re-entry guard once the refill actually succeeded */
    if (freecount > MINSPARENODES) {
        refilling = false;
    }

    /* maxlimit == 0 requests an unconstrained allocation */
    if(maxlimit == 0) {
        err = mm_alloc(&mm_ram, bits, ret, NULL);
    } else {
        err = mm_alloc_range(&mm_ram, bits, minbase, maxlimit, ret, NULL);
    }

    if (err_is_fail(err)) {
        debug_printf("in mem_serv:mymm_alloc(bits=%"PRIu8", minbase=%"PRIxGENPADDR
                     ", maxlimit=%"PRIxGENPADDR")\n",
                     bits, minbase, maxlimit);
        DEBUG_ERR(err, "mem_serv:mymm_alloc");
    }
    return err;
}
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request) { errval_t err; /* Keep looping till we have #request slabs */ while (slab_freecount(&pmap->slab) < request) { // Amount of bytes required for #request size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab), sizeof(struct vnode)); /* Get a frame of that size */ struct capref cap; err = frame_alloc(&cap, bytes, &bytes); if (err_is_fail(err)) { return err_push(err, LIB_ERR_FRAME_ALLOC); } /* If we do not have enough slabs to map the frame in, recurse */ size_t required_slabs_for_frame = max_slabs_required(bytes); if (slab_freecount(&pmap->slab) < required_slabs_for_frame) { // If we recurse, we require more slabs than to map a single page assert(required_slabs_for_frame > 4); err = refill_slabs(pmap, required_slabs_for_frame); if (err_is_fail(err)) { return err_push(err, LIB_ERR_SLAB_REFILL); } } /* Perform mapping */ genvaddr_t genvaddr = pmap->vregion_offset; pmap->vregion_offset += (genvaddr_t)bytes; // if this assert fires, increase META_DATA_RESERVED_SPACE assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) + vregion_get_size(&pmap->vregion))); err = do_map(pmap, genvaddr, cap, 0, bytes, VREGION_FLAGS_READ_WRITE, NULL, NULL); if (err_is_fail(err)) { return err_push(err, LIB_ERR_PMAP_DO_MAP); } /* Grow the slab */ lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr); slab_grow(&pmap->slab, (void*)buf, bytes); } return SYS_ERR_OK; }
// FIXME: error handling (not asserts) needed in this function static void mem_allocate_handler(struct mem_binding *b, uint8_t bits, genpaddr_t minbase, genpaddr_t maxlimit) { struct capref *cap = malloc(sizeof(struct capref)); errval_t err, ret; trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits); /* refill slot allocator if needed */ err = slot_prealloc_refill(mm_ram.slot_alloc_inst); assert(err_is_ok(err)); /* refill slab allocator if needed */ while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) { struct capref frame; err = msa.a.alloc(&msa.a, &frame); assert(err_is_ok(err)); err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL); assert(err_is_ok(err)); void *buf; err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL); if (err_is_fail(err)) { DEBUG_ERR(err, "vspace_map_one_frame failed"); assert(buf); } slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8); } ret = mymm_alloc(cap, bits, minbase, maxlimit); if (err_is_ok(ret)) { mem_avail -= 1UL << bits; } else { // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed", // bits, minbase, maxlimit); *cap = NULL_CAP; } /* Reply */ err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap), ret, *cap); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { struct pending_reply *r = malloc(sizeof(struct pending_reply)); assert(r != NULL); r->b = b; r->err = ret; r->cap = cap; err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r)); assert(err_is_ok(err)); } else { DEBUG_ERR(err, "failed to reply to memory request"); allocate_response_done(cap); } } }
/**
 * \brief Create page mappings
 *
 * Aligns the request to page boundaries, makes sure the pmap's slab
 * allocator can supply enough vnodes for the mapping (refilling or, for a
 * foreign pmap, growing from malloc'd memory), then delegates to do_map().
 *
 * \param pmap    The pmap object
 * \param vaddr   The virtual address to create the mapping for
 * \param frame   The frame cap to map in
 * \param offset  Offset into the frame cap
 * \param size    Size of the mapping
 * \param flags   Flags for the mapping
 * \param retoff  If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize If non-NULL, filled in with adjusted size of mapped region
 */
static errval_t map(struct pmap *pmap, genvaddr_t vaddr, struct capref frame,
                    size_t offset, size_t size, vregion_flags_t flags,
                    size_t *retoff, size_t *retsize)
{
    struct pmap_arm *arm = (struct pmap_arm *)pmap;

    /* page-align: fold offset's sub-page part into size, round up, and pull
     * offset back to a page boundary */
    const size_t page_off = BASE_PAGE_OFFSET(offset);
    size = ROUND_UP(size + page_off, BASE_PAGE_SIZE);
    offset -= page_off;

    /* make sure enough vnode slabs are available before touching tables */
    const size_t reserve = 3; // == max_slabs_required(1)
    uint64_t free_slabs = slab_freecount(&arm->slab);
    size_t needed = max_slabs_required(size) + reserve;

    if (needed > free_slabs) {
        if (get_current_pmap() == pmap) {
            /* our own pmap: refill through the normal frame-backed path */
            errval_t err = refill_slabs(arm, needed);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        } else {
            /* foreign pmap: can't map into it to refill, so grow the slab
             * allocator from our own heap instead */
            size_t bytes = SLAB_STATIC_SIZE(needed - free_slabs,
                                            sizeof(struct vnode));
            void *backing = malloc(bytes);
            if (backing == NULL) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(&arm->slab, backing, bytes);
        }
    }

    return do_map(arm, vaddr, frame, offset, size, flags, retoff, retsize);
}
/**
 * \brief Bootstrap the memory server's RAM allocator.
 *
 * Sets up the slot allocator and the mm_ram allocator, seeds the latter with
 * static slab storage, walks the bootinfo region list adding all unconsumed
 * empty RAM regions, and finally installs memserv_alloc as the process-wide
 * RAM allocation handler. Ordering matters throughout: the slot allocator
 * must be usable before mm_add, and refills are interleaved with the region
 * walk so neither allocator starves.
 *
 * \return SYS_ERR_OK on success; asserts/aborts on bootstrap failure.
 */
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to
     * start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));
    /* slot 0 of the cnode we are about to create */
    struct capref cnode_start_cap = { .slot  = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    /* retype the RAM cap into the slot allocator's initial cnode */
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                                DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                             CNODE_BITS, cnode_start_cap,
                             1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000,
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES,
                                         MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);

            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                /* the cap slot is still occupied, so advance past it */
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is
             * empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                          " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            /* NOTE(review): the threshold presumably reserves enough RAM for
             * two cnodes plus 10 pages of headroom — TODO confirm */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                               + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }
            mem_cap.slot++;
        }
    }

    /* one final refill now that all regions are in; failure here is fatal */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
                     "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
                 mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    // setup proper multi slot alloc
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}