/**
 * \brief Refill the pmap's vnode slab allocator until at least \p request
 *        slabs are free.
 *
 * Allocates a RAM frame, maps it into the pmap's reserved metadata vregion,
 * and feeds the mapped memory to the slab allocator. Because mapping the
 * frame itself consumes vnode slabs, the function recurses when the current
 * free count cannot cover the mapping it is about to perform.
 *
 * \param pmap    The ARM pmap whose slab allocator is refilled
 * \param request Minimum number of free slabs required on return
 *
 * \return SYS_ERR_OK on success, or a pushed error from frame allocation,
 *         the recursive refill, or the mapping step.
 */
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
{
    errval_t err;

    /* Keep looping till we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Amount of bytes required for #request
        // (only the shortfall, not the full request, is allocated)
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size; frame_alloc may round `bytes` up and
         * writes the actual size back through the third argument */
        struct capref cap;
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // If we recurse, we require more slabs than to map a single page,
            // which guarantees the recursion makes progress and terminates
            assert(required_slabs_for_frame > 4);

            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Perform mapping: carve the next chunk out of the pmap's reserved
         * metadata vregion (vregion_offset is a simple bump pointer) */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Grow the slab with the freshly mapped memory */
        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
    }

    return SYS_ERR_OK;
}
/**
 * \brief Create page mappings
 *
 * Page-aligns the request, ensures the vnode slab pool can cover the
 * worst-case number of page tables for it, and delegates to do_map().
 *
 * \param pmap    The pmap object
 * \param vaddr   The virtual address to create the mapping for
 * \param frame   The frame cap to map in
 * \param offset  Offset into the frame cap
 * \param size    Size of the mapping
 * \param flags   Flags for the mapping
 * \param retoff  If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize If non-NULL, filled in with adjusted size of mapped region
 */
static errval_t map(struct pmap *pmap, genvaddr_t vaddr, struct capref frame,
                    size_t offset, size_t size, vregion_flags_t flags,
                    size_t *retoff, size_t *retsize)
{
    struct pmap_arm *arm = (struct pmap_arm *)pmap;

    /* absorb the sub-page part of the offset into the size, then round the
     * size up to whole pages and page-align the offset */
    size_t suboff = BASE_PAGE_OFFSET(offset);
    size = ROUND_UP(size + suboff, BASE_PAGE_SIZE);
    offset -= suboff;

    /* worst-case slab demand for this mapping, plus a small reserve
     * (the reserve equals max_slabs_required(1)) */
    const size_t reserve = 3;
    uint64_t avail = slab_freecount(&arm->slab);
    size_t needed = max_slabs_required(size) + reserve;

    if (needed > avail) {
        if (get_current_pmap() == pmap) {
            /* our own address space: refill by mapping fresh frames */
            errval_t err = refill_slabs(arm, needed);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        } else {
            /* foreign pmap: we cannot map into it to refill, so grow the
             * slab pool from our own heap instead */
            size_t bytes = SLAB_STATIC_SIZE(needed - avail,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
            if (!buf) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(&arm->slab, buf, bytes);
        }
    }

    return do_map(arm, vaddr, frame, offset, size, flags, retoff, retsize);
}
/**
 * \brief Bring up the full RAM allocator for the memory server.
 *
 * Three phases: (1) bootstrap a preallocating slot allocator from a single
 * RAM cap, (2) seed the MM allocator's node slab with static storage,
 * (3) walk bootinfo and hand every unconsumed empty RAM region to the MM
 * allocator, refilling slot and slab allocators as it goes. Finally installs
 * memserv_alloc as the generic RAM allocation function.
 *
 * \return SYS_ERR_OK on success; aborts on unrecoverable slot-allocator
 *         failure.
 *
 * NOTE(review): early failures are handled with assert(err_is_ok(err)),
 * which compiles away under NDEBUG — confirm this file is never built with
 * asserts disabled, or these errors would be silently ignored.
 */
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));

    /* cnode field is filled in by cnode_create_from_mem below */
    struct capref cnode_start_cap = { .slot = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                              DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap,
                             MAXCHILDBITS,
                             CNODE_BITS, cnode_start_cap,
                             1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000,
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    /* mem_cap.slot tracks the region's cap slot within the supercn;
     * it advances once per Empty region, consumed or not */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);
            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                // non-fatal: region is skipped, remaining regions still added
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                          " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible
             * (the threshold leaves headroom for the refill's own
             * allocations; presumably tuned empirically — do not change
             * without understanding the bootstrapping order) */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                                + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }
            mem_cap.slot++;
        }
    }

    /* final refill: here a failure is fatal, unlike inside the loop */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
                     "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
                 mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    // setup proper multi slot alloc
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}
/** * \brief Setups a local memory allocator for init to use till the memory server * is ready to be used. */ errval_t initialize_ram_alloc(void) { errval_t err; /* walk bootinfo looking for suitable RAM cap to use * we pick the first cap equal to MM_REQUIREDBITS, * or else the next closest less than MM_MAXSIZEBITS */ int mem_region = -1, mem_slot = 0; struct capref mem_cap = { .cnode = cnode_super, .slot = 0, }; assert(bi != NULL); for (int i = 0; i < bi->regions_length; i++) { assert(!bi->regions[i].mr_consumed); if (bi->regions[i].mr_type == RegionType_Empty) { if (bi->regions[i].mr_bits >= MM_REQUIREDBITS && bi->regions[i].mr_bits <= MM_MAXSIZEBITS && (mem_region == -1 || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) { mem_region = i; mem_cap.slot = mem_slot; if (bi->regions[i].mr_bits == MM_REQUIREDBITS) { break; } } mem_slot++; } } if (mem_region < 0) { printf("Error: no RAM capability found in the size range " "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS); return INIT_ERR_NO_MATCHING_RAM_CAP; } bi->regions[mem_region].mr_consumed = true; /* init slot allocator */ static struct slot_alloc_basecn init_slot_alloc; err = slot_alloc_basecn_init(&init_slot_alloc); if (err_is_fail(err)) { return err_push(err, MM_ERR_SLOT_ALLOC_INIT); } /* init MM allocator */ assert(bi->regions[mem_region].mr_type != RegionType_Module); err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base, bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL, slot_alloc_basecn, &init_slot_alloc, true); if (err_is_fail(err)) { return err_push(err, MM_ERR_MM_INIT); } /* give MM allocator enough static storage for its node allocator */ static char nodebuf[SLAB_STATIC_SIZE(MM_NNODES, MM_NODE_SIZE(MM_MAXCHILDBITS))]; slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf)); /* add single RAM cap to allocator */ err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits, bi->regions[mem_region].mr_base); if (err_is_fail(err)) { return err_push(err, MM_ERR_MM_ADD); } // 
initialise generic RAM allocator to use local allocator err = ram_alloc_set(mymm_alloc); if (err_is_fail(err)) { return err_push(err, LIB_ERR_RAM_ALLOC_SET); } return SYS_ERR_OK; }