Example #1
/**
 * \brief Initialize the pmap object
 */
errval_t
pmap_init(struct pmap   *pmap,
          struct vspace *vspace,
          struct capref  vnode,
          struct slot_allocator *opt_slot_alloc)
{
    struct pmap_arm* pmap_arm = (struct pmap_arm*)pmap;

    /* Generic portion */
    pmap->f = pmap_funcs;
    pmap->vspace = vspace;

    // Slab allocator for vnodes
    slab_init(&pmap_arm->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap_arm->slab,
              pmap_arm->slab_buffer,
              sizeof(pmap_arm->slab_buffer));

    pmap_arm->root.is_vnode         = true;
    pmap_arm->root.u.vnode.cap      = vnode;
    pmap_arm->root.next             = NULL;
    pmap_arm->root.u.vnode.children = NULL;

    return SYS_ERR_OK;
}
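All of the examples on this page revolve around the same idiom: seed a slab allocator with a chunk of backing memory via slab_grow(), then hand out fixed-size blocks with slab_alloc(). The following stand-alone sketch is not the Barrelfish implementation, just a minimal toy with the same interface shape, to make the pattern concrete:

#include <assert.h>
#include <stddef.h>

/* Toy stand-in for Barrelfish's slab allocator; illustrative only. */
struct toy_slab {
    size_t blocksize;   /* size of each object */
    void  *freelist;    /* singly linked list of free blocks */
};

static void toy_slab_init(struct toy_slab *s, size_t blocksize)
{
    /* Each free block must be big enough to hold the freelist pointer. */
    s->blocksize = blocksize < sizeof(void *) ? sizeof(void *) : blocksize;
    s->freelist  = NULL;
}

/* Carve `len` bytes of backing memory into blocks and push them onto
 * the freelist -- the moral equivalent of slab_grow() above. */
static void toy_slab_grow(struct toy_slab *s, void *buf, size_t len)
{
    char *p = buf;
    while (len >= s->blocksize) {
        *(void **)p  = s->freelist;
        s->freelist  = p;
        p           += s->blocksize;
        len         -= s->blocksize;
    }
}

static void *toy_slab_alloc(struct toy_slab *s)
{
    void *blk = s->freelist;
    if (blk != NULL) {
        s->freelist = *(void **)blk;
    }
    return blk;   /* NULL means "grow me first" */
}

/* Usage mirroring pmap_init: a static buffer seeds the allocator, so the
 * first allocations need no dynamic memory at all -- exactly what a pmap
 * needs while bootstrapping the address space. */
struct vnode_like { void *children; int is_vnode; };

static union {
    void *align;   /* force pointer alignment of the buffer */
    char  bytes[32 * sizeof(struct vnode_like)];
} slab_buffer;

int main(void)
{
    struct toy_slab slab;
    toy_slab_init(&slab, sizeof(struct vnode_like));
    toy_slab_grow(&slab, slab_buffer.bytes, sizeof(slab_buffer.bytes));
    assert(toy_slab_alloc(&slab) != NULL);
    return 0;
}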
Example #2
/**
 * \brief Allocate some slabs
 *
 * \param retbuf     Pointer to return the allocated memory
 * \param slab_type  Type of slab the memory is allocated for
 *
 * Since this region is used for backing specific slabs,
 * only those types of slabs can be allocated.
 */
errval_t vspace_pinned_alloc(void **retbuf, enum slab_type slab_type)
{
    errval_t err;
    struct pinned_state *state = get_current_pinned_state();

    // Select slab type
    struct slab_allocator *slab;
    switch(slab_type) {
    case VREGION_LIST:
        slab = &state->vregion_list_slab;
        break;
    case FRAME_LIST:
        slab = &state->frame_list_slab;
        break;
    default:
        return LIB_ERR_VSPACE_PINNED_INVALID_TYPE;
    }

    thread_mutex_lock(&state->mutex);

    // Try allocating
    void *buf = slab_alloc(slab);
    if (buf == NULL) {
        // Out of memory, grow
        struct capref frame;
        err = frame_alloc(&frame, BASE_PAGE_SIZE, NULL);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "frame_alloc in vspace_pinned_alloc");
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }
        err = state->memobj.m.f.fill((struct memobj*)&state->memobj,
                                     state->offset, frame,
                                     BASE_PAGE_SIZE);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "memobj_fill in vspace_pinned_alloc");
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) +
            state->offset;
        void *slab_buf = (void*)vspace_genvaddr_to_lvaddr(gvaddr);
        slab_grow(slab, slab_buf, BASE_PAGE_SIZE);
        state->offset += BASE_PAGE_SIZE;

        // Try again
        buf = slab_alloc(slab);
    }

    thread_mutex_unlock(&state->mutex);

    if (buf == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    } else {
        *retbuf = buf;
        return SYS_ERR_OK;
    }
}
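vspace_pinned_alloc is the canonical grow-on-demand loop: take the lock, try slab_alloc(), and only if it returns NULL acquire fresh backing memory, slab_grow(), and retry once. Here is a simplified, self-contained rendering of that control flow, using pthreads and malloc() as stand-ins for Barrelfish mutexes and frame mapping (all names are illustrative):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical allocator state; blocksize must be >= sizeof(void *). */
struct pool {
    pthread_mutex_t mutex;
    void           *freelist;
    size_t          blocksize;
};

static void pool_grow(struct pool *p, void *buf, size_t len)
{
    char *c = buf;
    while (len >= p->blocksize) {
        *(void **)c  = p->freelist;
        p->freelist  = c;
        c           += p->blocksize;
        len         -= p->blocksize;
    }
}

/* Mirrors vspace_pinned_alloc: lock, try, grow on failure, retry once. */
static int pool_alloc(struct pool *p, void **retbuf)
{
    pthread_mutex_lock(&p->mutex);

    void *buf = p->freelist;
    if (buf == NULL) {
        /* Out of memory: grab a page-sized chunk and retry. malloc()
         * stands in for frame_alloc + memobj fill + mapping. */
        void *backing = malloc(4096);
        if (backing == NULL) {
            pthread_mutex_unlock(&p->mutex);
            return -1;   /* analogous to LIB_ERR_FRAME_ALLOC */
        }
        pool_grow(p, backing, 4096);
        buf = p->freelist;
    }
    if (buf != NULL) {
        p->freelist = *(void **)buf;
    }

    pthread_mutex_unlock(&p->mutex);

    if (buf == NULL) {
        return -1;       /* analogous to LIB_ERR_SLAB_ALLOC_FAIL */
    }
    *retbuf = buf;
    return 0;
}

int main(void)
{
    struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, 64 };
    void *obj;
    return pool_alloc(&p, &obj);   /* first call takes the grow path */
}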
Example #3
// FIXME: error handling (not asserts) needed in this function
static void mem_allocate_handler(struct mem_binding *b, uint8_t bits,
                                 genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref *cap = malloc(sizeof(struct capref));
    errval_t err, ret;

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
        struct capref frame;
        err = msa.a.alloc(&msa.a, &frame);
        assert(err_is_ok(err));
        err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL);
        assert(err_is_ok(err));
        void *buf;
        err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map_one_frame failed");
        }
        // buf is only valid if the mapping succeeded
        assert(err_is_ok(err));
        slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
    }

    ret = mymm_alloc(cap, bits, minbase, maxlimit);
    if (err_is_ok(ret)) {
        mem_avail -= 1UL << bits;
    } else {
        // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed",
        //          bits, minbase, maxlimit);
        *cap = NULL_CAP;
    }

    /* Reply */
    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }
}
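The reply path above also shows how a transport that can refuse a send is handled: on FLOUNDER_ERR_TX_BUSY the reply arguments are parked in a heap-allocated pending_reply and a continuation re-sends them once the channel drains. Below is a generic sketch of that queue-and-retry shape with a toy channel type (illustrative names only, not the Flounder API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy channel whose send may report "busy"; a registered callback
 * fires once the channel drains. */
struct chan;
typedef void (*retry_fn)(void *arg);

struct chan {
    bool     busy;        /* whether a send is currently in flight */
    retry_fn pending;     /* continuation to run when the channel drains */
    void    *pending_arg;
};

static bool chan_try_send(struct chan *c, int payload)
{
    if (c->busy) {
        return false;
    }
    printf("sent reply %d\n", payload);
    return true;
}

static void chan_register_send(struct chan *c, retry_fn fn, void *arg)
{
    c->pending     = fn;
    c->pending_arg = arg;
}

/* Parked reply arguments, as in struct pending_reply above. */
struct pending_reply {
    struct chan *c;
    int          payload;
};

static void retry_reply(void *arg)
{
    struct pending_reply *r = arg;
    if (chan_try_send(r->c, r->payload)) {
        free(r);                                   /* delivered */
    } else {
        chan_register_send(r->c, retry_reply, r);  /* still busy, re-arm */
    }
}

static void reply_or_queue(struct chan *c, int payload)
{
    if (chan_try_send(c, payload)) {
        return;                                    /* fast path */
    }
    /* Busy: park the arguments and retry from the channel's callback. */
    struct pending_reply *r = malloc(sizeof(*r));
    assert(r != NULL);
    r->c       = c;
    r->payload = payload;
    chan_register_send(c, retry_reply, r);
}

int main(void)
{
    struct chan c = { .busy = true };
    reply_or_queue(&c, 42);     /* channel busy: reply gets parked */
    c.busy = false;             /* channel drains ... */
    c.pending(c.pending_arg);   /* ... and the continuation re-sends */
    return 0;
}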
Example #4
errval_t memserv_alloc(struct capref *ret, uint8_t bits, genpaddr_t minbase,
                       genpaddr_t maxlimit)
{
    errval_t err;

    assert(bits >= MINSIZEBITS);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    size_t freecount = slab_freecount(&mm_ram.slabs);
    while (!refilling && (freecount <= MINSPARENODES)) {
        refilling = true;
        struct capref frame;
        err = msa.a.alloc(&msa.a, &frame);
        assert(err_is_ok(err));
        size_t retsize;
        err = frame_create(frame, BASE_PAGE_SIZE * 8, &retsize);
        assert(err_is_ok(err));
        assert(retsize % BASE_PAGE_SIZE == 0);
        assert(retsize >= BASE_PAGE_SIZE);
        void *buf;
        err = paging_map_frame(get_current_paging_state(), &buf, retsize, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "paging_map_frame failed");
        }
        // buf is only valid if the mapping succeeded
        assert(err_is_ok(err));
        slab_grow(&mm_ram.slabs, buf, retsize);
        freecount = slab_freecount(&mm_ram.slabs);
    }
    if (freecount > MINSPARENODES) {
        refilling = false;
    }

    if (maxlimit == 0) {
        err = mm_alloc(&mm_ram, bits, ret, NULL);
    } else {
        err = mm_alloc_range(&mm_ram, bits, minbase, maxlimit, ret, NULL);
    }

    if (err_is_fail(err)) {
        debug_printf("in mem_serv:mymm_alloc(bits=%"PRIu8", minbase=%"PRIxGENPADDR
                     ", maxlimit=%"PRIxGENPADDR")\n", bits, minbase, maxlimit);
        DEBUG_ERR(err, "mem_serv:mymm_alloc");
    }

    return err;
}
Example #5
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
{
    errval_t err;

    /* Keep looping until we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Number of bytes required for #request slabs
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size */
        struct capref cap;
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // If we recurse, we require more slabs than needed to map a single page
            assert(required_slabs_for_frame > 4);

            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Perform mapping */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Grow the slab */
        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
    }

    return SYS_ERR_OK;
}
Example #6
/**
 * \brief Create page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address to create the mapping for
 * \param frame    The frame cap to map in
 * \param offset   Offset into the frame cap
 * \param size     Size of the mapping
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize  If non-NULL, filled in with adjusted size of mapped region
 */
static errval_t
map(struct pmap     *pmap,
    genvaddr_t       vaddr,
    struct capref    frame,
    size_t           offset,
    size_t           size,
    vregion_flags_t  flags,
    size_t          *retoff,
    size_t          *retsize)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    size   += BASE_PAGE_OFFSET(offset);
    size    = ROUND_UP(size, BASE_PAGE_SIZE);
    offset -= BASE_PAGE_OFFSET(offset);

    const size_t slabs_reserve = 3; // == max_slabs_required(1)
    uint64_t  slabs_free       = slab_freecount(&pmap_arm->slab);
    size_t    slabs_required   = max_slabs_required(size) + slabs_reserve;

    if (slabs_required > slabs_free) {
        if (get_current_pmap() == pmap) {
            errval_t err = refill_slabs(pmap_arm, slabs_required);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }
        else {
            size_t bytes = SLAB_STATIC_SIZE(slabs_required - slabs_free,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
            if (!buf) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(&pmap_arm->slab, buf, bytes);
        }
    }

    return do_map(pmap_arm, vaddr, frame, offset, size, flags,
                  retoff, retsize);
}
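The three lines at the top of map() normalise an arbitrary (offset, size) pair to page granularity: grow the size by the sub-page part of the offset, round the size up to a whole number of pages, and align the offset down. A worked example, assuming 4 KiB pages and plausible definitions of the macros (the real ones live in Barrelfish headers):

#include <assert.h>
#include <stddef.h>

/* Assumed definitions for illustration only. */
#define BASE_PAGE_SIZE        4096u
#define BASE_PAGE_OFFSET(a)   ((a) & (BASE_PAGE_SIZE - 1))
#define ROUND_UP(n, sz)       ((((n) + (sz) - 1) / (sz)) * (sz))

int main(void)
{
    size_t offset = 5000;   /* 4096 + 904: starts 904 bytes into page 1 */
    size_t size   = 100;

    size   += BASE_PAGE_OFFSET(offset);       /* 100 + 904  = 1004 */
    size    = ROUND_UP(size, BASE_PAGE_SIZE); /* 1004 -> 4096      */
    offset -= BASE_PAGE_OFFSET(offset);       /* 5000 - 904 = 4096 */

    /* The mapping now starts on a page boundary and still covers every
     * byte of the original [5000, 5100) range. */
    assert(offset == 4096 && size == 4096);
    return 0;
}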
Example #7
/**
 * \brief Allocate a slot from the multi slot allocator
 *
 * \param ca   Instance of the allocator
 * \param ret  Pointer to return the allocated slot
 */
errval_t multi_alloc(struct slot_allocator *ca, struct capref *ret)
{
    errval_t err = SYS_ERR_OK;
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);
    assert(ca->space != 0);
    ca->space--;

    /* Try allocating from the list of single slot allocators */
    struct slot_allocator_list *walk = mca->head;
    //struct slot_allocator_list *prev = NULL;
    while(walk != NULL) {
        err = walk->a.a.alloc(&walk->a.a, ret);
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_NO_SPACE) {
            break;
        }
        //prev = walk;
        walk = walk->next;
    }
    if (err_is_fail(err)) {
        thread_mutex_unlock(&ca->mutex);
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC);
    }

    /* If no more slots left, grow */
    if (ca->space == 0) {
        ca->space = ca->nslots;
        /* Pull in the reserve */
        mca->reserve->next = mca->head;
        mca->head = mca->reserve;

        /* Setup a new reserve */
        // Cnode
        struct capref cap;
        struct cnoderef cnode;
        err = mca->top->alloc(mca->top, &cap);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SLOT_ALLOC);
        }
        thread_mutex_unlock(&ca->mutex); // cnode_create_raw uses ram_alloc
                                         // which may call slot_alloc
        err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        thread_mutex_lock(&ca->mutex);

        // Buffers
        void *buf = slab_alloc(&mca->slab);
        if (!buf) { /* Grow slab */
            // Allocate slot out of the list
            mca->a.space--;
            struct capref frame;
            err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
            if (err_is_fail(err)) {
                thread_mutex_unlock(&ca->mutex);
                return err_push(err, LIB_ERR_SLOT_ALLOC);
            }

            thread_mutex_unlock(&ca->mutex); // following functions may call
                                             // slot_alloc
            void *slab_buf;
            size_t size;
            err = vspace_mmu_aware_map(&mca->mmu_state, frame,
                                       mca->slab.blocksize, &slab_buf, &size);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
            }

            thread_mutex_lock(&ca->mutex);

            // Grow slab
            slab_grow(&mca->slab, slab_buf, size);

            // Try allocating again
            buf = slab_alloc(&mca->slab);
            if (buf == NULL) {
                thread_mutex_unlock(&ca->mutex);
                return LIB_ERR_SLAB_ALLOC_FAIL;
            }
        }

        mca->reserve = buf;
        buf = (char *)buf + sizeof(struct slot_allocator_list);
        size_t bufsize = mca->slab.blocksize - sizeof(struct slot_allocator_list);

        // Allocator
        err = single_slot_alloc_init_raw(&mca->reserve->a, cap, cnode,
                                         mca->a.nslots, buf, bufsize);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
        }
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}
Example #8
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));
    struct capref cnode_start_cap = { .slot  = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                                DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                             CNODE_BITS, cnode_start_cap,
                             1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000,
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);

            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                               " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                                + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }
            mem_cap.slot++;
        }
    }

    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);


    // setup proper multi slot alloc
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}
Example #9
/**
 * \brief Sets up a local memory allocator for init to use until the
 * memory server is ready.
 */
errval_t initialize_ram_alloc(void)
{
    errval_t err;

    /* walk bootinfo looking for suitable RAM cap to use
     * we pick the first cap equal to MM_REQUIREDBITS,
     * or else the next closest less than MM_MAXSIZEBITS */
    int mem_region = -1, mem_slot = 0;
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    assert(bi != NULL);
    for (int i = 0; i < bi->regions_length; i++) {
        assert(!bi->regions[i].mr_consumed);
        if (bi->regions[i].mr_type == RegionType_Empty) {
            if (bi->regions[i].mr_bits >= MM_REQUIREDBITS
                && bi->regions[i].mr_bits <= MM_MAXSIZEBITS && (mem_region == -1
                 || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
                mem_region = i;
                mem_cap.slot = mem_slot;
                if (bi->regions[i].mr_bits == MM_REQUIREDBITS) {
                    break;
                }
            }
            mem_slot++;
        }
    }
    if (mem_region < 0) {
        printf("Error: no RAM capability found in the size range "
               "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS);
        return INIT_ERR_NO_MATCHING_RAM_CAP;
    }
    bi->regions[mem_region].mr_consumed = true;

    /* init slot allocator */
    static struct slot_alloc_basecn init_slot_alloc;
    err = slot_alloc_basecn_init(&init_slot_alloc);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
    }

    /*  init MM allocator */
    assert(bi->regions[mem_region].mr_type != RegionType_Module);
    err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base,
                  bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL,
                  slot_alloc_basecn, &init_slot_alloc, true);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    /* give MM allocator enough static storage for its node allocator */
    static char nodebuf[SLAB_STATIC_SIZE(MM_NNODES, MM_NODE_SIZE(MM_MAXCHILDBITS))];
    slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));

    /* add single RAM cap to allocator */
    err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits,
                 bi->regions[mem_region].mr_base);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_ADD);
    }

    // initialise generic RAM allocator to use local allocator
    err = ram_alloc_set(mymm_alloc);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    return SYS_ERR_OK;
}
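The region scan in initialize_ram_alloc() is a bounded best-fit search: take the first region of exactly MM_REQUIREDBITS, otherwise remember the smallest acceptable region seen so far. The same logic, reduced to a self-contained sketch over a hypothetical array of region sizes (the constants here are made up; the real ones come from the mem_serv headers):

#include <stdio.h>

#define REQUIREDBITS 20
#define MAXSIZEBITS  28

int main(void)
{
    /* Hypothetical bootinfo: region sizes as powers of two. */
    int region_bits[] = { 30, 24, 22, 27, 20 };
    int n = sizeof(region_bits) / sizeof(region_bits[0]);
    int best = -1;

    for (int i = 0; i < n; i++) {
        int b = region_bits[i];
        if (b >= REQUIREDBITS && b <= MAXSIZEBITS
            && (best == -1 || b < region_bits[best])) {
            best = i;            /* smallest acceptable region so far */
            if (b == REQUIREDBITS) {
                break;           /* exact fit, stop early */
            }
        }
    }

    if (best < 0) {
        printf("no region in range 2^%d..2^%d\n", REQUIREDBITS, MAXSIZEBITS);
    } else {
        printf("picked region %d (2^%d bytes)\n", best, region_bits[best]);
    }
    return 0;
}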