Example #1
static errval_t mymm_free(struct capref ramcap, genpaddr_t base, uint8_t bits)
{
    errval_t ret;
    genpaddr_t mem_to_add;

    mem_to_add = (genpaddr_t)1 << bits;

    ret = mm_free(&mm_ram, ramcap, base, bits);
    if (err_is_fail(ret)) {
        if (err_no(ret) == MM_ERR_NOT_FOUND) {
            // memory wasn't there initially, add it
            ret = mm_add(&mm_ram, ramcap, bits, base);
            if (err_is_fail(ret)) {
                /* DEBUG_ERR(ret, "failed to add RAM to allocator"); */
                return ret;
            }
            mem_total += mem_to_add;
        } else {
            /* DEBUG_ERR(ret, "failed to free RAM in allocator"); */
            return ret;
        }
    }

    mem_avail += mem_to_add;

    return SYS_ERR_OK;
}
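Note the accounting split above: mem_total only grows on the mm_add path (memory the allocator had never seen before), while mem_avail grows in both cases. A hypothetical caller returning a page-sized cap might look like the sketch below; ramcap and base are assumed to come from an earlier allocation, and BASE_PAGE_BITS is the usual Barrelfish page-size constant.

/* Hypothetical usage sketch: hand one page-sized RAM cap back
 * to the allocator. Not part of the original example. */
static void release_page(struct capref ramcap, genpaddr_t base)
{
    errval_t err = mymm_free(ramcap, base, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "returning RAM cap to allocator");
    }
}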
Example #2
/*
 * coalesce - boundary-tag coalescing. Returns a pointer to the coalesced block.
 * Removes any free adjacent blocks from the free list, merges them with
 * block bp, and inserts the merged block back onto the free list.
 */
static void *coalesce(void *bp)
{
    size_t prev_alloc = GET_ALLOC(PREV_BLKP(bp)) || PREV_BLKP(bp) == bp;
    size_t next_alloc = GET_ALLOC(NEXT_BLKP(bp));
    size_t size = GET_SIZE(bp);

    if (prev_alloc && next_alloc) {
        /* both neighbours allocated: nothing to merge */
    }
    /* previous block is free */
    else if (!prev_alloc && next_alloc) {
        size += GET_SIZE(PREV_BLKP(bp));
        bp = PREV_BLKP(bp);
        mm_delete(bp);
        SET_HDRP(bp, PACK(size, 0));
        SET_FTRP(bp, PACK(size, 0));
    }
    /* next block is free */
    else if (prev_alloc && !next_alloc) {
        size += GET_SIZE(NEXT_BLKP(bp));
        mm_delete(NEXT_BLKP(bp));
        SET_HDRP(bp, PACK(size, 0));
        SET_FTRP(bp, PACK(size, 0));
    }
    /* both neighbouring blocks are free */
    else if (!prev_alloc && !next_alloc) {
        size += GET_SIZE(PREV_BLKP(bp)) + GET_SIZE(NEXT_BLKP(bp));
        mm_delete(PREV_BLKP(bp));
        mm_delete(NEXT_BLKP(bp));
        bp = PREV_BLKP(bp);
        SET_HDRP(bp, PACK(size, 0));
        SET_FTRP(bp, PACK(size, 0));
    }

    /* finally, insert the coalesced block into the free list and return it */
    mm_add(bp);

    //if ((ro > (char *)bp) && (ro < NEXT_BLKP(bp)))
    //ro = bp;
    return bp;
}
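The boundary-tag macros this function depends on are not shown in the example. Below is a plausible reconstruction, assuming the common layout where each block carries a header and footer word packing the block size with an allocated bit, and bp points at the payload; this is hypothetical, and the real project's macros may differ.

/* Hypothetical boundary-tag macros consistent with the usage above.
 * Layout: [header][payload ...][footer]; bp points at the payload;
 * the size stored in a tag covers the whole block, both tags included. */
#define WSIZE             sizeof(size_t)
#define PACK(size, alloc) ((size) | (alloc))           /* size | alloc bit */
#define GET(p)            (*(size_t *)(p))
#define PUT(p, val)       (*(size_t *)(p) = (val))
#define HDRP(bp)          ((char *)(bp) - WSIZE)       /* header address   */
#define GET_SIZE(bp)      (GET(HDRP(bp)) & ~(size_t)0x7)
#define GET_ALLOC(bp)     (GET(HDRP(bp)) & 0x1)
/* FTRP reads the size from the header, so SET_HDRP must run first,
 * as it does in coalesce() above. */
#define FTRP(bp)          ((char *)(bp) + GET_SIZE(bp) - 2 * WSIZE)
#define SET_HDRP(bp, val) PUT(HDRP(bp), (val))
#define SET_FTRP(bp, val) PUT(FTRP(bp), (val))
#define NEXT_BLKP(bp)     ((void *)((char *)(bp) + GET_SIZE(bp)))
#define PREV_BLKP(bp)     ((void *)((char *)(bp) - \
                           (GET((char *)(bp) - 2 * WSIZE) & ~(size_t)0x7)))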
Example #3
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));
    struct capref cnode_start_cap = { .slot = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                              DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                           CNODE_BITS, cnode_start_cap,
                           1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000,
                MAXSIZEBITS, MAXCHILDBITS, NULL,
                slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);

            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                               " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                                + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }
            mem_cap.slot++;
        }
    }

    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);


    // set up the proper multi-slot allocator
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}
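After ram_alloc_set(memserv_alloc) switches the allocator over, clients allocate through the generic libbarrelfish entry point rather than the local mm instance. A minimal usage sketch, assuming the ram_alloc(retcap, size_bits) interface (an assumption on this page):

/* Hypothetical client-side allocation after the switch-over. */
static void alloc_one_page(void)
{
    struct capref cap;
    errval_t err = ram_alloc(&cap, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "ram_alloc after mem_serv init");
    }
}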
Example #4
/**
 * \brief Sets up a local memory allocator for init to use until the memory
 * server is ready.
 */
errval_t initialize_ram_alloc(void)
{
    errval_t err;

    /* Walk bootinfo looking for a suitable RAM cap to use: we pick the
     * first cap of exactly MM_REQUIREDBITS bits, or failing that, the
     * smallest one between MM_REQUIREDBITS and MM_MAXSIZEBITS. */
    int mem_region = -1, mem_slot = 0;
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    assert(bi != NULL);
    for (int i = 0; i < bi->regions_length; i++) {
        assert(!bi->regions[i].mr_consumed);
        if (bi->regions[i].mr_type == RegionType_Empty) {
            if (bi->regions[i].mr_bits >= MM_REQUIREDBITS
                && bi->regions[i].mr_bits <= MM_MAXSIZEBITS && (mem_region == -1
                 || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
                mem_region = i;
                mem_cap.slot = mem_slot;
                if (bi->regions[i].mr_bits == MM_REQUIREDBITS) {
                    break;
                }
            }
            mem_slot++;
        }
    }
    if (mem_region < 0) {
        printf("Error: no RAM capability found in the size range "
               "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS);
        return INIT_ERR_NO_MATCHING_RAM_CAP;
    }
    bi->regions[mem_region].mr_consumed = true;

    /* init slot allocator */
    static struct slot_alloc_basecn init_slot_alloc;
    err = slot_alloc_basecn_init(&init_slot_alloc);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
    }

    /* init MM allocator */
    assert(bi->regions[mem_region].mr_type != RegionType_Module);
    err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base,
                  bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL,
                  slot_alloc_basecn, &init_slot_alloc, true);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    /* give MM allocator enough static storage for its node allocator */
    static char nodebuf[SLAB_STATIC_SIZE(MM_NNODES, MM_NODE_SIZE(MM_MAXCHILDBITS))];
    slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));

    /* add single RAM cap to allocator */
    err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits,
               bi->regions[mem_region].mr_base);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_ADD);
    }

    // initialise generic RAM allocator to use local allocator
    err = ram_alloc_set(mymm_alloc);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    return SYS_ERR_OK;
}
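The mymm_alloc callback registered just above is not shown in this example. Below is a minimal sketch of the shape it could take, assuming Barrelfish's ram_alloc_func_t callback signature and the mm_alloc()/mm_alloc_range() routines from libmm (all three are assumptions here):

/* Hypothetical sketch of the mymm_alloc callback registered via
 * ram_alloc_set() above; the real implementation may differ. */
static errval_t mymm_alloc(struct capref *ret, uint8_t bits,
                           uint64_t minbase, uint64_t maxlimit)
{
    errval_t err;
    genpaddr_t base;

    if (maxlimit == 0) {
        /* caller does not care where the RAM comes from */
        err = mm_alloc(&mymm, bits, ret, &base);
    } else {
        /* caller constrained the physical address range */
        err = mm_alloc_range(&mymm, bits, minbase, maxlimit, ret, &base);
    }
    return err;
}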
static errval_t init_allocators(void)
{
    errval_t err, msgerr;

    struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
    assert(cl != NULL);

    // Get the bootinfo and map it in.
    struct capref bootinfo_frame;
    size_t bootinfo_size;
    struct bootinfo *bootinfo;

    msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
    if (err_is_fail(msgerr) || err_is_fail(err)) {
        USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
    }

    err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size, bootinfo_frame,
                               NULL, NULL);
    assert(err_is_ok(err));

    /* Initialize the memory allocator to handle PhysAddr caps */
    static struct range_slot_allocator devframes_allocator;
    err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
    }

    err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48,
                  /* This next parameter is important. It specifies the maximum
                   * amount that a cap may be "chunked" (i.e. broken up) at each
                   * level in the allocator. Setting it higher than 1 reduces the
                   * memory overhead of keeping all the intermediate caps around,
                   * but leads to problems if you chunk up a cap too small to be
                   * able to allocate a large subregion. This caused problems
                   * for me with a large framebuffer... -AB 20110810 */
                  1, /*was DEFAULT_CNODE_BITS,*/
                  slab_default_refill, slot_alloc_dynamic, &devframes_allocator, false);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    // Request I/O Cap
    struct capref requested_caps;
    errval_t error_code;
    err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    // Copy into correct slot
    struct capref caps_io = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };
    err = cap_copy(caps_io, requested_caps);
    assert(err_is_ok(err));

    // XXX: The code below is confused about gen/l/paddrs.
    // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
    err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    physical_caps = requested_caps;

    // Build the capref for the first physical address capability
    struct capref phys_cap;
    phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
    phys_cap.slot = 0;

    struct cnoderef devcnode;
    err = slot_alloc(&my_devframes_cnode);
    assert(err_is_ok(err));
    cslot_t slots;
    err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cnode create");
    }
    struct capref devframe;
    devframe.cnode = devcnode;
    devframe.slot = 0;

    for (int i = 0; i < bootinfo->regions_length; i++) {
        struct mem_region *mrp = &bootinfo->regions[i];
        if (mrp->mr_type == RegionType_Module) {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base,
                         0,
                         mrp->mrmod_size,
                         mrp->mr_type,
                         mrp->mrmod_data);
        } else {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base,
                         mrp->mr_bits,
                         ((size_t)1) << mrp->mr_bits,
                         mrp->mr_type,
                         mrp->mrmod_data);
        }

        if (mrp->mr_type == RegionType_PhyAddr ||
            mrp->mr_type == RegionType_PlatformData) {
            ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
                       i, mrp->mr_base,
                       mrp->mr_base + (((size_t)1)<<mrp->mr_bits),
                       mrp->mr_type == RegionType_PhyAddr ?
                       "physical address" : "platform data");

            err = cap_retype(devframe, phys_cap, ObjType_DevFrame, mrp->mr_bits);
            if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
                printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
            } else {
                assert(err_is_ok(err));

                err = mm_add(&pci_mm_physaddr, devframe,
                             mrp->mr_bits, mrp->mr_base);
                if (err_is_fail(err)) {
                    USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
                }
            }

            phys_cap.slot++;
            devframe.slot++;
        }
    }

    return SYS_ERR_OK;
}
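Once init_allocators() has populated pci_mm_physaddr, a driver can pull the DevFrame cap covering a specific physical range back out of the allocator. A sketch of that step, assuming mm_alloc_range() from libmm; the BAR address and size here are made up for illustration:

/* Hypothetical usage: fetch the DevFrame cap covering a (made-up)
 * 16 MB PCI BAR at 0xfd000000 from the allocator built above. */
static errval_t get_bar_devframe(struct capref *retcap)
{
    const genpaddr_t base = 0xfd000000;     /* hypothetical BAR base */
    const uint8_t bits = 24;                /* 2^24 = 16 MB          */
    genpaddr_t retbase;

    return mm_alloc_range(&pci_mm_physaddr, bits, base,
                          base + ((genpaddr_t)1 << bits),
                          retcap, &retbase);
}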