Esempio n. 1
0
/* Register a new reserve chunk size with the allocator's memory space.
 *
 * Grows the three parallel per-size tables (mspace_chunk, mspace_chunk_count,
 * mspace_chunks) by one entry and allocates a chunk.count-sized pointer array
 * for the new size's reserved allocations (initially empty: count 0).
 *
 * Returns 0 on success, 1 if a reservation of chunk.size already exists, or
 * the mspace allocation error code on failure (all partial allocations are
 * freed on the failure paths).
 */
int allocman_configure_mspace_reserve(allocman_t *alloc, struct allocman_mspace_chunk chunk) {
    int root = _start_operation(alloc);
    uint32_t i;
    struct allocman_mspace_chunk *new_chunk;
    uint32_t *new_counts;
    void ***new_chunks;
    void **new_alloc;
    int error;
    /* ensure this chunk hasn't already been added. would be nice to handle both decreasing and
     * increasing reservations, but I cannot see the use case for that */
    for (i = 0; i < alloc->num_mspace_chunks; i++) {
        if (alloc->mspace_chunk[i].size == chunk.size) {
            /* NOTE(review): early returns in this function skip _end_operation(alloc, root);
             * this matches the file's existing convention, but verify it is intentional */
            return 1;
        }
    }
    /* tack this chunk on: allocate all four new arrays up front so failure
     * can unwind without having touched the existing tables */
    new_chunk = allocman_mspace_alloc(alloc, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        return error;
    }
    new_counts = allocman_mspace_alloc(alloc, sizeof(uint32_t) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    new_chunks = allocman_mspace_alloc(alloc, sizeof(void **) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(uint32_t) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    new_alloc = allocman_mspace_alloc(alloc, sizeof(void *) * chunk.count, &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(uint32_t) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_chunks, sizeof(void **) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    /* migrate the existing entries and release the old tables */
    if (alloc->num_mspace_chunks > 0) {
        memcpy(new_chunk, alloc->mspace_chunk, sizeof(struct allocman_mspace_chunk) * alloc->num_mspace_chunks);
        memcpy(new_counts, alloc->mspace_chunk_count, sizeof(uint32_t) * alloc->num_mspace_chunks);
        memcpy(new_chunks, alloc->mspace_chunks, sizeof(void **) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunk, sizeof(struct allocman_mspace_chunk) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunk_count, sizeof(uint32_t) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunks, sizeof(void **) * alloc->num_mspace_chunks);
    }
    /* append the new size entry with an empty reserve pool */
    new_chunk[alloc->num_mspace_chunks] = chunk;
    new_counts[alloc->num_mspace_chunks] = 0;
    new_chunks[alloc->num_mspace_chunks] = new_alloc;
    /* switch the new tables in */
    alloc->mspace_chunk = new_chunk;
    alloc->mspace_chunk_count = new_counts;
    alloc->mspace_chunks = new_chunks;
    alloc->num_mspace_chunks++;
    alloc->used_watermark = 1;
    _end_operation(alloc, root);
    return 0;
}
Esempio n. 2
0
/* Initialize a single-level cspace description over the slot range
 * [config.first_slot, config.end_slot).
 *
 * A free-slot bitmap is allocated from the mspace with one bit per slot
 * (1 = free); any padding bits in a partial final word are cleared so they
 * are never handed out. Returns 0 on success, or the mspace error code.
 */
int cspace_single_level_create(struct allocman *alloc, cspace_single_level_t *cspace, struct cspace_single_level_config config)
{
    size_t num_slots;
    size_t num_entries;
    int error;
    cspace->config = config;
    /* Allocate bitmap */
    num_slots = cspace->config.end_slot - cspace->config.first_slot;
    num_entries = num_slots / BITS_PER_WORD;
    if (num_slots % BITS_PER_WORD != 0) {
        /* a partial trailing word still needs a whole bitmap entry */
        num_entries++;
    }
    /* BUG FIX: record the length *after* accounting for the partial trailing
     * word. Previously bitmap_length was set before the increment, so it
     * disagreed with the allocated bitmap size and excluded the final entry. */
    cspace->bitmap_length = num_entries;
    cspace->bitmap = (size_t*)allocman_mspace_alloc(alloc, num_entries * sizeof(size_t), &error);
    if (error) {
        return error;
    }
    /* Make everything 1's */
    memset(cspace->bitmap, -1, num_entries * sizeof(size_t));
    if (num_slots % BITS_PER_WORD != 0) {
        /* Mark the padding slots as allocated (clear their bits) */
        size_t excess = num_slots % BITS_PER_WORD;
        size_t i;
        for (i = excess; i < BITS_PER_WORD; i++) {
            cspace->bitmap[num_entries - 1] ^= BIT(i);
        }
    }
    cspace->last_entry = 0;
    return 0;
}
Esempio n. 3
0
/* Resize a dynamically sized, mspace-backed array to hold `num` items of
 * `item_size` bytes each.
 *
 * On success the old storage (if any) is freed, *array points at the new
 * storage, *size is updated to num, and 0 is returned. Shrinking below the
 * current live count (*count) is refused with -1. Returns the mspace error
 * code if the new allocation fails.
 */
static int resize_array(allocman_t *alloc, uint32_t num, void **array, uint32_t *size, uint32_t *count, uint32_t item_size) {
    int root = _start_operation(alloc);
    void *new_array;
    int error;

    assert(root);

    /* refuse to shrink below the number of live entries.
     * BUG FIX: this check now runs *before* the allocation; previously the
     * new array was allocated first and then leaked on the -1 return.
     * NOTE(review): early returns skip _end_operation, matching the file's
     * existing convention - verify intentional. */
    if (num < (*count)) {
        return -1;
    }

    /* allocate new array */
    new_array = allocman_mspace_alloc(alloc, item_size * num, &error);
    if (error) {
        return error;
    }

    /* copy any existing slots and free the old array, but avoid using a null array */
    if ((*array)) {
        memcpy(new_array, (*array), item_size * (*count));
        allocman_mspace_free(alloc, (*array), item_size * (*size));
    }

    /* switch the new array in */
    (*array) = new_array;
    (*size) = num;

    alloc->used_watermark = 1;
    _end_operation(alloc, root);
    return error;
}
Esempio n. 4
0
/* Allocate a utspace_trickle_node aligned to a 32-byte boundary.
 *
 * The mspace gives no alignment guarantee, so 68 bytes are allocated for the
 * 36-byte node (36 + 32 bytes of slack). The returned node address is
 * (addr + 32) rounded down to a 32-byte boundary, i.e. an aligned address in
 * (addr, addr + 32], which leaves at least 36 bytes before addr + 68. The
 * original allocation address is stashed in node->padding so the matching
 * free can recover it.
 *
 * Returns NULL (with *error set) if the underlying allocation fails.
 *
 * NOTE(review): the pointer is round-tripped through uint32_t, so this
 * assumes a 32-bit target - consistent with the size assert below, but
 * confirm before reusing on 64-bit.
 */
static inline struct utspace_trickle_node *_make_node(struct allocman *alloc, int *error) {
    compile_time_assert(trickle_struct_size, sizeof(struct utspace_trickle_node) == 36);
    uint32_t addr = (uint32_t)allocman_mspace_alloc(alloc, 68, error);
    uint32_t ret;
    struct utspace_trickle_node *node;
    if (*error) {
        return NULL;
    }
    /* align: MASK(5) == 0x1f, so this clears the low 5 bits of addr + 32 */
    ret = (addr + 32) & ((uint32_t)~MASK(5));
    node = (struct utspace_trickle_node*)ret;
    /* remember the unaligned allocation address for the matching free */
    node->padding = addr;
    return node;
}
Esempio n. 5
0
/* Allocate a split-allocator node describing untyped `ut` at physical
 * address `paddr` and link it into the free list at `head`.
 * Returns 0 on success, 1 if the node allocation fails. */
static int _insert_new_node(allocman_t *alloc, struct utspace_split_node **head, cspacepath_t ut, uint32_t paddr) {
    int alloc_error;
    struct utspace_split_node *fresh =
        (struct utspace_split_node *) allocman_mspace_alloc(alloc, sizeof(*fresh), &alloc_error);
    if (alloc_error != 0) {
        return 1;
    }
    /* a newly added untyped has no parent it was split from */
    fresh->paddr = paddr;
    fresh->ut = ut;
    fresh->parent = NULL;
    _insert_node(head, fresh);
    return 0;
}
Esempio n. 6
0
/* Allocate a blank split-allocator node together with a free cspace slot
 * for its untyped capability (node->ut).
 * Returns the node, or NULL if either allocation fails (nothing leaks). */
static struct utspace_split_node *_new_node(allocman_t *alloc) {
    int err;
    struct utspace_split_node *n =
        (struct utspace_split_node *) allocman_mspace_alloc(alloc, sizeof(*n), &err);
    if (err != 0) {
        return NULL;
    }
    if (allocman_cspace_alloc(alloc, &n->ut) != 0) {
        /* slot allocation failed: give the node memory back */
        allocman_mspace_free(alloc, n, sizeof(*n));
        return NULL;
    }
    return n;
}
Esempio n. 7
0
/* CAmkES pre-init hook: bring up the allocator, vka, and vspace for this
 * component before its interfaces run.
 *
 * Sequence: wrap the CAmkES-provided simple interface, bootstrap an allocman
 * over the current 1-level cnode, seed it with untypeds, build a proxy vka,
 * bootstrap a vspace, pre-create a one-page temporary mapping reservation for
 * the proxy's book keeping, and finally set up the muslc brk reservation. */
void pre_init(void) {
    int error;

    set_putchar(putchar_putchar);

    /* Camkes adds nothing to our address space, so this array is empty */
    void *existing_frames[] = {
        NULL
    };
    camkes_make_simple(&camkes_simple);
    /* override the simple calls CAmkES cannot provide directly */
    camkes_simple.IOPort_cap = simple_ioport_wrapper;
    camkes_simple.frame_cap = simple_frame_cap_wrapper;

    /* Initialize allocator: manage the free slots of the current cnode,
     * starting just past the last cap CAmkES handed us, backed by the
     * static allocator_mempool */
    allocman = bootstrap_use_current_1level(
            simple_get_cnode(&camkes_simple),
            simple_get_cnode_size_bits(&camkes_simple),
            simple_last_valid_cap(&camkes_simple) + 1,
            BIT(simple_get_cnode_size_bits(&camkes_simple)),
            sizeof(allocator_mempool), allocator_mempool
    );
    assert(allocman);
    /* NOTE(review): this error is assigned but never asserted/checked -
     * every other error in this function is; verify this is intentional */
    error = allocman_add_simple_untypeds(allocman, &camkes_simple);
    make_proxy_vka(&vka, allocman);

    /* Initialize the vspace */
    error = sel4utils_bootstrap_vspace(&vspace, &vspace_data,
            simple_get_init_cap(&camkes_simple, seL4_CapInitThreadPD), &vka, NULL, NULL, existing_frames);
    assert(!error);

    /* Create temporary mapping reservation, and map in a frame to
     * create any book keeping */
    reservation_t reservation;
    reservation.res = allocman_mspace_alloc(allocman, sizeof(sel4utils_res_t), &error);
    assert(reservation.res);
    void *reservation_vaddr;
    error = sel4utils_reserve_range_no_alloc(&vspace, reservation.res, PAGE_SIZE_4K, seL4_AllRights, 1, &reservation_vaddr);
    assert(!error);
    /* map and immediately unmap a page so the vspace allocates its own
     * book keeping now, while allocation is still safe */
    error = vspace_new_pages_at_vaddr(&vspace, reservation_vaddr, 1, seL4_PageBits, reservation);
    assert(!error);
    vspace_unmap_pages(&vspace, reservation_vaddr, 1, seL4_PageBits, VSPACE_FREE);

    /* hand the reservation to the proxy vka for its temporary mappings */
    proxy_give_vspace(&vka, &vspace, reservation_vaddr, reservation);

    /* reserve the muslc heap (brk) region */
    sel4utils_reserve_range_no_alloc(&vspace, &muslc_brk_reservation_memory, BRK_VIRTUAL_SIZE, seL4_AllRights, 1, &muslc_brk_reservation_start);
    muslc_this_vspace = &vspace;
    muslc_brk_reservation = (reservation_t){.res = &muslc_brk_reservation_memory};

}
Esempio n. 8
0
/* Create a split-allocator node for untyped `ut` at physical address
 * `paddr`, remember which free list it came from, and link it in at `head`.
 * Returns 0 on success, 1 if the node allocation fails. */
static int _insert_new_node(allocman_t *alloc, struct utspace_split_node **head, cspacepath_t ut, uintptr_t paddr) {
    int alloc_error;
    struct utspace_split_node *fresh =
        (struct utspace_split_node *) allocman_mspace_alloc(alloc, sizeof(*fresh), &alloc_error);
    if (alloc_error != 0) {
        ZF_LOGV("Failed to allocate node of size %zu", sizeof(*fresh));
        return 1;
    }
    /* top-level untypeds were not split from anything */
    fresh->parent = NULL;
    fresh->paddr = paddr;
    fresh->ut = ut;
    /* record the owning list so the node can find its way home later */
    fresh->origin_head = head;
    _insert_node(head, fresh);
    return 0;
}
Esempio n. 9
0
/* Allocate a blank split-allocator node plus a free cspace slot for its
 * untyped capability (node->ut).
 * Returns the node, or NULL if either step fails (nothing leaks). */
static struct utspace_split_node *_new_node(allocman_t *alloc) {
    int err;
    struct utspace_split_node *n =
        (struct utspace_split_node *) allocman_mspace_alloc(alloc, sizeof(*n), &err);
    if (err != 0) {
        ZF_LOGV("Failed to allocate node of size %zu", sizeof(*n));
        return NULL;
    }
    if (allocman_cspace_alloc(alloc, &n->ut) != 0) {
        /* slot allocation failed: release the node memory before bailing */
        allocman_mspace_free(alloc, n, sizeof(*n));
        ZF_LOGV("Failed to allocate slot");
        return NULL;
    }
    return n;
}
Esempio n. 10
0
/* Test: bootstrap an allocman and attach a single-level cspace built from
 * the boot-time cnode described by bootinfo.
 *
 * NOTE(review): this block is a truncated fragment - the function body
 * continues beyond the visible lines (its remainder appears later in this
 * file); only comments are added here. */
void test_use_current_cspace() {
    int error;
    seL4_CPtr i;
    allocman_t *allocman;
    cspace_single_level_t *cspace;
    utspace_trickle_t *utspace;
    seL4_BootInfo *bi = seL4_GetBootInfo();
    /* allocator initially backed only by the static initial_mem_pool */
    allocman = bootstrap_create_allocman(sizeof(initial_mem_pool), initial_mem_pool);
    assert(allocman);
    /* construct a description of our current cspace */
    cspace = allocman_mspace_alloc(allocman, sizeof(*cspace), &error);
    assert(!error);
    /* manage the empty slot range of the init thread's cnode */
    error = cspace_single_level_create(allocman, cspace, (struct cspace_single_level_config) {
        .cnode = seL4_CapInitThreadCNode,
        .cnode_size_bits = bi->initThreadCNodeSizeBits,
        .cnode_guard_bits = seL4_WordBits - bi->initThreadCNodeSizeBits,
        .first_slot = bi->empty.start,
        .end_slot = bi->empty.end
    });
Esempio n. 11
0
/* Test helper: bootstrap a fully configured allocman straight from bootinfo,
 * give it a virtual pool at VIRTUAL_START, fill its reserves, and return it. */
allocman_t *test_use_current_cspace_bootinfo() {
    int err;
    allocman_t *alloc;
    vspace_alloc_t vspace;
    vka_t *vka;

    /* one-call bootstrap: cspace and utspace come from bootinfo */
    alloc = bootstrap_use_bootinfo(seL4_GetBootInfo(), sizeof(initial_mem_pool), initial_mem_pool);
    assert(alloc);

    /* wrap the allocator in a vka interface for the vspace layer */
    vka = allocman_mspace_alloc(alloc, sizeof(*vka), &err);
    assert(!err);
    allocman_make_vka(vka, alloc);
    sel4util_get_vspace_alloc_leaky(&vspace, seL4_CapInitThreadPD, vka, seL4_GetBootInfo());

    /* carve out the virtual region the allocator will page itself into */
    reservation_t *pool_res = vspace_reserve_range_at(&vspace, VIRTUAL_START, MEM_POOL_SIZE, seL4_AllRights, 1);
    assert(pool_res);

    bootstrap_configure_virtual_pool(alloc, VIRTUAL_START, MEM_POOL_SIZE, seL4_CapInitThreadPD);
    err = allocman_fill_reserves(alloc);
    assert(!err);
    return alloc;
}
Esempio n. 12
0
/* Add `num` untyped capabilities to the trickle utspace manager.
 *
 * For each untyped a tracking node and a heap copy of its cspacepath are
 * allocated first; only once every allocation has succeeded are the nodes
 * initialized and inserted into the per-size free lists, so failure unwinds
 * cleanly with nothing published. Returns 0 on success or the mspace error.
 */
int _utspace_trickle_add_uts(allocman_t *alloc, void *_trickle, uint32_t num, cspacepath_t *uts, uint32_t *size_bits, uint32_t *paddr) {
    utspace_trickle_t *trickle = (utspace_trickle_t*) _trickle;
    struct utspace_trickle_node *nodes[num];
    cspacepath_t *uts_copy[num];
    int error;
    int i;
    /* phase 1: allocate everything, unwinding fully on any failure */
    for (i = 0; i < num; i++) {
        nodes[i] = _make_node(alloc, &error);
        if (error) {
            for (i--; i >= 0; i--) {
                _free_node(alloc, nodes[i]);
                allocman_mspace_free(alloc, uts_copy[i], sizeof(cspacepath_t));
            }
            return error;
        }
        uts_copy[i] = allocman_mspace_alloc(alloc, sizeof(cspacepath_t), &error);
        if (error) {
            _free_node(alloc, nodes[i]);
            for (i--; i >= 0; i--) {
                _free_node(alloc, nodes[i]);
                allocman_mspace_free(alloc, uts_copy[i], sizeof(cspacepath_t));
            }
            /* BUG FIX: this path previously fell through without returning,
             * re-entering the loop with a corrupted index after unwinding */
            return error;
        }
    }
    /* phase 2: everything allocated - initialize and publish the nodes */
    for (i = 0; i < num; i++) {
        *uts_copy[i] = uts[i];
        nodes[i]->ut = uts_copy[i];
        nodes[i]->offset = 0;
        nodes[i]->paddr = paddr[i];
        nodes[i]->parent_cookie = 0;
        nodes[i]->next = nodes[i]->prev = NULL;
        /* Start with only 1 thing free */
        nodes[i]->bitmap = BIT(31);
        nodes[i]->bitmap_bits = 1;
        _insert_node(&trickle->heads[size_bits[i]], nodes[i]);
    }
    return 0;
}
Esempio n. 13
0
/* Proxy implementation of vka utspace_alloc.
 *
 * Wraps every allocation in a ut_node_t book keeping record so the matching
 * free can tell whether the cookie came from the managed RAM pool
 * (node->frame == 1) or the regular vka (node->frame == 0). 4K frame
 * requests are served from the trickle RAM manager when possible, briefly
 * mapping the frame at temp_map_address to zero it; the recurse flag stops
 * that mapping from re-entering this allocator for its own book keeping.
 *
 * Returns 0 on success with *res set to the node pointer cookie, non-zero
 * on failure (the node is freed on the failure path).
 */
int proxy_vka_utspace_alloc(void *data, const cspacepath_t *dest, seL4_Word type, seL4_Word size_bits, uint32_t *res) {
    proxy_vka_t *vka = (proxy_vka_t*)data;
    int error;
    uint32_t cookie;
    ut_node_t *node = allocman_mspace_alloc(vka->allocman, sizeof(*node), &error);
    if (!node) {
        return -1;
    }
    if (type == seL4_IA32_4K && vka->have_mem && vka->vspace.map_pages_at_vaddr && !vka->recurse) {
        cookie = _utspace_trickle_alloc(vka->allocman, &vka->ram_ut_manager, seL4_PageBits, seL4_IA32_4K, dest, &error);
        if (error != 0) {
            /* managed RAM exhausted: remember that and fall back below */
            vka->have_mem = 0;
        } else {
            node->frame = 1;
            node->cookie = cookie;
            /* briefly map this frame in so we can zero it. Avoid recursively allocating
             * for book keeping */
            assert(!vka->recurse);
            vka->recurse = 1;
            error = vspace_map_pages_at_vaddr(&vka->vspace, (seL4_CPtr*)&dest->capPtr, NULL, vka->temp_map_address, 1, PAGE_BITS_4K, vka->temp_map_reservation);
            assert(!error);
            memset(vka->temp_map_address, 0, PAGE_SIZE_4K);
            vspace_unmap_pages(&vka->vspace, vka->temp_map_address, 1, PAGE_BITS_4K, VSPACE_PRESERVE);
            vka->recurse = 0;
            /* BUG FIX: this path previously returned without setting *res,
             * handing the caller an uninitialized cookie and leaking node */
            *res = (uint32_t)node;
            return 0;
        }
    }
    error = vka_utspace_alloc(&vka->regular_vka, dest, type, size_bits, &cookie);
    if (!error) {
        node->frame = 0;
        node->cookie = cookie;
        *res = (uint32_t)node;
        return 0;
    }
    /* both paths failed: release the book keeping node */
    allocman_mspace_free(vka->allocman, node, sizeof(*node));
    return error;
}
Esempio n. 14
0
    /* NOTE(review): this block is the tail of a function whose opening lines
     * are not visible here (it matches the earlier truncated
     * test_use_current_cspace); only comments are added. */
    /* construct a description of our current cspace */
    cspace = allocman_mspace_alloc(allocman, sizeof(*cspace), &error);
    assert(!error);
    /* manage the empty slot range of the init thread's cnode */
    error = cspace_single_level_create(allocman, cspace, (struct cspace_single_level_config) {
        .cnode = seL4_CapInitThreadCNode,
        .cnode_size_bits = bi->initThreadCNodeSizeBits,
        .cnode_guard_bits = seL4_WordBits - bi->initThreadCNodeSizeBits,
        .first_slot = bi->empty.start,
        .end_slot = bi->empty.end
    });
    assert(!error);

    error = allocman_attach_cspace(allocman, cspace_single_level_make_interface(cspace));
    assert(!error);

    /* attach a trickle utspace manager for untyped memory */
    utspace = allocman_mspace_alloc(allocman, sizeof(*utspace), &error);
    assert(!error);
    error = allocman_attach_utspace(allocman, utspace_trickle_make_interface(utspace));
    assert(!error);

    /* have to add all our resources manually */
    for (i = bi->untyped.start; i < bi->untyped.end; i++) {
        cspacepath_t slot = allocman_cspace_make_path(allocman, i);
        uint32_t size_bits = bi->untypedSizeBitsList[i - bi->untyped.start];
        /* NOTE(review): paddr is computed but never passed to
         * allocman_utspace_add_uts below - confirm whether the variant of
         * add_uts in use takes a paddr argument */
        uint32_t paddr = bi->untypedPaddrList[i - bi->untyped.start];
        error = allocman_utspace_add_uts(allocman, 1, &slot, &size_bits);
        assert(!error);
    }
    error = allocman_fill_reserves(allocman);
    assert(!error);
}