Example #1
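/* Unmap a duplicated mapping: look up the frame cap backing the mapping,
 * unmap the page while preserving the underlying frame, then delete and
 * free the duplicated cap. */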
void
sel4utils_unmap_dup(vka_t *vka, vspace_t *vspace, void *mapping, size_t size_bits)
{
    /* Grab a copy of the cap */
    seL4_CPtr copy = vspace_get_cap(vspace, mapping);
    cspacepath_t copy_path;
    assert(copy);
    /* now free the mapping */
    vspace_unmap_pages(vspace, mapping, 1, size_bits, VSPACE_PRESERVE);
    /* delete and free the cap */
    vka_cspace_make_path(vka, copy, &copy_path);
    vka_cnode_delete(&copy_path);
    vka_cspace_free(vka, copy);
}
Example #2
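/* CAmkES pre_init: bootstrap the allocator from the component's CNode, wrap
 * it in a proxy vka, bootstrap a vspace over the component's page directory,
 * set up a temporary mapping reservation for the proxy, and reserve the
 * muslc brk region. */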
void pre_init(void) {
    int error;

    set_putchar(putchar_putchar);

    /* CAmkES adds nothing to our address space, so this array is empty */
    void *existing_frames[] = {
        NULL
    };
    camkes_make_simple(&camkes_simple);
    camkes_simple.IOPort_cap = simple_ioport_wrapper;
    camkes_simple.frame_cap = simple_frame_cap_wrapper;

    /* Initialize allocator */
    allocman = bootstrap_use_current_1level(
            simple_get_cnode(&camkes_simple),
            simple_get_cnode_size_bits(&camkes_simple),
            simple_last_valid_cap(&camkes_simple) + 1,
            BIT(simple_get_cnode_size_bits(&camkes_simple)),
            sizeof(allocator_mempool), allocator_mempool
    );
    assert(allocman);
    error = allocman_add_simple_untypeds(allocman, &camkes_simple);
    assert(!error);
    make_proxy_vka(&vka, allocman);

    /* Initialize the vspace */
    error = sel4utils_bootstrap_vspace(&vspace, &vspace_data,
            simple_get_init_cap(&camkes_simple, seL4_CapInitThreadPD), &vka, NULL, NULL, existing_frames);
    assert(!error);

    /* Create a temporary mapping reservation, and map in a frame to
     * create any bookkeeping structures up front */
    reservation_t reservation;
    reservation.res = allocman_mspace_alloc(allocman, sizeof(sel4utils_res_t), &error);
    assert(reservation.res);
    void *reservation_vaddr;
    error = sel4utils_reserve_range_no_alloc(&vspace, reservation.res, PAGE_SIZE_4K, seL4_AllRights, 1, &reservation_vaddr);
    assert(!error);
    error = vspace_new_pages_at_vaddr(&vspace, reservation_vaddr, 1, seL4_PageBits, reservation);
    assert(!error);
    vspace_unmap_pages(&vspace, reservation_vaddr, 1, seL4_PageBits, VSPACE_FREE);

    proxy_give_vspace(&vka, &vspace, reservation_vaddr, reservation);

    sel4utils_reserve_range_no_alloc(&vspace, &muslc_brk_reservation_memory, BRK_VIRTUAL_SIZE, seL4_AllRights, 1, &muslc_brk_reservation_start);
    muslc_this_vspace = &vspace;
    muslc_brk_reservation = (reservation_t){.res = &muslc_brk_reservation_memory};

}
Example #3
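/* Proxy vka utspace allocation: 4K frames are taken from the dedicated RAM
 * untyped manager and briefly mapped so they can be zeroed; everything else
 * falls through to the regular vka. */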
int proxy_vka_utspace_alloc(void *data, const cspacepath_t *dest, seL4_Word type, seL4_Word size_bits, uint32_t *res) {
    proxy_vka_t *vka = (proxy_vka_t*)data;
    int error;
    uint32_t cookie;
    ut_node_t *node = allocman_mspace_alloc(vka->allocman, sizeof(*node), &error);
    if (!node) {
        return -1;
    }
    if (type == seL4_IA32_4K && vka->have_mem && vka->vspace.map_pages_at_vaddr && !vka->recurse) {
        cookie = _utspace_trickle_alloc(vka->allocman, &vka->ram_ut_manager, seL4_PageBits, seL4_IA32_4K, dest, &error);
        if (error != 0) {
            vka->have_mem = 0;
        } else {
            node->frame = 1;
            node->cookie = cookie;
            /* Briefly map this frame in so we can zero it; avoid recursively
             * allocating for bookkeeping */
            assert(!vka->recurse);
            vka->recurse = 1;
            error = vspace_map_pages_at_vaddr(&vka->vspace, (seL4_CPtr*)&dest->capPtr, NULL, vka->temp_map_address, 1, PAGE_BITS_4K, vka->temp_map_reservation);
            assert(!error);
            memset(vka->temp_map_address, 0, PAGE_SIZE_4K);
            vspace_unmap_pages(&vka->vspace, vka->temp_map_address, 1, PAGE_BITS_4K, VSPACE_PRESERVE);
            vka->recurse = 0;
            /* Hand the node back as the allocation cookie, matching the
             * fallback path below */
            *res = (uint32_t)node;
            return 0;
        }
    }
    error = vka_utspace_alloc(&vka->regular_vka, dest, type, size_bits, &cookie);
    if (!error) {
        node->frame = 0;
        node->cookie = cookie;
        *res = (uint32_t)node;
        return 0;
    }
    allocman_mspace_free(vka->allocman, node, sizeof(*node));
    return error;
}
Example #4
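/* Connect a client to the serial server: allocate shared-memory pages in the
 * client's vspace, send the backing frame caps to the server over IPC, and
 * check the reply before recording the connection state. */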
int
serial_server_client_connect(seL4_CPtr badged_server_ep_cap,
                             vka_t *client_vka, vspace_t *client_vspace,
                             serial_client_context_t *conn)
{
    seL4_Error error;
    int shmem_n_pages;
    uintptr_t shmem_tmp_vaddr;
    seL4_MessageInfo_t tag;
    cspacepath_t frame_cspath;

    if (badged_server_ep_cap == 0 || client_vka == NULL || client_vspace == NULL
            || conn == NULL) {
        return seL4_InvalidArgument;
    }

    memset(conn, 0, sizeof(serial_client_context_t));

    shmem_n_pages = BYTES_TO_4K_PAGES(SERIAL_SERVER_SHMEM_MAX_SIZE);
    if (shmem_n_pages > seL4_MsgMaxExtraCaps) {
        ZF_LOGE(SERSERVC"connect: Currently unsupported shared memory size: "
                "IPC cap transfer capability is inadequate.");
        return seL4_RangeError;
    }
    conn->shmem = vspace_new_pages(client_vspace,
                                   seL4_AllRights,
                                   shmem_n_pages,
                                   seL4_PageBits);
    if (conn->shmem == NULL) {
        ZF_LOGE(SERSERVC"connect: Failed to alloc shmem.");
        return seL4_NotEnoughMemory;
    }
    assert(IS_ALIGNED((uintptr_t)conn->shmem, seL4_PageBits));

    /* Look up the Frame cap behind each page in the shmem range, and marshal
     * all of those Frame caps to the parent. The parent will then map those
     * Frames into its VSpace and establish a shmem link.
     */
    shmem_tmp_vaddr = (uintptr_t)conn->shmem;
    for (int i = 0; i < shmem_n_pages; i++) {
        vka_cspace_make_path(client_vka,
                             vspace_get_cap(client_vspace,
                                            (void *)shmem_tmp_vaddr),
                             &frame_cspath);

        seL4_SetCap(i, frame_cspath.capPtr);
        shmem_tmp_vaddr += BIT(seL4_PageBits);
    }

    /* Call the server asking it to establish the shmem mapping with us, and
     * get us connected up.
     */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_CONNECT_REQ);
    seL4_SetMR(SSMSGREG_CONNECT_REQ_SHMEM_SIZE,
               SERIAL_SERVER_SHMEM_MAX_SIZE);
    /* extraCaps doubles up as the number of shmem pages. */
    tag = seL4_MessageInfo_new(0, 0,
                               shmem_n_pages,
                               SSMSGREG_CONNECT_REQ_END);

    tag = seL4_Call(badged_server_ep_cap, tag);

    /* It makes sense to verify that the message we're getting back is an
     * ACK response to our request message.
     */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_CONNECT_ACK) {
        error = seL4_IllegalOperation;
        ZF_LOGE(SERSERVC"connect: Reply message was not a CONNECT_ACK as "
                "expected.");
        goto out;
    }
    /* When the parent replies, we check to see if it was successful, etc. */
    error = seL4_MessageInfo_get_label(tag);
    if (error != (int)SERIAL_SERVER_NOERROR) {
        ZF_LOGE(SERSERVC"connect ERR %d: Failed to connect to the server.",
                error);

        if (error == (int)SERIAL_SERVER_ERROR_SHMEM_TOO_LARGE) {
            ZF_LOGE(SERSERVC"connect: Your requested shmem mapping size is too "
                    "large.\n\tServer's max shmem size is %luB.",
                    (long)seL4_GetMR(SSMSGREG_CONNECT_ACK_MAX_SHMEM_SIZE));
        }
        goto out;
    }

    conn->shmem_size = SERIAL_SERVER_SHMEM_MAX_SIZE;
    vka_cspace_make_path(client_vka, badged_server_ep_cap,
                         &conn->badged_server_ep_cspath);

    return seL4_NoError;

out:
    if (conn->shmem != NULL) {
        vspace_unmap_pages(client_vspace, (void *)conn->shmem, shmem_n_pages,
                           seL4_PageBits, VSPACE_FREE);
    }
    return error;
}
Example #5
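/* Copy the ATAG list into the VM at the given address: allocate a 4K frame,
 * map it into the VMM to fill it in, then unmap it and map the same frame
 * into the VM's vspace at the page containing addr. */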
int
vm_copyout_atags(vm_t* vm, struct atag_list* atags, uint32_t addr)
{
    vspace_t *vm_vspace, *vmm_vspace;
    void* vm_addr, *vmm_addr, *buf;
    reservation_t res;
    vka_t* vka;
    vka_object_t frame;
    size_t size;
    struct atag_list* atag_cur;
    int err;

    vka = vm->vka;
    vm_addr = (void*)(addr & ~0xfff);
    vm_vspace = vm_get_vspace(vm);
    vmm_vspace = vm->vmm_vspace;

    /* Make sure we don't cross a page boundary.
     * NOTE: the next page will usually be used by Linux for its page tables!
     */
    for (size = 0, atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        size += atags_size_bytes(atag_cur);
    }
    size += 8; /* NULL tag */
    assert((addr & 0xfff) + size < 0x1000);

    /* Create a frame (and a copy for the VMM) */
    err = vka_alloc_frame(vka, 12, &frame);
    assert(!err);
    if (err) {
        return -1;
    }
    /* Map the frame to the VMM */
    vmm_addr = vspace_map_pages(vmm_vspace, &frame.cptr, NULL, seL4_AllRights, 1, 12, 0);
    assert(vmm_addr);

    /* Copy in the atags */
    buf = vmm_addr + (addr & 0xfff);
    for (atag_cur = atags; atag_cur != NULL; atag_cur = atag_cur->next) {
        int tag_size = atags_size_bytes(atag_cur);
        DVM("ATAG copy 0x%x<-0x%x %d\n", (uint32_t)buf, (uint32_t)atag_cur->hdr, tag_size);
        memcpy(buf, atag_cur->hdr, tag_size);
        buf += tag_size;
    }
    /* NULL tag terminator */
    memset(buf, 0, 8);

    /* Unmap the page and map it into the VM */
    vspace_unmap_pages(vmm_vspace, vmm_addr, 1, 12, NULL);
    res = vspace_reserve_range_at(vm_vspace, vm_addr, 0x1000, seL4_AllRights, 0);
    assert(res.res);
    if (!res.res) {
        vka_free_object(vka, &frame);
        return -1;
    }
    err = vspace_map_pages_at_vaddr(vm_vspace, &frame.cptr, NULL, vm_addr, 1, 12, res);
    vspace_free_reservation(vm_vspace, res);
    assert(!err);
    if (err) {
        printf("Failed to provide memory\n");
        vka_free_object(vka, &frame);
        return -1;
    }

    return 0;
}
Example #6
/* Run a single test.
 * Each test is launched as its own process. */
int
run_test(struct testcase *test)
{
    UNUSED int error;
    sel4utils_process_t test_process;

    /* Test intro banner. */
    printf("  %s\n", test->name);

    error = sel4utils_configure_process(&test_process, &env.vka, &env.vspace,
                                        env.init->priority, TESTS_APP);
    assert(error == 0);

    /* set up caps about the process */
    env.init->page_directory = copy_cap_to_process(&test_process, test_process.pd.cptr);
    env.init->root_cnode = SEL4UTILS_CNODE_SLOT;
    env.init->tcb = copy_cap_to_process(&test_process, test_process.thread.tcb.cptr);
    env.init->domain = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapDomain));
#ifndef CONFIG_KERNEL_STABLE
    env.init->asid_pool = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapInitThreadASIDPool));
#endif /* CONFIG_KERNEL_STABLE */
#ifdef CONFIG_IOMMU
    env.init->io_space = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapIOSpace));
#endif /* CONFIG_IOMMU */
    /* setup data about untypeds */
    env.init->untypeds = copy_untypeds_to_process(&test_process, untypeds, num_untypeds);
    copy_timer_caps(env.init, &env, &test_process);
    /* copy the fault endpoint - we wait on the endpoint for a message
     * or a fault to see when the test finishes */
    seL4_CPtr endpoint = copy_cap_to_process(&test_process, test_process.fault_endpoint.cptr);

    /* WARNING: DO NOT COPY MORE CAPS TO THE PROCESS BEYOND THIS POINT,
     * AS THE SLOTS WILL BE CONSIDERED FREE AND OVERWRITTEN BY THE TEST PROCESS. */
    /* set up free slot range */
    env.init->cspace_size_bits = CONFIG_SEL4UTILS_CSPACE_SIZE_BITS;
    env.init->free_slots.start = endpoint + 1;
    env.init->free_slots.end = (1u << CONFIG_SEL4UTILS_CSPACE_SIZE_BITS);
    assert(env.init->free_slots.start < env.init->free_slots.end);
    /* copy test name */
    strncpy(env.init->name, test->name + strlen("TEST_"), TEST_NAME_MAX);
#ifdef SEL4_DEBUG_KERNEL
    seL4_DebugNameThread(test_process.thread.tcb.cptr, env.init->name);
#endif

    /* set up args for the test process */
    char endpoint_string[10];
    char sel4test_name[] = { TESTS_APP };
    char zero_string[] = {"0"};
    char *argv[] = {sel4test_name, zero_string, endpoint_string};
    argv[0] = endpoint_string;
    snprintf(endpoint_string, 10, "%d", endpoint);
    /* spawn the process */
    error = sel4utils_spawn_process_v(&test_process, &env.vka, &env.vspace,
                            ARRAY_SIZE(argv), argv, 1);
    assert(error == 0);

    /* send env.init_data to the new process */
    void *remote_vaddr = send_init_data(&env, test_process.fault_endpoint.cptr, &test_process);

    /* wait on it to finish or fault, report result */
    seL4_Word badge;
    seL4_MessageInfo_t info = seL4_Wait(test_process.fault_endpoint.cptr, &badge);

    int result = seL4_GetMR(0);
    if (seL4_MessageInfo_get_label(info) != seL4_NoFault) {
        sel4utils_print_fault_message(info, test->name);
        result = FAILURE;
    }

    /* unmap the env.init data frame */
    vspace_unmap_pages(&test_process.vspace, remote_vaddr, 1, PAGE_BITS_4K, NULL);

    /* reset all the untypeds for the next test */
    for (int i = 0; i < num_untypeds; i++) {
        cspacepath_t path;
        vka_cspace_make_path(&env.vka, untypeds[i].cptr, &path);
        vka_cnode_revoke(&path);
    }

    /* destroy the process */
    sel4utils_destroy_process(&test_process, &env.vka);

    test_assert(result == SUCCESS);
    return result;
}