Example #1
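/**
 * \brief Give memory back: unmap a region that morecore_alloc()
 * previously mapped for malloc. Panics if the unmap fails.
 */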
static void morecore_free(void *base, size_t bytes)
{
    struct morecore_state *state = get_morecore_state();
    errval_t err = vspace_mmu_aware_unmap(&state->mmu_state,
                                          (lvaddr_t)base, bytes);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace_mmu_aware_unmap");
    }
}
Example #2
/**
 * \brief Allocate some memory for malloc to use
 *
 * This function will keep trying with smaller and smaller frames until
 * it finds a set of frames that satisfies the requirement. retbytes can
 * be smaller than bytes if we were only able to allocate a smaller
 * memory region than requested.
 */
static void *morecore_alloc(size_t bytes, size_t *retbytes)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();

    void *buf = NULL;
    size_t mapped = 0;
    size_t step = bytes;
    while (mapped < bytes) {
        struct capref cap;
        err = slot_alloc(&cap);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "slot_alloc failed");
        }

        void *mid_buf = NULL;
        err = vspace_mmu_aware_map(&state->mmu_state, cap, step,
                                   &mid_buf, &step);
        if (err_is_ok(err)) {
            if (buf == NULL) {
                buf = mid_buf;
            }
            mapped += step;
        } else {
            /*
             * vspace_mmu_aware_map probably failed because we asked
             * for a very large frame; try asking for a smaller one.
             */
            if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
                err = slot_free(cap);
                if (err_is_fail(err)) {
                    debug_err(__FILE__, __func__, __LINE__, err,
                              "slot_free failed");
                    return NULL;
                }
                if (step < BASE_PAGE_SIZE) {
                    // Return whatever we have allocated until now
                    break;
                }
                step /= 2;
                continue;
            } else {
                debug_err(__FILE__, __func__, __LINE__, err,
                          "vspace_mmu_aware_map fail");
                return NULL;
            }
        }
    }

    *retbytes = mapped;
    return buf;
}
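A caller must check retbytes, because morecore_alloc() may hand back less
memory than requested. A minimal sketch of the contract (illustrative only;
in practice this static function is reached through the sys_morecore_alloc
hook installed below rather than called directly):

size_t got = 0;
void *region = morecore_alloc(16 * 1024 * 1024, &got);  /* ask for 16 MiB */
if (region == NULL) {
    /* nothing could be mapped at all */
} else if (got < 16 * 1024 * 1024) {
    /* only `got` bytes are usable; the caller must cope with less */
}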
Example #3
errval_t morecore_init(void)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();

    thread_mutex_init(&state->mutex);

    err = vspace_mmu_aware_init(&state->mmu_state, HEAP_REGION);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
    }

    sys_morecore_alloc = morecore_alloc;
    sys_morecore_free = morecore_free;

    return SYS_ERR_OK;
}
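morecore_init() wires these functions into malloc through the
sys_morecore_alloc and sys_morecore_free hooks. The sketch below shows how a
K&R-style morecore() could call the installed hook; everything except
sys_morecore_alloc, get_malloc_freep and the Header type is illustrative and
assumes the classic K&R allocator layout, not Barrelfish's actual malloc
internals:

static Header *kr_morecore(unsigned nunits)
{
    size_t bytes = nunits * sizeof(Header);
    size_t got = 0;

    /* Call the hook installed by morecore_init() */
    void *p = sys_morecore_alloc(bytes, &got);
    if (p == NULL || got < sizeof(Header)) {
        return NULL;    /* out of memory */
    }

    /* Stamp the region with its size in header units and hand it to
     * free(), which links it into the free list. */
    Header *hp = (Header *)p;
    hp->s.size = got / sizeof(Header);
    free((void *)(hp + 1));
    return get_malloc_freep();
}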
Example #4
Header *get_malloc_freep(void)
{
    return get_morecore_state()->header_freep;
}
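get_malloc_freep() exposes the head of malloc's free list. A small debugging
sketch that walks the list, assuming the classic K&R Header layout (a
circular list where each s.ptr points at the next free block and s.size
counts header units); the function name is illustrative:

#include <stdio.h>

void malloc_dump_freelist(void)
{
    Header *start = get_malloc_freep();
    if (start == NULL) {
        printf("free list is empty\n");
        return;
    }

    /* The free list is circular, so stop once we wrap around. */
    Header *p = start;
    do {
        printf("block at %p: %zu units\n", (void *)p, (size_t)p->s.size);
        p = p->s.ptr;
    } while (p != start);
}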
Example #5
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on a remote core with a variable stack size
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread       = newthread;
    span_domain_state->core_id      = core_id;
    span_domain_state->callback     = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

#if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif
    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

#if 1
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}
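A hedged usage sketch: spanning the current domain onto core 1 with a 64 KiB
stack via domain_new_dispatcher_varstack(). The callback shape follows the
domain_spanned_callback_t parameter above; the names spanned_cb and
span_with_big_stack are illustrative:

static void spanned_cb(void *arg, errval_t reterr)
{
    assert(err_is_ok(reterr));
    /* the domain now runs a dispatcher on the remote core */
}

static errval_t span_with_big_stack(void)
{
    /* 64 KiB stack instead of the default, per the \bug note above */
    return domain_new_dispatcher_varstack(1, spanned_cb, NULL, 64 * 1024);
}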