Example #1
void handle_irq(arch_registers_state_t* save_area, uintptr_t fault_pc,
                uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3)
{
    uint32_t irq = 0;

    /* The assembly stub leaves the first 4 registers, the stack pointer, and
     * the exception PC for us to save, as it's run out of room for the
     * necessary instructions. */
    save_area->named.x0    = x0;
    save_area->named.x1    = x1;
    save_area->named.x2    = x2;
    save_area->named.x3    = x3;
    save_area->named.stack = sysreg_read_sp_el0();
    save_area->named.pc    = fault_pc;

    irq = gic_get_active_irq();

    debug(SUBSYS_DISPATCH, "IRQ %"PRIu32" while %s\n", irq,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") :
                        "in kernel");

    if (dcb_current != NULL) {
        dispatcher_handle_t handle = dcb_current->disp;
        if (save_area == dispatcher_get_disabled_save_area(handle)) {
            assert(dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = true;
        } else {
/*            debug(SUBSYS_DISPATCH,
                  "save_area=%p, dispatcher_get_enabled_save_are(handle)=%p\n",
                   save_area, dispatcher_get_enabled_save_area(handle));
*/

            assert(save_area == dispatcher_get_enabled_save_area(handle));
            assert(!dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = false;
        }
    }

    if (pit_handle_irq(irq)) {
        // Timer interrupt, pit_handle_irq acks it at the timer.
        assert(kernel_ticks_enabled);
        kernel_now += kernel_timeslice;
        wakeup_check(kernel_now);
        dispatch(schedule());
    }
    /* This is the (still) unacknowledged startup interrupt sent by the BSP.
     * We just acknowledge it here. */
    else if (irq == 1) {
        gic_ack_irq(irq);
        dispatch(schedule());
    }
    else {
        gic_ack_irq(irq);
        send_user_interrupt(irq);
        panic("Unhandled IRQ %"PRIu32"\n", irq);
    }

}
Example #2
/**
 * \brief Setup the dispatcher frame
 */
static errval_t spawn_setup_dispatcher(struct spawninfo *si,
                                       coreid_t core_id,
                                       const char *name,
                                       genvaddr_t entry,
                                       void* arch_info)
{
    errval_t err;

    /* Create dispatcher frame (in taskcn) */
    si->dispframe.cnode = si->taskcn;
    si->dispframe.slot  = TASKCN_SLOT_DISPFRAME;
    struct capref spawn_dispframe = {
        .cnode = si->taskcn,
        .slot  = TASKCN_SLOT_DISPFRAME2,
    };
    err = frame_create(si->dispframe, (1 << DISPATCHER_FRAME_BITS), NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME);
    }
    err = cap_copy(spawn_dispframe, si->dispframe);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_DISPATCHER_FRAME);
    }

    /* Map in dispatcher frame */
    dispatcher_handle_t handle;
    err = vspace_map_one_frame((void**)&handle, 1ul << DISPATCHER_FRAME_BITS,
                               si->dispframe, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_SELF);
    }
    genvaddr_t spawn_dispatcher_base;
    err = spawn_vspace_map_one_frame(si, &spawn_dispatcher_base, spawn_dispframe,
                                     1UL << DISPATCHER_FRAME_BITS);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_DISPATCHER_TO_NEW);
    }

    /* Set initial state */
    // XXX: Confusion address translation about l/gen/addr in entry
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *enabled_area =
        dispatcher_get_enabled_save_area(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Place core_id */
    disp_gen->core_id = core_id;

    /* place eh information */
    disp_gen->eh_frame = si->eh_frame;
    disp_gen->eh_frame_size = si->eh_frame_size;
    disp_gen->eh_frame_hdr = si->eh_frame_hdr;
    disp_gen->eh_frame_hdr_size = si->eh_frame_hdr_size;

    /* Setup dispatcher and make it runnable */
    disp->udisp = spawn_dispatcher_base;
    disp->disabled = 1;
    disp->fpu_trap = 1;
#ifdef __k1om__
    disp->xeon_phi_id = disp_xeon_phi_id();
#endif

    // Copy the name for debugging
    const char *copy_name = strrchr(name, '/');
    if (copy_name == NULL) {
        copy_name = name;
    } else {
        copy_name++;
    }
    strncpy(disp->name, copy_name, DISP_NAME_LEN);
    disp->name[DISP_NAME_LEN - 1] = '\0'; // ensure NUL termination if the name was truncated

    spawn_arch_set_registers(arch_info, handle, enabled_area, disabled_area);
    registers_set_entry(disabled_area, entry);

    si->handle = handle;
    return SYS_ERR_OK;
}
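
For context, a hedged sketch of how a spawn path might invoke this setup step once the ELF image is loaded and the entry point is known; the wrapper function and the SPAWN_ERR_SETUP_DISPATCHER code pushed on failure are assumptions for illustration, not taken from this code base.

/* Hypothetical caller sketch: run the dispatcher setup and push a
 * higher-level error on failure (the error code name is an assumption). */
static errval_t setup_dispatcher_for_spawn(struct spawninfo *si, coreid_t core,
                                           const char *name, genvaddr_t entry,
                                           void *arch_info)
{
    errval_t err = spawn_setup_dispatcher(si, core, name, entry, arch_info);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_SETUP_DISPATCHER);
    }
    return SYS_ERR_OK;
}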

errval_t spawn_map_bootinfo(struct spawninfo *si, genvaddr_t *retvaddr)
{
    errval_t err;

    struct capref src = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_BOOTINFO
    };
    struct capref dest = {
        .cnode = si->taskcn,
        .slot  = TASKCN_SLOT_BOOTINFO
    };
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CAP_COPY);
    }

    err = spawn_vspace_map_one_frame(si, retvaddr, dest, BOOTINFO_SIZE);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MAP_BOOTINFO);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Retrieve the command-line args of #name
 *
 * The arguments are malloc'd into new memory, so they must be freed after use.
 */
errval_t spawn_get_cmdline_args(struct mem_region *module,
                                char **retargs)
{
    assert(module != NULL && retargs != NULL);

    /* Get the cmdline args */
    const char *args = getopt_module(module);

    /* Allocate space (including the terminating NUL byte) */
    *retargs = malloc(strlen(args) + 1);
    if (*retargs == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    /* Copy args */
    strcpy(*retargs, args);
    return SYS_ERR_OK;
}
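
Since the string comes back heap-allocated, ownership passes to the caller, who must free it. A minimal usage sketch; the wrapper function and the printing are illustrative, not part of this code base.

/* Illustrative only: fetch a module's command line, use it, and free it,
 * since spawn_get_cmdline_args() hands ownership of the buffer to the caller. */
static errval_t print_module_cmdline(struct mem_region *module)
{
    char *args = NULL;
    errval_t err = spawn_get_cmdline_args(module, &args);
    if (err_is_fail(err)) {
        return err;
    }
    printf("module command line: %s\n", args);
    free(args);   /* buffer was malloc'd inside spawn_get_cmdline_args() */
    return SYS_ERR_OK;
}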
Example #3
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on a remote core with a variable stack size
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
{
    assert(core_id != disp_get_core_id());

    errval_t err;
    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();
    }

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref    = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        return LIB_ERR_THREAD_CREATE;
    }

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    }
    span_domain_state->thread       = newthread;
    span_domain_state->core_id      = core_id;
    span_domain_state->callback     = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {
        .cnode = cnode_page,
        .slot = 0
    };

    /* Create new dispatcher frame */
    struct capref frame;
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    lvaddr_t dispaddr;

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp->fpu_trap = 1;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

#ifdef __x86_64__
    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;
#endif

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));
    }

#if 0
    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));
        }
    }
#endif

#if 0
    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);
    }
#endif
    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

#if 1
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());
    }

    /* Free state */
    free(span_domain_state);
#endif

    return SYS_ERR_OK;
}
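
A common way to expose this varstack variant is a thin wrapper that supplies a default stack size. A hedged sketch follows; the constant name THREADS_DEFAULT_STACK_BYTES is assumed and may differ in the actual tree.

/* Sketch, not necessarily the code base's own wrapper: span onto a remote core
 * using the default thread stack size (constant name assumed). */
errval_t domain_new_dispatcher(coreid_t core_id,
                               domain_spanned_callback_t callback,
                               void *callback_arg)
{
    return domain_new_dispatcher_varstack(core_id, callback, callback_arg,
                                          THREADS_DEFAULT_STACK_BYTES);
}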
Example #4
/**
 * \brief Handle pretty much any usermode IRQ except system calls
 */
void handle_irq(uint32_t irq, arch_registers_state_t* save_area)
{
    /*
    printf("handle_irq: registers:\n");//dump content for debugging reasons
    for(uint32_t i = 0; i<NUM_REGS; i++){
        printf("0x%x\n", save_area->regs[i]);
    }*/
    
  /*  
    uint32_t regval;
    __asm volatile ("mrs %[regval], xpsr" : [regval] "=r"(regval));
    printf("current XPSR register: 0x%x\n", regval);
    
    printf("M3 MMU address: 0x%x\n", *((uint32_t*) &mmu));
    printf("M3 MMU_FAULT_AD register: 0x%x\n", omap44xx_mmu_fault_ad_rd(&mmu));
    printf("M3 MMU_FAULT_STATUS register: 0x%x\n", omap44xx_mmu_fault_status_rd(&mmu));
    printf("M3 MMU_FAULT_PC register: 0x%x\n", omap44xx_mmu_fault_pc_rd(&mmu));
    printf("M3 MMU_IRQSTATUS register: 0x%x\n", omap44xx_mmu_irqstatus_rd(&mmu));
    
    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
    printf("BFAR: 0x%x\n", omap44xx_cortex_m3_nvic_BFAR_rd(&nvic));
    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
    */
    uintptr_t fault_pc = save_area->named.pc; // read the faulting PC from the pushed context
    
    debug(SUBSYS_DISPATCH, "IRQ %"PRIu32" while %s\n", irq,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");


    if (dcb_current != NULL) {
        dispatcher_handle_t handle = dcb_current->disp;
        if (save_area == dispatcher_get_disabled_save_area(handle)) {
            assert(dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = true;
        } else {
/*            debug(SUBSYS_DISPATCH,
                  "save_area=%p, dispatcher_get_enabled_save_are(handle)=%p\n",
                   save_area, dispatcher_get_enabled_save_area(handle));
*/

            assert(save_area == dispatcher_get_enabled_save_area(handle));
            assert(!dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = false;
        }
    }
    //TODO: heteropanda: make a case distinction on the type of interrupt, and  
    //actually handle it

    if (irq == ARM_7M_EVECTOR_SYSTICK) {
        // Timer interrupt
        assert(kernel_ticks_enabled);
        kernel_now += kernel_timeslice;
        wakeup_check(kernel_now);
        printf(".");//to see how many we get & their distribution, without spamming lines
        dispatch(schedule());
    }
    else {
        // send_user_interrupt(irq);
        panic("Unhandled IRQ %"PRIu32"\n", irq);
    }

}