Code Example #1
File: alarm.c    Project: yaojiadong/2016_ses
fsmReturnStatus led_toggle(Fsm * fsm, const Event* e) {

// Note: all task descriptors are declared globally rather than locally here.
// Earlier mistake: re-initialising td.next = NULL on a descriptor that is already in the scheduler list corrupts the list.
//	static taskDescriptor td;
//	td.task = &wrapper_red_led;
//	//td.param = &;  //void pointer point to void, param is of type void
//	td.expire = 0;
//	td.period = 250;   //0.25s = 4Hz
//	td.execute = 0;
//	td.next = NULL;
//
//	static taskDescriptor td2;
//	td2.task = &wrapper_turnoff_led;
//	td2.param = fsm;  //void pointer point to void, param is of type void
//	td2.expire = 5000;  //count 5s
//	td2.period = 0;
//	td2.execute = 0;
//	td2.next = NULL;
//
//	static taskDescriptor td3;
//	td3.task = &time_increment;
//	td3.param = fsm;
//	td3.expire = 0;
//	td3.period = 1000; // every second update the time of the clock
//	td3.execute = 0;
//	td3.next = NULL;

	switch (e->signal) {
	case ENTRY:
		td_toggle_led.expire = 0;
		td_turnoff_led.expire = 5000;
		td_time_increment.expire = 0;
		scheduler_add(&td_toggle_led);
		scheduler_add(&td_turnoff_led);
		scheduler_add(&td_time_increment);
		return RET_HANDLED;
	case JOYSTICK_PRESSED:
		return TRANSITION(normal_operating);
	case ROTARY_PRESSED:
		return TRANSITION(normal_operating);
	case EXIT:
		led_redOff();
		scheduler_remove(&td_toggle_led);
		scheduler_remove(&td_turnoff_led);
		scheduler_remove(&td_time_increment);
		return RET_HANDLED;
	default:
		return RET_IGNORED;
	}
}
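For reference, all of the yaojiadong/2016_ses snippets in this listing share one task-descriptor list. The declarations below are a minimal sketch of the types and scheduler calls they appear to rely on; the exact field types and the task_t typedef are assumptions inferred from how the fields are used, not copied from the project headers.

#include <stdint.h>

typedef void (*task_t)(void *);              /* task body, invoked with the descriptor's param */

typedef struct taskDescriptor_s {
    task_t   task;                           /* function to run */
    void    *param;                          /* argument handed to task */
    uint16_t expire;                         /* time until the task becomes due */
    uint16_t period;                         /* re-arm interval; 0 means run once */
    uint8_t  execute;                        /* set when due, cleared after the task has run */
    struct taskDescriptor_s *next;           /* singly linked scheduler list */
} taskDescriptor;

void scheduler_add(taskDescriptor *td);      /* append the descriptor to the task list */
void scheduler_remove(taskDescriptor *td);   /* unlink the descriptor from the task list */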
Code Example #2
File: ses_scheduler.c    Project: yaojiadong/2016_ses
void scheduler_run() {

	taskDescriptor* temp;
	while (1) {
		ATOMIC_BLOCK(ATOMIC_RESTORESTATE)
		{
			temp = taskList; // restart the traversal from the head of the task list on every pass
			while (temp != NULL) {

				if (temp->execute == 1) {
					task_t the_task = temp->task;
					void* the_param = temp->param;

					NONATOMIC_BLOCK( NONATOMIC_RESTORESTATE)
					{
						the_task(the_param);			// execute the function
					}

					temp->execute = 0;
					if (temp->period == 0)
						scheduler_remove(temp);
				}
				temp = temp->next;
			}
		}
	}
}
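scheduler_run() only consumes the execute flags, so something on the timer-interrupt side has to set them. The routine below is a hypothetical sketch of that tick handler, written against the descriptor layout sketched above; the name scheduler_update, the tick rate, and the reload behaviour are assumptions rather than code from the project.

/* Hypothetical tick handler, e.g. called once per millisecond from a timer ISR.
 * Walks the same taskList that scheduler_run() drains, counts descriptors down,
 * marks due tasks, and re-arms periodic ones. */
static void scheduler_update(void) {
	taskDescriptor *td;
	for (td = taskList; td != NULL; td = td->next) {
		if (td->expire > 0) {
			td->expire--;
		}
		if (td->expire == 0) {
			td->execute = 1;            /* picked up by scheduler_run() */
			td->expire  = td->period;   /* stays 0 for one-shot tasks, which scheduler_run() removes */
		}
	}
}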
Code Example #3
File: scheduler.c    Project: fritz0705/xelix
// Called by the PIT a few hundred times per second.
task_t* scheduler_select(cpu_state_t* lastRegs)
{
	if(unlikely(scheduler_state == STATE_INITIALIZING))
	{
		scheduler_state = STATE_INITIALIZED;
		return currentTask;
	}

	currentTask->state = lastRegs;

	if(skipnext == SKIP_WAIT) skipnext = SKIP_NEXT;
	else if(skipnext == SKIP_NEXT)
	{
		skipnext = SKIP_OFF;
		return currentTask;
	}

	while (1)
	{
		currentTask = currentTask->next;
		
		if (currentTask->task_state == TASK_STATE_KILLED ||
				currentTask->task_state == TASK_STATE_TERMINATED)
		{
			if (currentTask->next == currentTask)
				currentTask->next = NULL;
			scheduler_remove(currentTask);
		}

		if (unlikely(currentTask == NULL || currentTask->task_state == TASK_STATE_RUNNING))
			break;
	}

	return currentTask;
}
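scheduler_select() takes the interrupted task's register snapshot and returns the task whose saved state should be restored next. The caller below is purely illustrative of that contract; pit_handler is a made-up name and not part of the xelix sources.

/* Illustrative interrupt glue only (names are assumptions): hand over the saved
 * context, let the scheduler pick the next runnable task, and return the state
 * the low-level stub should switch to. */
cpu_state_t* pit_handler(cpu_state_t* lastRegs)
{
	task_t* next = scheduler_select(lastRegs);
	if (next == NULL)
		return lastRegs;   /* nothing runnable: resume the interrupted context */
	return next->state;
}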
Code Example #4
File: syscall.c    Project: achreto/barrelfish
struct sysret
sys_dispatcher_properties(struct capability *to,
                          enum task_type type, unsigned long deadline,
                          unsigned long wcet, unsigned long period,
                          unsigned long release, unsigned short weight)
{
    assert(to->type == ObjType_Dispatcher);

#ifdef CONFIG_SCHEDULER_RBED
    struct dcb *dcb = to->u.dispatcher.dcb;

    assert(type >= TASK_TYPE_BEST_EFFORT && type <= TASK_TYPE_HARD_REALTIME);
    assert(wcet <= deadline);
    assert(wcet <= period);
    assert(type != TASK_TYPE_BEST_EFFORT || weight > 0);

    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                152);
    scheduler_remove(dcb);

    /* Set task properties */
    dcb->type = type;
    dcb->deadline = deadline;
    dcb->wcet = wcet;
    dcb->period = period;
    dcb->release_time = (release == 0) ? kernel_now : release;
    dcb->weight = weight;

    make_runnable(dcb);
#endif

    return SYSRET(SYS_ERR_OK);
}
Code Example #5
void scheduler_run() {

	task_t taskRunner = NULL;
	taskDescriptor* cursor = NULL;

	ATOMIC_BLOCK(ATOMIC_RESTORESTATE)
	{
		cursor = taskList;
		while (cursor != NULL) {
			taskRunner = cursor->task;

			if (cursor->execute && taskRunner != NULL) {
				taskRunner(cursor->param);
				cursor->execute = 0;
			}
			/*
			 * delete non periodic tasks after the execution
			 */
			if (cursor->period == 0) {
				scheduler_remove(cursor);
			}
			cursor = cursor->next;
		}
	}
}
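Note that the comment promises to delete non-periodic tasks after they execute, yet the loop above removes every descriptor with period == 0 on each pass, whether or not it has run, and then reads cursor->next from the node it has just unlinked. The fragment below sketches a variant that matches the comment, assuming scheduler_remove() only unlinks the descriptor and does not free it.

	/* Sketch of a safer traversal: capture next before any removal, and drop a
	 * one-shot task only once it has actually run. */
	cursor = taskList;
	while (cursor != NULL) {
		taskDescriptor* nextTask = cursor->next;   /* still valid if cursor is unlinked below */
		if (cursor->execute && cursor->task != NULL) {
			cursor->task(cursor->param);
			cursor->execute = 0;
			if (cursor->period == 0) {
				scheduler_remove(cursor);          /* one-shot: remove after its single run */
			}
		}
		cursor = nextTask;
	}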
Code Example #6
File: alarm.c    Project: yaojiadong/2016_ses
fsmReturnStatus normal_operating(Fsm * fsm, const Event* e) {
	static bool enable = false;

	switch (e->signal) {
	case ENTRY:
		td_time_increment.expire = 0;
		scheduler_add(&td_time_increment);
		return RET_HANDLED;
	case JOYSTICK_PRESSED:
		return TRANSITION(set_alarm_hour);
	case ROTARY_PRESSED:
		enable = !enable;
		fsm->isAlarmEnabled = enable;
		if (enable)
			led_yellowOn();
		else
			led_yellowOff();
		return RET_HANDLED;
	case MATCHING:
		if (fsm->isAlarmEnabled) {
			return TRANSITION(led_toggle);
		} else
			return RET_HANDLED;
	case EXIT:
		scheduler_remove(&td_time_increment);
		return RET_HANDLED;
	default:
		return RET_IGNORED;
	}
}
Code Example #7
File: scheduler_test.c    Project: yaojiadong/2016_ses
void control_yellow() {
	static taskDescriptor td3;
	// td3 must keep its value across calls, hence static.
	// Without static, td3 would live on the stack, so the &td3 pointer handed to wrapper_yellowOff() would be invalid by the time the task runs.
	state_on = !state_on;

	if (state_on) {
		led_yellowOn();
		td3.task = &wrapper_yellowOff;
		td3.param = &td3;
		td3.expire = 5000; //count 5s
		td3.period = 0;//run once
		td3.execute = 0;
		td3.next = NULL;
		scheduler_add(&td3);
	} else {
		led_redOff();
		wrapper_yellowOff(&td3);
		scheduler_remove(&td3);
	}
}
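The descriptor is handed to the task as its own param so the one-shot callback can deregister itself, which is why td3 has to be static. The snippet does not show wrapper_yellowOff, so the following is only an assumed shape of it, consistent with how control_yellow() uses it.

/* Assumed shape of the one-shot callback (not shown in the original snippet):
 * switch the yellow LED off and unlink the descriptor that scheduled this call.
 * The parameter is the &td3 that control_yellow() stored in td3.param. */
void wrapper_yellowOff(void* param) {
	taskDescriptor* self = (taskDescriptor*) param;
	led_yellowOff();
	scheduler_remove(self);   /* deregister this one-shot task's descriptor */
}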
Code Example #8
File: syscall.c    Project: achreto/barrelfish
struct sysret sys_yield(capaddr_t target)
{
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);


    debug(SUBSYS_DISPATCH, "%.*s yields%s\n", DISP_NAME_LEN, disp->name,
          !disp->haswork && disp->lmp_delivered == disp->lmp_seen
           ? " and is removed from the runq" : "");

    if (!disp->disabled) {
        printk(LOG_ERR, "SYSCALL_YIELD while enabled\n");
        return SYSRET(SYS_ERR_CALLER_ENABLED);
    }

    struct capability *yield_to = NULL;
    if (target != CPTR_NULL) {
        errval_t err;

        /* directed yield */
        err = caps_lookup_cap(&dcb_current->cspace.cap, target, CPTR_BITS,
                              &yield_to, CAPRIGHTS_READ);
        if (err_is_fail(err)) {
            return SYSRET(err);
        } else if (yield_to == NULL ||
                   (yield_to->type != ObjType_EndPoint
                    && yield_to->type != ObjType_Dispatcher)) {
            return SYSRET(SYS_ERR_INVALID_YIELD_TARGET);
        }
        /* FIXME: check rights? */
    }

    disp->disabled = false;
    dcb_current->disabled = false;

    // Remove from queue when no work and no more messages and no missed wakeup
    systime_t wakeup = disp->wakeup;
    if (!disp->haswork && disp->lmp_delivered == disp->lmp_seen
        && (wakeup == 0 || wakeup > (kernel_now + kcb_current->kernel_off))) {

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_SCHED_REMOVE,
            (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF);
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                151);

        scheduler_remove(dcb_current);
        if (wakeup != 0) {
            wakeup_set(dcb_current, wakeup);
        }
    } else {
        // Otherwise yield for the timeslice
        scheduler_yield(dcb_current);
    }

    if (yield_to != NULL) {
        struct dcb *target_dcb = NULL;
        if (yield_to->type == ObjType_EndPoint) {
            target_dcb = yield_to->u.endpoint.listener;
        } else if (yield_to->type == ObjType_Dispatcher) {
            target_dcb = yield_to->u.dispatcher.dcb;
        } else {
            panic("invalid type in yield cap");
        }

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_YIELD,
            (uint32_t)(lvaddr_t)target_dcb & 0xFFFFFFFF);
        make_runnable(target_dcb);
        dispatch(target_dcb);
    } else {
//        trace_event(TRACE_SUBSYS_BNET, TRACE_EVENT_BNET_YIELD,
//            0);

        /* undirected yield */
        dispatch(schedule());
    }

    panic("Yield returned!");
}
Code Example #9
File: exn.c    Project: MichaelFQuigley/barrelfish
void handle_user_page_fault(lvaddr_t                fault_address,
                            arch_registers_state_t* save_area)
{
    lvaddr_t handler;
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(dcb_current->disp);
    uintptr_t saved_pc = save_area->named.pc;

    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
    bool disabled = (disp->d.disabled != 0);

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);

    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                      " IP %"PRIxPTR"\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, saved_pc);

    if (disabled) {
        assert(save_area == &disp->trap_save_area);
        handler = disp->d.dispatcher_pagefault_disabled;
        dcb_current->faults_taken++;
    }
    else {
        assert(save_area == &disp->enabled_save_area);
        handler = disp->d.dispatcher_pagefault;
    }

    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }
    else {
        //
        // Upcall to dispatcher
        //
        // NB System might be cleaner with a prototype
        // dispatch context that has R0-R3 to be overwritten
        // plus initial stack, thread, and gic registers. Could do
        // a faster resume_for_upcall().
        //

        struct dispatcher_shared_generic *disp_gen =
            get_dispatcher_shared_generic(dcb_current->disp);

        /* XXX - This code leaks the contents of the kernel stack to the
         * user-level fault handler. */
        union registers_aarch64 resume_area;

        resume_area.named.x0   = disp_gen->udisp;
        resume_area.named.x1   = fault_address;
        resume_area.named.x2   = 0;
        resume_area.named.x3   = saved_pc;
        /* Why does the kernel do this? */
        resume_area.named.x10  = disp->got_base;
        resume_area.named.pc   = handler;
        resume_area.named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;

        // SP is set by handler routine.

        // Upcall user to save area
        disp->d.disabled = true;
		printk(LOG_WARN, "page fault at %p calling handler %p\n",
               fault_address, handler);
        resume(&resume_area);
    }
}
Code Example #10
File: simulator.c    Project: CoryXie/BarrelfishOS
int main(int argc, char **argv)
{
    int tasks = 0, alltasks = MAXTASKS, runtime, quantum = 1;

    if(argc < 3) {
        printf("Usage: %s <config.cfg> <runtime> [quantum]\n", argv[0]);
        exit(EXIT_FAILURE);
    }

    runtime = atoi(argv[2]);
    if(argc >= 4) {
        quantum = atoi(argv[3]);
    }

    sched = malloc(sizeof(struct dcb) * runtime * alltasks);
    allptrs = calloc(alltasks, sizeof(struct dcb *));

    FILE *f = fopen(argv[1], "r");
    assert(f != NULL);
    bool readline = true;

    for(kernel_now = 0; kernel_now < runtime; kernel_now++) {
        unsigned long time, wcet, period, weight, id, blocktime, deadline, rd;
        char b[512], *r;

        for(;;) {
            if(readline) {
                do {
                    r = fgets(b, 512, f);
                } while(r != NULL && (b[0] == '#' || b[0] == '\n'));

                if(r == NULL) {
                    break;
                }
            } else {
                readline = true;
            }

            if((rd = sscanf(b, "%lu H %lu %lu %lu %lu", &time, &wcet, &period, &blocktime, &deadline)) >= 4) {
                if(time != kernel_now) { readline = false; break; }
                // Create new hard real-time task
                struct dcb *dcb = malloc(sizeof(struct dcb));
                init_dcb(dcb, tasks);
                dcb->type = TASK_TYPE_HARD_REALTIME;
                dcb->wcet = wcet;
                dcb->period = period;
                dcb->blocktime = blocktime;
                dcb->release_time = kernel_now;
                snprintf(dcb->dsg.name, DISP_NAME_LEN, "h %d", tasks);
                if(rd == 5) {
                    dcb->deadline = deadline;
                } else {
                    dcb->deadline = period;
                }
                make_runnable(dcb);
                assert(tasks < MAXTASKS);
                allptrs[tasks++] = dcb;
            } else if(sscanf(b, "%lu S %lu %lu", &time, &wcet, &period) == 3) {
                if(time != kernel_now) { readline = false; break; }
                // Create new soft real-time task
                struct dcb *dcb = malloc(sizeof(struct dcb));
                init_dcb(dcb, tasks);
                dcb->type = TASK_TYPE_SOFT_REALTIME;
                dcb->wcet = wcet;
                dcb->period = period;
                snprintf(dcb->dsg.name, DISP_NAME_LEN, "s %d", tasks);
                make_runnable(dcb);
                assert(tasks < MAXTASKS);
                allptrs[tasks++] = dcb;
            } else if(sscanf(b, "%lu B %lu", &time, &weight) == 2) {
                if(time != kernel_now) { readline = false; break; }
                // Create new best-effort task
                struct dcb *dcb = malloc(sizeof(struct dcb));
                init_dcb(dcb, tasks);
                dcb->type = TASK_TYPE_BEST_EFFORT;
                dcb->weight = weight;
                snprintf(dcb->dsg.name, DISP_NAME_LEN, "b %d", tasks);
                make_runnable(dcb);
                assert(tasks < MAXTASKS);
                allptrs[tasks++] = dcb;
            } else if(sscanf(b, "%lu d %lu", &time, &id) == 2) {
                if(time != kernel_now) { readline = false; break; }
                // Delete task with given ID
                assert(id < MAXTASKS);
                scheduler_remove(allptrs[id]);
            } else if(sscanf(b, "%lu r %lu", &time, &id) == 2) {
                if(time != kernel_now) { readline = false; break; }
                // Re-release task with given ID
                assert(id < MAXTASKS);
                if(allptrs[id]->type != TASK_TYPE_BEST_EFFORT) {
                    allptrs[id]->release_time = kernel_now;
                }
                make_runnable(allptrs[id]);
            } else if(sscanf(b, "%lu y %lu", &time, &id) == 2) {
                if(time != kernel_now) { readline = false; break; }
                // Yield task with given ID
                assert(id < MAXTASKS);
                scheduler_yield(allptrs[id]);
            } else if(sscanf(b, "%lu c %lu", &time, &id) == 2) {
                if(time != kernel_now) { readline = false; break; }
                // Context switch to task with given ID
                assert(id < MAXTASKS);
                dcb_current = allptrs[id];
                continue;
            } else {
                fprintf(stderr, "Invalid line: %s\n", b);
                abort();
            }

            dcb_current = schedule();
        }

        for(int i = 0; i < alltasks; i++) {
            struct dcb *cd = allptrs[i];
            if(cd != NULL) {
                cd->dispatched = false;

#if 0
                if(cd->type == TASK_TYPE_HARD_REALTIME) {
                    if(cd->etime >= cd->blocktime) {
                        scheduler_remove(cd);
                    }
                }
#endif
            }
        }

        if(kernel_now % quantum == 0) {
            dcb_current = schedule();
        }

        if(dcb_current != NULL) {
            dcb_current->dispatched = true;

            /* printf("%4d: dispatching %2d, release time: %4lu, deadline: %4lu, period: %3lu, WCET: %3lu/%3lu\n", kernel_now, dcb_current->id, dcb_current->release_time, dcb_current->deadline, dcb_current->period, dcb_current->etime, dcb_current->wcet); */
        }
        for(int i = 0; i < alltasks; i++) {
            if(allptrs[i] != NULL) {
                sched[kernel_now * alltasks + i] = *allptrs[i];
            }
        }
    }

    fclose(f);

    // Print schedule
    printf("     ");
    for(int t = 0; t < runtime; t++) {
        if(t % 1000 == 0) {
            printf("%d", (t / 1000) % 10);
        } else {
            printf(" ");
        }
    }
    printf("\n");
    printf("     ");
    for(int t = 0; t < runtime; t++) {
        if(t % 100 == 0) {
            printf("%d", (t / 100) % 10);
        } else {
            printf(" ");
        }
    }
    printf("\n");
    printf("     ");
    for(int t = 0; t < runtime; t++) {
        if(t % 10 == 0) {
            printf("%d", (t / 10) % 10);
        } else {
            printf(" ");
        }
    }
    printf("\n");

    printf("     ");
    for(int t = 0; t < runtime; t++) {
        printf("%d", t % 10);
    }
    printf("\n");

    for(int i = 0; i < tasks; i++) {
        struct dcb *ct = allptrs[i];
        printf("%c%2d: ", typechar(ct->type), i);
        for(int t = 0; t < runtime; t++) {
            struct dcb *s = &sched[t * alltasks + i];

            if(s->dispatched) {
                printf("#");
            } else {
                printf(" ");
            }
        }
        printf("\n");
        printf("     ");
        for(int t = 0; t < runtime; t++) {
            struct dcb *s = &sched[t * alltasks + i];

            if(s->release_time == t) {
                printf("r");
            } else {
                printf(" ");
            }
        }
        printf("\n");
    }

    free(sched);
    free(allptrs);
    return 0;
}
Code Example #11
File: exn.c    Project: Karamax/arrakis
/*
 * Try to find out what address generated page fault, and then tell the dispatcher what
 * happened. Some faults will not be recoverable (e.g. stack faults), because the 
 * context has already been lost
 */
void handle_user_page_fault(arch_registers_state_t* save_area)
{
    lvaddr_t fault_address; // not passed as an argument, because there is more than one place to look for it
 /*   
    //print out registers for debugging
    printf("page fault. registers:\n");
    for(uint32_t i = 0; i<NUM_REGS; i++){
        printf("0x%x\n", save_area->regs[i]);
    }
    uint32_t regval;
    __asm volatile ("mrs %[regval], xpsr" : [regval] "=r"(regval));
    printf("current XPSR register: 0x%x\n", regval);
    
    printf("M3 MMU address: 0x%x\n", *((uint32_t*) &mmu));
    printf("M3 MMU_FAULT_AD register: 0x%x\n", omap44xx_mmu_fault_ad_rd(&mmu));
    printf("M3 MMU_FAULT_STATUS register: 0x%x\n", omap44xx_mmu_fault_status_rd(&mmu));
    printf("M3 MMU_FAULT_PC register: 0x%x\n", omap44xx_mmu_fault_pc_rd(&mmu));
    printf("M3 MMU_IRQSTATUS register: 0x%x\n", omap44xx_mmu_irqstatus_rd(&mmu));
    
    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
    printf("BFAR: 0x%x\n", omap44xx_cortex_m3_nvic_BFAR_rd(&nvic));
    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
  */ 
    if (omap44xx_cortex_m3_nvic_SHCSR_busfaultact_rdf(&nvic)){
        //triggered by bus fault    
        if (omap44xx_mmu_irqstatus_rd(&mmu)){
            //L2 MMU triggered fault: either no valid mapping, or two mappings in TLB
            //XXX: cachemarker: once we have caching enabled, this is the place to
            //look at table entry for special permission bits.
            
            //XXX: MMU_FAULT_ADDR register seems to just contain the last address that was
            //requested. By this time this is probably just a kernelspace address.
            //I am not sure if the M3 can actually find out what the faulting address really was
            fault_address = omap44xx_mmu_fault_ad_rd(&mmu);
        }
        else{
            //"regular" bus fault -> look in NVIC entries
            if (omap44xx_cortex_m3_nvic_CFSR_bfarvalid_rdf(&nvic)){
                //bus fault address register valid
                fault_address = omap44xx_cortex_m3_nvic_BFAR_rd(&nvic);
            }
            else{
                //one of the bus faults that do not write the BFAR -> faulting address
                //literally unknown to system
                printk(LOG_WARN, "user bus fault with unknown faulting address\n");
                fault_address = (lvaddr_t) NULL;
            }
        }
    }
    else{
        //memory management fault (probably access violation)
        if (omap44xx_cortex_m3_nvic_CFSR_mmarvalid_rdf(&nvic)){
            //MMAR contains faulting address
            fault_address = omap44xx_cortex_m3_nvic_MMAR_rd(&nvic);
        }
        else{
            //MMAR not written. probably executing in noexecute region
            assert(omap44xx_cortex_m3_nvic_CFSR_iaccviol_rdf(&nvic));
            //so we can assume the pc caused the fault
            fault_address = save_area->named.pc;
        }
    }
    
    lvaddr_t handler;
    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(dcb_current->disp);
    uintptr_t saved_pc = save_area->named.pc;

    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
    bool disabled = (disp->d.disabled != 0);

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);

    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                      " IP %"PRIxPTR"\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, saved_pc);

    if (disabled) {
        assert(save_area == &disp->trap_save_area);
        handler = disp->d.dispatcher_pagefault_disabled;
        dcb_current->faults_taken++;
    }
    else {
        assert(save_area == &disp->enabled_save_area);
        handler = disp->d.dispatcher_pagefault;
    }

    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }
    else {
        // Upcall to dispatcher


        struct dispatcher_shared_generic *disp_gen =
            get_dispatcher_shared_generic(dcb_current->disp);

        union registers_arm resume_area;

        //make sure we do not accidentally create an IT block when upcalling
        resume_area.named.cpsr = 0; 

        resume_area.named.pc   = handler;
        resume_area.named.r0   = disp_gen->udisp;
        resume_area.named.r1   = fault_address;
        resume_area.named.r2   = 0;
        resume_area.named.r3   = saved_pc;
        resume_area.named.rtls = disp_gen->udisp;
        resume_area.named.r10  = disp->got_base;
        
        //we need some temporary stack to exit handler mode. memory in userspace would be
        //better, but for the moment we can use this temporary region
        resume_area.named.stack   = (uint32_t) &irq_save_pushed_area_top;
        

        // Upcall user to save area
        disp->d.disabled = true;
        resume(&resume_area);
    }
}
Code Example #12
File: cap_delete.c    Project: achreto/barrelfish
/**
 * \brief Delete the last copy of a cap in the entire system.
 * \bug Somewhere in the delete process, the remote_ancs property should be
 *      propagated to (remote) immediate descendants.
 */
errval_t caps_delete_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;
    assert(!has_copies(cte));

    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "delete_last but remote_copies is set\n");
    }

    TRACE_CAP_MSG("deleting last", cte);

    // try simple delete
    // XXX: this really should always fail, enforce that? -MN
    // XXX: this is probably not the way we should enforce/check this -SG
    err = caps_try_delete(cte);
    if (err_no(err) != SYS_ERR_DELETE_LAST_OWNED &&
        err_no(err) != SYS_ERR_CAP_LOCKED) {
        return err;
    }

    // CNodes and dcbs contain further CTEs, so cannot simply be deleted
    // instead, we place them in a clear list, which is progressively worked
    // through until each list element contains only ctes that point to
    // other CNodes or dcbs, at which point they are scheduled for final
    // deletion, which only happens when the clear lists are empty.

    if (cte->cap.type == ObjType_CNode) {
        debug(SUBSYS_CAPS, "deleting last copy of cnode: %p\n", cte);
        // Mark all non-Null slots for deletion
        for (cslot_t i = 0; i < (1<<cte->cap.u.cnode.bits); i++) {
            struct cte *slot = caps_locate_slot(cte->cap.u.cnode.cnode, i);
            caps_mark_revoke_generic(slot);
        }

        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else if (cte->cap.type == ObjType_Dispatcher)
    {
        debug(SUBSYS_CAPS, "deleting last copy of dispatcher: %p\n", cte);
        struct capability *cap = &cte->cap;
        struct dcb *dcb = cap->u.dispatcher.dcb;

        // Remove from queue
        scheduler_remove(dcb);
        // Reset current if it was deleted
        if (dcb_current == dcb) {
            dcb_current = NULL;
        }

        // Remove from wakeup queue
        wakeup_remove(dcb);

        // Notify monitor
        if (monitor_ep.u.endpoint.listener == dcb) {
            printk(LOG_ERR, "monitor terminated; expect badness!\n");
            monitor_ep.u.endpoint.listener = NULL;
        } else if (monitor_ep.u.endpoint.listener != NULL) {
            uintptr_t payload = dcb->domain_id;
            err = lmp_deliver_payload(&monitor_ep, NULL, &payload, 1, false);
            if (err_is_fail(err)) {
                printk(LOG_NOTE, "while notifying monitor about domain exit: %"PRIuERRV".\n", err);
                printk(LOG_NOTE, "please add the console output to the following bug report: https://code.systems.ethz.ch/T78\n");
            }
            assert(err_is_ok(err));
        }

        caps_mark_revoke_generic(&dcb->cspace);
        caps_mark_revoke_generic(&dcb->disp_cte);
        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else
    {
        // last copy, perform object cleanup
        return cleanup_last(cte, ret_ram_cap);
    }
}