Example #1
void
test_cv_broadcast()
{
	long i;
	Tid result;

	unintr_printf("starting cv broadcast test\n");
	unintr_printf("threads should print out in reverse order\n");

	testcv_broadcast = cv_create();
	testlock = lock_create();
	done = 0;
	testval1 = NTHREADS - 1;
	for (i = 0; i < NTHREADS; i++) {
		result = thread_create((void (*)(void *))
				       test_cv_broadcast_thread, (void *)i);
		assert(thread_ret_ok(result));
	}

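	/* __sync_fetch_and_add(&done, 0) below is the usual GCC-builtin idiom
	 * for an atomic read of done */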
	while (__sync_fetch_and_add(&done, 0) < NTHREADS) {
		/* this requires thread_yield to be working correctly */
		thread_yield(THREAD_ANY);
	}

	assert(interrupts_enabled());
	cv_destroy(testcv_broadcast);
	assert(interrupts_enabled());	
	unintr_printf("cv broadcast test done\n");
}
Example #2
void
test_lock()
{
	long i;
	Tid result;

	unintr_printf("starting lock test\n");

	testlock = lock_create();
	done = 0;
	for (i = 0; i < NTHREADS; i++) {
		result = thread_create((void (*)(void *))test_lock_thread,
				       (void *)i);
		assert(thread_ret_ok(result));
	}

	while (__sync_fetch_and_add(&done, 0) < NTHREADS) {
		/* this requires thread_yield to be working correctly */
		thread_yield(THREAD_ANY);
	}

	assert(interrupts_enabled());
	lock_destroy(testlock);
	assert(interrupts_enabled());
	unintr_printf("lock test done\n");
}
Example #3
static int
try_move_potato(int num, int pass)
{
	int ret = 0;
	int err;
	struct timeval pend, pdiff;

	assert(interrupts_enabled());
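	/* potato_lock is used as a test-and-set spinlock: CAS 0 -> 1 acquires
	 * it, CAS 1 -> 0 (below) releases it */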
	err = __sync_bool_compare_and_swap(&potato_lock, 0, 1);
	if (!err) {	/* couldn't acquire lock */
		return ret;
	}
	if (potato[num]) {
		potato[num] = 0;
		potato[(num + 1) % NPOTATO] = 1;
		gettimeofday(&pend, NULL);
		timersub(&pend, &pstart, &pdiff);
		unintr_printf("%d: thread %3d passes potato "
			      "at time = %9.6f\n", pass, num,
			      (float)pdiff.tv_sec +
			      (float)pdiff.tv_usec / 1000000);
		assert(potato[(num + 1) % NPOTATO] == 1);
		assert(potato[(num) % NPOTATO] == 0);
		ret = 1;
	}
	err = __sync_bool_compare_and_swap(&potato_lock, 1, 0);
	assert(err);
	return ret;
}
Example #4
void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);

	printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk("  IRQs o%s  FIQs o%s  Mode %s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
}
Example #5
void show_regs(struct pt_regs *regs)
{
	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("LP is at %pS\n", (void *)regs->lp);
	pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
		"sp : %08lx  fp : %08lx  gp : %08lx\n",
		instruction_pointer(regs),
		regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
	pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);

	pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
		regs->uregs[23], regs->uregs[22],
		regs->uregs[21], regs->uregs[20]);
	pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
		regs->uregs[19], regs->uregs[18],
		regs->uregs[17], regs->uregs[16]);
	pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
		regs->uregs[15], regs->uregs[14],
		regs->uregs[13], regs->uregs[12]);
	pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->uregs[11], regs->uregs[10],
		regs->uregs[9], regs->uregs[8]);
	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
	pr_info("  IRQs o%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
}
Example #6
/*
 * do_splx routine, takes new ipl to set
 * returns the old ipl.
 * We are careful not to set the priority lower than CPU->cpu_base_pri:
 * even though it may seem we are only raising the priority, it could be
 * set higher at any time by an interrupt routine, so we must block
 * interrupts and look at CPU->cpu_base_pri.
 */
int
do_splx(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
	cpu = CPU; /* ints are disabled, now safe to cache cpu ptr */
	curpri = cpu->cpu_m.mcpu_pri;
	basepri = cpu->cpu_base_spl;
	if (newpri < basepri)
		newpri = basepri;
	cpu->cpu_m.mcpu_pri = newpri;
	(*setspl)(newpri);
	/*
	 * If we are going to reenable interrupts see if new priority level
	 * allows pending softint delivery.
	 */
	if (IS_FAKE_SOFTINT(flag, newpri))
		fakesoftint();
	ASSERT(!interrupts_enabled());
	intr_restore(flag);
	return (curpri);
}
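A typical caller pairs a raise with a restore; a minimal sketch, assuming the
usual SPL convention where splhigh() raises the IPL and splx() forwards to
do_splx() (those wrapper names are not shown in this excerpt):

	int s = splhigh();	/* raise IPL, remember the old level */
	/* ...code that must not be preempted by lower-priority interrupts... */
	splx(s);		/* restore via do_splx(s), which returns the previous ipl */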
Example #7
void show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	const char *processor_modes[] = {
		"USER_26", "FIQ_26", "IRQ_26", "SVC_26",
		"UK4_26", "UK5_26", "UK6_26", "UK7_26",
		"UK8_26", "UK9_26", "UK10_26", "UK11_26",
		"UK12_26", "UK13_26", "UK14_26", "UK15_26",
		"USER_32", "FIQ_32", "IRQ_32", "SVC_32",
		"UK4_32", "UK5_32", "UK6_32", "ABT_32",
		"UK8_32", "UK9_32", "UK10_32", "UND_32",
		"UK12_32", "UK13_32", "UK14_32", "SYS_32",
	};

	flags = condition_codes(regs);

	printf("pc : [<%08lx>]	   lr : [<%08lx>]\n"
	       "sp : %08lx  ip : %08lx	 fp : %08lx\n",
	       instruction_pointer(regs), regs->ARM_lr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printf("r10: %08lx  r9 : %08lx	 r8 : %08lx\n", regs->ARM_r10, regs->ARM_r9, regs->ARM_r8);
	printf("r7 : %08lx  r6 : %08lx	 r5 : %08lx  r4 : %08lx\n",
	       regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4);
	printf("r3 : %08lx  r2 : %08lx	 r1 : %08lx  r0 : %08lx\n",
	       regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0);
	printf("Flags: %c%c%c%c",
	       flags & CC_N_BIT ? 'N' : 'n',
	       flags & CC_Z_BIT ? 'Z' : 'z',
	       flags & CC_C_BIT ? 'C' : 'c', flags & CC_V_BIT ? 'V' : 'v');
	printf("  IRQs %s  FIQs %s  Mode %s%s\n",
	       interrupts_enabled(regs) ? "on" : "off",
	       fast_interrupts_enabled(regs) ? "on" : "off",
	       processor_modes[processor_mode(regs)], thumb_mode(regs) ? " (T)" : "");
}
Example #8
/*
 * STUB: once register_interrupt_handler() is called, this routine
 * gets called each time SIG_TYPE is sent to this process
 */
static void
interrupt_handler(int sig, siginfo_t * sip, void *contextVP)
{
	ucontext_t *context = (ucontext_t *) contextVP;

	/* check that SIG_TYPE is blocked on entry */
	assert(!interrupts_enabled());
	if (loud) {
		int ret;
		ret = gettimeofday(&end, NULL);
		assert(!ret);
		if (first) {
			first = 0;
		} else {
			timersub(&end, &start, &diff);
		}
		start = end;
		printf("%s: context at %10p, time diff = %ld us\n",
		       __FUNCTION__, context,
		       diff.tv_sec * 1000000 + diff.tv_usec);
	}
	set_interrupt();
	/* implement preemptive threading by calling thread_yield */
	thread_yield(THREAD_ANY);
}
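register_interrupt_handler() itself is not shown; a plausible sketch of the
registration, using standard POSIX sigaction() (SIG_TYPE is the codebase's
chosen signal; the real routine presumably also arms a periodic timer, e.g.
with setitimer(), so the signal keeps firing):

#include <signal.h>
#include <string.h>

static void
register_interrupt_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = interrupt_handler;
	sa.sa_flags = SA_SIGINFO;	/* deliver siginfo_t and a ucontext pointer */
	sigemptyset(&sa.sa_mask);
	/* SIG_TYPE itself stays blocked during delivery by default (no
	 * SA_NODEFER), which is what the assert at the top of the handler checks */
	sigaction(SIG_TYPE, &sa, NULL);
}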
Example #9
/* Returns the first free frame, roughly after (or at) /start_addr/ */
static uint32 _pmm_first_free_frame(uint32 start_addr) {
	assert(interrupts_enabled() == false);
	uint32 index = start_addr / PAGE_SIZE / 32;
	if (index != 0)
		index -= 1; // TODO: fix this - this is to be on the safe side by wasting time instead of getting bad results during the initial implementation phase

	for (; index < nframes / 32; index++) {
		if (used_frames[index] == 0xffffffff) {
			/* No bits are free among the 32 tested; try the next index */
			continue;
		}

		/* Since we're still here, at least one bit among these 32 is zero... Let's find the first. */
		// Offset starts at 0, which means we *may* return something earlier
		// than start_addr, but start_addr is only intended as a rough guide,
		// not a hard rule.
		for (uint32 offset = 0; offset < 32; offset++) {
			if ((used_frames[index] & (1 << offset)) == 0) {
				/* Found it! Return the frame address. */
				return (index * 32 + offset) * PAGE_SIZE;
			}
		}
	}

	/* If this is reached, there were no free frames! */
	return 0xffffffff;
}
Example #10
/* Clear a bit in the used_frames bitmap */
static void _pmm_clear_frame(uint32 phys_addr) {
	assert(interrupts_enabled() == false);
	uint32 frame_index = phys_addr / PAGE_SIZE;
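	/* ARRAY_INDEX()/OFFSET_INTO_DWORD() are presumably frame_index / 32 and
	 * frame_index % 32: which 32-bit word of the bitmap, and which bit in it */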
	uint32 index = ARRAY_INDEX(frame_index);
	assert (index <= nframes/32);
	uint32 offset = OFFSET_INTO_DWORD(frame_index);
	assert((used_frames[index] & (1 << offset)) != 0);
	used_frames[index] &= ~(1 << offset);
}
Example #11
static void
test_lock_thread(unsigned long num)
{
	int i, j;

	for (i = 0; i < LOOPS; i++) {
		for (j = 0; j < NLOCKLOOPS; j++) {
			int ret;
			
			assert(interrupts_enabled());
			lock_acquire(testlock);

			
			testval1 = num;

			/* let's yield to make sure that even when other threads
			 * run, they cannot access the critical section. */
			assert(interrupts_enabled());
			ret = thread_yield(THREAD_ANY);
			assert(thread_ret_ok(ret));

			testval2 = num * num;

			/* yield again */
			assert(interrupts_enabled());
			ret = thread_yield(THREAD_ANY);
			assert(thread_ret_ok(ret));

			testval3 = num % 3;
			
			assert(testval2 == testval1 * testval1);
			assert(testval2 % 3 == (testval3 * testval3) % 3);
			assert(testval3 == testval1 % 3);
			assert(testval1 == num);
			assert(testval2 == num * num);
			assert(testval3 == num % 3);

			assert(interrupts_enabled());
			lock_release(testlock);
		}
		unintr_printf("%d: thread %3d passes\n", i, num);
	}
	__sync_fetch_and_add(&done, 1);
}
Example #12
/* Test whether a bit is set in the used_frames bitmap */
static bool _pmm_test_frame(uint32 phys_addr) {
	assert(interrupts_enabled() == false);
	uint32 frame_index = phys_addr / PAGE_SIZE;
	uint32 index = ARRAY_INDEX(frame_index);
	assert (index <= nframes/32);
	uint32 offset = OFFSET_INTO_DWORD(frame_index);
	return (used_frames[index] & (1 << offset)) != 0;
}
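For reference, the matching set operation would mirror _pmm_clear_frame above;
a sketch (this helper is not part of the excerpts here):

static void _pmm_set_frame(uint32 phys_addr) {
	assert(interrupts_enabled() == false);
	uint32 frame_index = phys_addr / PAGE_SIZE;
	uint32 index = ARRAY_INDEX(frame_index);
	assert(index <= nframes/32);
	uint32 offset = OFFSET_INTO_DWORD(frame_index);
	assert((used_frames[index] & (1 << offset)) == 0);	/* must be free */
	used_frames[index] |= (1 << offset);
}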
Example #13
void
test_preemptive()
{
	int ret;
	long ii;
	Tid potato_tids[NPOTATO];

	unintr_printf("starting preemptive test\n");
	unintr_printf("this test will take %d seconds\n", DURATION / 1000000);
	gettimeofday(&pstart, NULL);
	/* spin for some time, so you see the interrupt handler output */
	spin(SIG_INTERVAL * 5);
	interrupts_quiet();

	potato[0] = 1;
	for (ii = 1; ii < NPOTATO; ii++) {
		potato[ii] = 0;
	}

	for (ii = 0; ii < NPOTATO; ii++) {
		potato_tids[ii] =
			thread_create((void (*)(void *))do_potato, (void *)ii);
		assert(thread_ret_ok(potato_tids[ii]));
		assert(interrupts_enabled());
	}

	spin(DURATION);

	unintr_printf("cleaning hot potato\n");

	for (ii = 0; ii < NPOTATO; ii++) {
		assert(interrupts_enabled());
		ret = thread_exit(THREAD_ANY);
		assert(thread_ret_ok(ret));
	}

	unintr_printf("preemptive test done\n");
}
Example #14
static void
test_cv_broadcast_thread(unsigned long num)
{
	int i;
	struct timeval start, end, diff;

	for (i = 0; i < LOOPS; i++) {
		assert(interrupts_enabled());
		lock_acquire(testlock);
		while (testval1 != num) {
			gettimeofday(&start, NULL);
			assert(interrupts_enabled());			
			cv_wait(testcv_broadcast, testlock);
			gettimeofday(&end, NULL);
			timersub(&end, &start, &diff);

			/* cv_wait should wait at least 4-5 ms */
			if (diff.tv_sec == 0 && diff.tv_usec < 4000) {
				unintr_printf("%s took %ld us. That's too fast."
					      " You must be busy looping\n",
					      __FUNCTION__, diff.tv_usec);
				goto out;
			}
		}
		unintr_printf("%d: thread %3d passes\n", i, num);
		testval1 = (testval1 + NTHREADS - 1) % NTHREADS;

		/* spin for 5 ms */
		spin(5000);

		assert(interrupts_enabled());
		cv_broadcast(testcv_broadcast, testlock);
		assert(interrupts_enabled());
		lock_release(testlock);
	}
out:
	__sync_fetch_and_add(&done, 1);
}
Example #15
/*
 * Reach a point at which the CPU can be safely powered-off or
 * suspended.  Nothing can wake this CPU out of the loop.
 */
static void
enter_safe_phase(void)
{
	ulong_t flags = intr_clear();

	if (setjmp(&curthread->t_pcb) == 0) {
		cpu_phase[CPU->cpu_id] = CPU_PHASE_SAFE;
		while (cpu_phase[CPU->cpu_id] == CPU_PHASE_SAFE)
			SMT_PAUSE();
	}

	ASSERT(!interrupts_enabled());

	intr_restore(flags);
}
Example #16
static void
test(int enabled)
{
	int i;
	int is_enabled;

	for (i = 0; i < 16; i++) {
		spin(SIG_INTERVAL / 5);	/* spin for a short period */
		unintr_printf(".");
		fflush(stdout);
		/* check that the interrupt state matches what the caller expects */
		is_enabled = interrupts_enabled();
		assert(enabled == is_enabled);
	}
	unintr_printf("\n");
}
Example #17
// TODO: some way of telling when actually ... use readline to know
static bool anybody_alive(conf_object_t *cpu, struct test_state *t,
			  struct sched_state *s)
{
	struct agent *shell;

	if (t->test_ever_caused) {
		if (t->start_population == s->most_agents_ever) {
			/* Then the shell hasn't even spawned it yet. */
			return true;
		} else if (t->start_population != s->num_agents) {
			/* Shell's descendants are still alive. */
			assert(t->start_population < s->num_agents);
			return true;
		}
	}

	/* Now we are either before the beginning of the test case, or waiting
	 * for the shell and init to clean up the last remains. Either way, wait
	 * for shell and init to finish switching back and forth until both of
	 * them are suitably blocked. */

	/* In atomic scheduler paths, both threads might be off the runqueue
	 * (i.e., one going to sleep and switching to the other). Since we
	 * assume the scheduler is sane, this condition should hold then. */
	if (!kern_ready_for_timer_interrupt(cpu) || !interrupts_enabled(cpu)) {
		return true;
	}

	if ((shell = agent_by_tid_or_null(&s->rq, kern_get_shell_tid())) ||
	    (shell = agent_by_tid_or_null(&s->dq, kern_get_shell_tid()))) {
		if (shell->action.readlining) {
			if (kern_has_idle()) {
				return s->cur_agent->tid != kern_get_idle_tid();
			} else {
				return (Q_GET_SIZE(&s->rq) != 0 ||
					Q_GET_SIZE(&s->sq) != 0);
			}
		} else {
			return true;
		}
	}

	/* If we get here, the shell wasn't even created yet..! */
	return true;
}
Example #18
int execve(const char *path, char *argv[], char *envp[]) {
	INTERRUPT_LOCK;
	int r = elf_load_int(path, (task_t *)current_task, argv, envp);
	kfree((void *)path);
	// argv and envp are freed in elf_load_int
	argv = envp = NULL;

	if (r == 0) {
		assert(interrupts_enabled() == false);
		current_task->state = TASK_RUNNING;
		destroy_user_page_dir(current_task->old_mm->page_directory);
		vmm_destroy_task_mm(current_task->old_mm);
		current_task->old_mm = NULL;

		current_task->did_execve = true;
		current_task->esp = (uint32)current_task->stack - 84 + 12;
		// Overwrite the task's stack with new, zeroed values for registers etc.

		set_task_stack((task_t *)current_task, NULL, 0, 0);
		assert(current_task->new_entry > 0x100000);
		set_entry_point((task_t *)current_task, current_task->new_entry);

		assert(current_directory == current_task->mm->page_directory);
		//current_task->esp = ((uint32)((uint32)USER_STACK_START - sizeof(registers_t)));

		INTERRUPT_UNLOCK;
		YIELD;
		panic("returned past execve()!");
	}
	else {
		assert(r < 0);

		// execve failed! Let's undo most of the work, and return.
		struct task_mm *new_mm = current_task->mm;
		current_task->mm = current_task->old_mm;
		switch_page_directory(current_task->mm->page_directory);

		destroy_user_page_dir(new_mm->page_directory);
		vmm_destroy_task_mm(new_mm);

		INTERRUPT_UNLOCK;
		return r;
	}

	return 0; // To silence warnings
}
Example #19
static void dump_ipu_registers(struct rproc *rproc)
{
	unsigned long flags;
	char buf[64];
	struct pt_regs regs;

	if (!rproc->cdump_buf1)
		return;

	remoteproc_fill_pt_regs(&regs,
			(struct exc_regs *)rproc->cdump_buf1);

	pr_info("REGISTER DUMP FOR REMOTEPROC %s\n", rproc->name);
	pr_info("PC is at %08lx\n", instruction_pointer(&regs));
	pr_info("LR is at %08lx\n", regs.ARM_lr);
	pr_info("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		regs.ARM_pc, regs.ARM_lr, regs.ARM_cpsr,
		regs.ARM_sp, regs.ARM_ip, regs.ARM_fp);
	pr_info("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs.ARM_r10, regs.ARM_r9,
		regs.ARM_r8);
	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs.ARM_r7, regs.ARM_r6,
		regs.ARM_r5, regs.ARM_r4);
	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs.ARM_r3, regs.ARM_r2,
		regs.ARM_r1, regs.ARM_r0);

	flags = regs.ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

	pr_info("Flags: %s  IRQs o%s  FIQs o%s\n",
		buf, interrupts_enabled(&regs) ? "n" : "ff",
		fast_interrupts_enabled(&regs) ? "n" : "ff");
}
Example #20
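/*
 * Note: despite the name, this routine doubles as a kill/exit selector in
 * this codebase: THREAD_SELF tears down the caller and context-switches to
 * the next ready thread; THREAD_ANY or an explicit tid instead mark a ready
 * thread's flag so it exits the next time it is scheduled, returning its id.
 */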
Tid thread_exit(Tid tid) {
    int enabled = interrupts_set(0);
    if (tid == THREAD_SELF) {
        if (READY_HEAD == NULL) {
            interrupts_set(enabled);
            return THREAD_NONE;
        }
        /* interrupts are already disabled (interrupts_set(0) above) */
        struct thread *temp = RUNNING_THREAD;
        struct thread *new_run = READY_HEAD;
        READY_HEAD = READY_HEAD->next;
        temp->next = NULL;
        thread_id_array[temp->tid] = 0;
        insert_thread_to_delete(temp);
        new_run->next = NULL;
        new_run->state = RECOVER_STATE;
        RUNNING_THREAD = new_run;
        setcontext(&(new_run->thread_context));
    } else if (tid == THREAD_ANY) {
        if (READY_HEAD == NULL) {
            interrupts_set(enabled);
            return THREAD_NONE;
        }
        struct thread *temp = READY_HEAD;
        Tid returned_tid = temp->tid;
        temp->flag = 1;
        interrupts_set(enabled);
        return returned_tid;
    } else {
        if (tid > THREAD_MAX_THREADS - 1 || tid < THREAD_FAILED || !thread_id_array[tid]) {
            interrupts_set(enabled);
            return THREAD_INVALID;
        }
        if (READY_HEAD == NULL) {
            interrupts_set(enabled);
            return THREAD_NONE;
        }
        struct thread *temp = READY_HEAD;
        while (temp != NULL && temp->tid != tid)
            temp = temp->next;
        if (temp == NULL) {
            /* tid exists but is not on the ready queue */
            interrupts_set(enabled);
            return THREAD_INVALID;
        }
        Tid returned_tid = temp->tid;
        temp->flag = 1;
        interrupts_set(enabled);
        return returned_tid;
    }
    interrupts_set(enabled);
    return THREAD_FAILED;
}
Example #21
static void
do_potato(int num)
{
	int ret;
	int pass = 1;

	unintr_printf("0: thread %3d made it to %s\n", num, __FUNCTION__);
	while (1) {
		ret = try_move_potato(num, pass);
		if (ret) {
			pass++;
		}
		spin(1);
		/* Add some yields by some threads to scramble the list */
		if (num > 4) {
			int ii;
			for (ii = 0; ii < num - 4; ii++) {
				assert(interrupts_enabled());
				ret = thread_yield(THREAD_ANY);
				assert(thread_ret_ok(ret));
			}
		}
	}
}
Example #22
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}
Example #23
static int elf_load_int(const char *path, task_t *task, char *argv[], char *envp[]) {
	// Loads to a fixed address of 0x10000000 for now; not a HUGE deal
	// since each (user mode) task has its own address space

	assert(interrupts_enabled() == false); // TODO: get rid of the race condition from create_task, so that this isn't needed

	assert(task != NULL);
	struct task_mm *mm = task->mm;
	assert(mm != NULL);

	struct stat st;

	int r;
	if ((r = stat(path, &st)) != 0) {
		assert(r < 0);
		return r;
	}

	uint32 file_size = st.st_size;
	unsigned char *data = kmalloc(file_size);
	int retval = 0;

	int fd = open(path, O_RDONLY);
	if (fd < 0) {
		printk("elf_load(): unable to open %s\n", path);
		retval = fd;
		goto err;
	}

	if ((r = read(fd, data, file_size)) != (int)file_size) {
		printk("elf_load(): unable to read from %s; got %d bytes, requested %d\n", path, r, (int)file_size);
		close(fd);
		if (r < 0) {
			retval = r;
			goto err;
		}
		else {
			panic("read() returned less than the expected file size, but not a negative value... why?");
			retval = -EIO;
			goto err;
		}
	}

	close(fd);

	elf_header_t *header = (elf_header_t *)data;

	const unsigned char ELF_IDENT[] = {0x7f, 'E', 'L', 'F'};

	if (memcmp(header->e_ident.ei_mag, &ELF_IDENT, 4) != 0) {
		printk("Warning: file %s is not an ELF file; aborting execution\n", path);
		retval = -ENOEXEC;
		goto err;
	}

	// TODO SECURITY: don't trust anything from the file - users can EASILY execute
	// "forged" ELF files!

	if (header->e_ident.ei_class != ELFCLASS32 || header->e_ident.ei_data != ELFDATA2LSB || \
		header->e_ident.ei_version != 1 || header->e_machine != EM_386 || header->e_type != ET_EXEC) {
		printk("Warning: file %s is not a valid ELF file (invalid ELFCLASS, ELFDATA, version, machine or not ET_EXEC\n");
		retval = -ENOEXEC;
		goto err;
	}

	assert(header->e_entry >= 0x10000000);
	assert(header->e_entry <  0x11000000);

	if (task == current_task) {
		// execve
		assert(current_task->mm != NULL);
		assert(current_task->mm->areas != NULL);
		assert(current_task->mm->page_directory != NULL);
	}

	for (int i=0; i < header->e_phnum; i++) {
		Elf32_Phdr *phdr = (Elf32_Phdr *)(data + header->e_phoff + header->e_phentsize * i);
		if (phdr->p_type == PT_LOAD) {
			// This is a segment to load!

			// Should this be writable to the task?
			bool writable = ((phdr->p_flags & PF_W) ? true : false);

#if ELF_DEBUG
			printk("Segment #%u: copy %u bytes from 0x%08x (data + offset) to 0x%08x (virt in task page dir); read%s\n",
					i, phdr->p_filesz, data + phdr->p_offset, phdr->p_vaddr, writable ? "-write" : "only");
#endif

			if (i == 0)
				assert(phdr->p_vaddr == 0x10000000);
			else
				assert(phdr->p_vaddr > 0x10000000);

			uint32 start_addr = phdr->p_vaddr;
			uint32 start_addr_aligned = (phdr->p_vaddr & 0xfffff000);
			uint32 end_addr   = start_addr + phdr->p_memsz;
			if (!IS_PAGE_ALIGNED(end_addr)) {
				end_addr &= ~(PAGE_SIZE - 1);
				end_addr += PAGE_SIZE;
			}

			if (end_addr > task->mm->brk_start) {
				uint32 new_brk = end_addr;
				if (!IS_PAGE_ALIGNED(new_brk)) {
					new_brk &= ~(PAGE_SIZE - 1);
					new_brk += PAGE_SIZE;
				}
				task->mm->brk_start = new_brk;
				task->mm->brk = new_brk;
			}

			// Allocate memory for this address in the task's address space, set for user mode
			vmm_alloc_user(start_addr_aligned, end_addr, mm, writable);

			// Switch to the new page directory, so that we can copy the data there
			page_directory_t *old_dir = current_directory;
			switch_page_directory(mm->page_directory);

			// Okay, we should have the memory. Let's clear it (since PARTS may be left empty by the memcpy,
			// e.g. the .bss section, and we do want zeroes to be there)
			memset((void *)start_addr_aligned, 0, end_addr - start_addr_aligned);

			// Copy the segment (e.g. .text + .rodata + .eh_frame, or .data + .bss) to the location
			// DO NOT use start_addr_aligned here - we want the program to dictate the exact location

			memcpy((void *)start_addr, data + phdr->p_offset, phdr->p_filesz);

			switch_page_directory(old_dir);
		}
		else if (phdr->p_type == PT_GNU_STACK || phdr->p_type == PT_GNU_RELRO || phdr->p_type == PT_GNU_EH_FRAME) {
			// Quietly ignore
		}
		else
			printk("Warning: skipping unsupported ELF program header (#%u, p_type = 0x%x)\n", i, phdr->p_type);
	}

	// Set up the reentrancy structure for Newlib
	// (It is initialized below, after switching to the new page directory.)
	uint32 reent_size = sizeof(struct _reent);
	if (reent_size & 0xfff) {
		reent_size &= 0xfffff000;
		reent_size += PAGE_SIZE;
	}

	vmm_alloc_user(task->mm->brk, task->mm->brk + reent_size, mm, PAGE_RW);

	//assert(current_directory == kernel_directory);
	page_directory_t *old_dir = current_directory;
	switch_page_directory(task->mm->page_directory);

	task->reent = (struct _reent *)task->mm->brk;
	_REENT_INIT_PTR(task->reent);
	task->mm->brk += reent_size;
	task->mm->brk_start += reent_size;

	assert(IS_PAGE_ALIGNED(task->mm->brk));
	assert(task->mm->brk == task->mm->brk_start);

	// The value brk has when the process starts;
	// userspace may not decrease the brk point below this address
	task->mm->initial_brk = task->mm->brk_start;

	// Copy the argv data from the kernel heap to the task's address space
	// This function updates argv to point to the new location.
	uint32 argc = 0;
	for (; argv[argc] != NULL; argc++) { }
	copy_argv_env_to_task(&argv, argc, task);

	uint32 envc = 0;
	assert(envp != NULL);
	for (; envp[envc] != NULL; envc++) { }
	copy_argv_env_to_task(&envp, envc, task);

	*((uint32 *)(USER_STACK_START - 0)) = (uint32)envp;
	*((uint32 *)(USER_STACK_START - 4)) = (uint32)argv;
	*((uint32 *)(USER_STACK_START - 8)) = (uint32)argc;

	// Update the task's name
	strlcpy((char *)task->name, argv[0], TASK_NAME_LEN);

	if (old_dir != kernel_directory) {
		// execve, stay with the new dir
	}
	else
		switch_page_directory(old_dir);

#if ELF_DEBUG

	printk("File has %u program headers (each %u bytes), %u section headers (each %u bytes)\n",
		   header->e_phnum, header->e_phentsize, header->e_shnum, header->e_shentsize);

	printk("Program Header:\n");
	for (int i=0; i < header->e_phnum; i++) {
		Elf32_Phdr *phdr = (Elf32_Phdr *)(data + header->e_phoff + header->e_phentsize * i);

		if (phdr->p_type == PT_LOAD) {
			printk("LOAD  offset 0x%08x vaddr 0x%08x alignment %u bytes\n", phdr->p_offset, phdr->p_vaddr, phdr->p_align);
			unsigned int f = phdr->p_flags;
			printk("      filesz 0x%08x memsz 0x%08x flags %c%c%c\n", phdr->p_filesz, phdr->p_memsz, 
					(f & PF_R ? 'r' : '-'), (f & PF_W ? 'w' : '-'), (f & PF_X ? 'x' : '-'));
		}
		else {
			printk("unsupported program header (#%u), skipping\n", i);
		}
	}

	// Find the string table
	assert(header->e_shoff != 0); // we need a section header
	Elf32_Shdr *string_table_hdr = (Elf32_Shdr *)(data + header->e_shoff + header->e_shentsize * header->e_shstrndx);
	char *string_table = (char *)(data + string_table_hdr->sh_offset);

	printk("Sections:\n");
	printk("Idx         Name Size     VMA      LMA      File off Align\n");
	for (int i=1; i < header->e_shnum; i++) { // skip #0, which is always empty
		Elf32_Shdr *shdr = (Elf32_Shdr *)(data + header->e_shoff + header->e_shentsize * i);

		char *name = (char *)&string_table[shdr->sh_name];

		printk("%03d %12s %08x %08x %08x %08x %u\n", i, name, shdr->sh_size, shdr->sh_addr, shdr->sh_addr /* TODO: LMA */, shdr->sh_offset, shdr->sh_addralign);
		unsigned int f = shdr->sh_flags;
		printk("                 ");
		if (shdr->sh_type != SHT_NOBITS)
			printk("CONTENTS, ");
		if ((f & SHF_ALLOC))
			printk("ALLOC, ");
		if ((f & SHF_WRITE) == 0)
			printk("READONLY, ");
		if ((f & SHF_EXECINSTR))
			printk("CODE\n");
		else
			printk("DATA\n");
	}
#endif // ELF_DEBUG

	// Try to find symbols, so we can get nice backtrace displays
	Elf32_Sym *symhdr = NULL;
	uint32 num_syms = 0;
	const char *sym_string_table = NULL;
	uint32 string_table_size = 0;

	for (uint32 i=1; i < header->e_shnum; i++) { // skip #0, which is always empty
		Elf32_Shdr *shdr = (Elf32_Shdr *)((uint32)data + header->e_shoff + (header->e_shentsize * i));

		if (shdr->sh_type == SHT_SYMTAB) {
			symhdr = (Elf32_Sym *)(data + shdr->sh_offset);
			num_syms = shdr->sh_size / shdr->sh_entsize;
			Elf32_Shdr *string_table_hdr = (Elf32_Shdr *)((uint32)data + header->e_shoff + shdr->sh_link * header->e_shentsize);
			string_table_size = string_table_hdr->sh_size;
			sym_string_table = (char *)(data + string_table_hdr->sh_offset);
			break;
		}
	}

	// Load symbols for this file, so that we can display them in backtraces
	if (!symhdr || !sym_string_table || num_syms < 1) {
		printk("Warning: failed to load symbols for %s\n", path);
	}
	else {
		// Clone the string table. Because load_symbols doesn't strdup() names
		// for performance reasons, we need the string table to keep existing
		// for as long as the task lives.
		char *old_table = task->symbol_string_table;
		task->symbol_string_table = kmalloc(string_table_size);
		task->symbol_string_table_size = string_table_size;
		memcpy(task->symbol_string_table, sym_string_table, string_table_size);

		if (load_symbols(symhdr, task->symbol_string_table, &task->symbols, num_syms) != 0) {
			printk("Warning: failed to load symbols for %s\n", path);
		}
		else if (old_table) {
			// execve, so free the old one, or it'll leak
			kfree(old_table);
		}
	}

	// If we're still here: set the program entry point
	// (This updates the value on the stack in task.c)
	task->new_entry = (uint32)header->e_entry;
	set_entry_point((task_t *)task, task->new_entry);

	retval = 0;
	/* fall through on success */

err:

	kfree(data);
	assert(retval <= 0);
	return retval;
}
Example #24
void Machine::enable_interrupts() {
  assert(!interrupts_enabled());
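  // "sti" sets the x86 interrupt-enable flag (IF), unmasking maskable interrupts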
  __asm__ __volatile__ ("sti");
}
Example #25
void Machine::disable_interrupts() {
  assert(interrupts_enabled());
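  // "cli" clears the x86 interrupt-enable flag (IF), masking maskable interrupts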
  __asm__ __volatile__ ("cli");
}
Example #26
void sched_update(struct ls_state *ls)
{
	struct sched_state *s = &ls->sched;
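	/* (ACTION(s, flag) is presumably a per-agent action flag on
	 * s->cur_agent, per this scheduler's conventions.) */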
	int old_tid = s->cur_agent->tid;
	int new_tid;

	/* wait until the guest is ready */
	if (!s->guest_init_done) {
		if (kern_sched_init_done(ls->eip)) {
			s->guest_init_done = true;
			/* Deprecated since kern_get_current_tid went away. */
			// assert(old_tid == new_tid && "init tid mismatch");
		} else {
			return;
		}
	}

	/* The Importance of Being Assertive, A Trivial Style Guideline for
	 * Serious Programmers, by Ben Blum */
	if (s->entering_timer) {
		assert(ls->eip == kern_get_timer_wrap_begin() &&
		       "simics is a clown and tried to delay our interrupt :<");
		s->entering_timer = false;
	} else {
		if (kern_timer_entering(ls->eip)) {
			lsprintf(DEV, "A timer tick that wasn't ours (0x%x).\n",
				 (int)READ_STACK(ls->cpu0, 0));
			ls->eip = avoid_timer_interrupt_immediately(ls->cpu0);
		}
	}

	/**********************************************************************
	 * Update scheduler state.
	 **********************************************************************/

	if (kern_thread_switch(ls->cpu0, ls->eip, &new_tid) && new_tid != old_tid) {
		/*
		 * So, fork needs to be handled twice, both here and below in the
		 * runnable case. And for kernels that trigger both, both places will
		 * need to have a check for whether the newly forked thread exists
		 * already.
		 *
		 * Sleep and vanish actually only need to happen here. They should
		 * check both the rq and the dq, 'cause there's no telling where the
		 * thread got moved to before. As for the descheduling case, that needs
		 * to check for a new type of action flag "asleep" or "vanished" (and
		 * I guess using last_vanished_agent might work), and probably just
		 * assert that that condition holds if the thread couldn't be found
		 * for the normal descheduling case.
		 */
		/* Has to be handled before updating cur_agent, of course. */
		handle_sleep(s);
		handle_vanish(s);
		handle_unsleep(s, new_tid);

		/* Careful! On some kernels, the trigger for a new agent forking
		 * (where it first gets added to the RQ) may happen AFTER its
		 * tcb is set to be the currently running thread. This would
		 * cause this case to be reached before agent_fork() is called,
		 * so agent_by_tid would fail. Instead, we have an option to
		 * find it later. (see the kern_thread_runnable case below.) */
		struct agent *next = agent_by_tid_or_null(&s->rq, new_tid);
		if (next == NULL) next = agent_by_tid_or_null(&s->dq, new_tid);

		if (next != NULL) {
			lsprintf(DEV, "switched threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		/* This fork check is for kernels which context switch to a
		 * newly-forked thread before adding it to the runqueue - and
		 * possibly won't do so at all (if current_extra_runnable). We
		 * need to do agent_fork now. (agent_fork *also* needs to be
		 * handled below, for kernels which don't c-s to the new thread
		 * immediately.) */
		} else if (handle_fork(s, new_tid, false)) {
			next = agent_by_tid_or_null(&s->dq, new_tid);
			assert(next != NULL && "Newly forked thread not on DQ");
			lsprintf(DEV, "switching threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		} else {
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Couldn't find "
				 "new thread %d; current %d; did you forget to "
				 "tell_landslide_forking()?\n" COLOUR_DEFAULT,
				 new_tid, s->cur_agent->tid);
			assert(0);
		}
		/* Some debug info to help the studence. */
		if (s->cur_agent->tid == kern_get_init_tid()) {
			lsprintf(DEV, "Now running init.\n");
		} else if (s->cur_agent->tid == kern_get_shell_tid()) {
			lsprintf(DEV, "Now running shell.\n");
		} else if (kern_has_idle() &&
			   s->cur_agent->tid == kern_get_idle_tid()) {
			lsprintf(DEV, "Now idling.\n");
		}
	}

	s->current_extra_runnable = kern_current_extra_runnable(ls->cpu0);

	int target_tid;
	int mutex_addr;

	/* Timer interrupt handling. */
	if (kern_timer_entering(ls->eip)) {
		// XXX: same as the comment in the below condition.
		if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
			assert(!ACTION(s, handling_timer));
		} else {
			lsprintf(DEV, "WARNING: allowing a nested timer on "
				 "tid %d's stack\n", s->cur_agent->tid);
		}
		ACTION(s, handling_timer) = true;
		lsprintf(INFO, "%d timer enter from 0x%x\n", s->cur_agent->tid,
		         (unsigned int)READ_STACK(ls->cpu0, 0));
	} else if (kern_timer_exiting(ls->eip)) {
		if (ACTION(s, handling_timer)) {
			// XXX: This condition is a hack to compensate for when
			// simics "sometimes", when keeping a schedule-in-
			// flight, takes the caused timer interrupt immediately,
			// even before the iret.
			if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
				ACTION(s, handling_timer) = false;
				s->just_finished_reschedule = true;
			}
			/* If the schedule target was in a timer interrupt when we
			 * decided to schedule him, then now is when the operation
			 * finishes landing. (otherwise, see below)
			 * FIXME: should this be inside the above if statement? */
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		} else {
			lsprintf(INFO, "WARNING: exiting a non-timer interrupt "
				 "through a path shared with the timer..? (from 0x%x, #%d)\n", (int)READ_STACK(ls->cpu0, 0), (int)READ_STACK(ls->cpu0, -2));
		}
	/* Context switching. */
	} else if (kern_context_switch_entering(ls->eip)) {
		/* It -is- possible for a context switch to interrupt a
		 * context switch if a timer goes off before c-s disables
		 * interrupts. TODO: if we care, make this an int counter. */
		ACTION(s, context_switch) = true;
		/* Maybe update the voluntary resched trace. See schedule.h */
		if (!ACTION(s, handling_timer)) {
			lsprintf(DEV, "Voluntary resched tid ");
			print_agent(DEV, s->cur_agent);
			printf(DEV, "\n");
			s->voluntary_resched_tid = s->cur_agent->tid;
			if (s->voluntary_resched_stack != NULL)
				MM_FREE(s->voluntary_resched_stack);
			s->voluntary_resched_stack =
				stack_trace(ls->cpu0, ls->eip, s->cur_agent->tid);
		}
	} else if (kern_context_switch_exiting(ls->eip)) {
		assert(ACTION(s, cs_free_pass) || ACTION(s, context_switch));
		ACTION(s, context_switch) = false;
		ACTION(s, cs_free_pass) = false;
		/* For threads that context switched of their own accord. */
		if (!HANDLING_INTERRUPT(s)) {
			s->just_finished_reschedule = true;
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		}
	/* Lifecycle. */
	} else if (kern_forking(ls->eip)) {
		assert_no_action(s, "forking");
		ACTION(s, forking) = true;
	} else if (kern_sleeping(ls->eip)) {
		assert_no_action(s, "sleeping");
		ACTION(s, sleeping) = true;
	} else if (kern_vanishing(ls->eip)) {
		assert_no_action(s, "vanishing");
		ACTION(s, vanishing) = true;
	} else if (kern_readline_enter(ls->eip)) {
		assert_no_action(s, "readlining");
		ACTION(s, readlining) = true;
	} else if (kern_readline_exit(ls->eip)) {
		assert(ACTION(s, readlining));
		ACTION(s, readlining) = false;
	/* Runnable state change (incl. consequences of fork, vanish, sleep). */
	} else if (kern_thread_runnable(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to become runnable. Was it just spawned? */
		if (!handle_fork(s, target_tid, true)) {
			agent_wake(s, target_tid);
		}
	} else if (kern_thread_descheduling(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to deschedule. Is it vanishing/sleeping? */
		agent_deschedule(s, target_tid);
	/* Mutex tracking and noob deadlock detection */
	} else if (kern_mutex_locking(ls->cpu0, ls->eip, &mutex_addr)) {
		//assert(!ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = true;
		s->cur_agent->blocked_on_addr = mutex_addr;
	} else if (kern_mutex_blocking(ls->cpu0, ls->eip, &target_tid)) {
		/* Possibly not the case - if this thread entered mutex_lock,
		 * then switched and someone took it, these would be set already
		 * assert(s->cur_agent->blocked_on == NULL);
		 * assert(s->cur_agent->blocked_on_tid == -1); */
		lsprintf(DEV, "mutex: on 0x%x tid %d blocks, owned by %d\n",
			 s->cur_agent->blocked_on_addr, s->cur_agent->tid,
			 target_tid);
		s->cur_agent->blocked_on_tid = target_tid;
		if (deadlocked(s)) {
			lsprintf(BUG, COLOUR_BOLD COLOUR_RED "DEADLOCK! ");
			print_deadlock(BUG, s->cur_agent);
			printf(BUG, "\n");
			found_a_bug(ls);
		}
	} else if (kern_mutex_locking_done(ls->eip)) {
		//assert(ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = false;
		s->cur_agent->blocked_on = NULL;
		s->cur_agent->blocked_on_tid = -1;
		s->cur_agent->blocked_on_addr = -1;
		/* no need to check for deadlock; this can't create a cycle. */
		mutex_block_others(&s->rq, mutex_addr, s->cur_agent,
				   s->cur_agent->tid);
	} else if (kern_mutex_unlocking(ls->cpu0, ls->eip, &mutex_addr)) {
		/* It's allowed to have a mutex_unlock call inside a mutex_lock
		 * (and it can happen), or mutex_lock inside of mutex_lock, but
		 * not the other way around. */
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = true;
		mutex_block_others(&s->rq, mutex_addr, NULL, -1);
	} else if (kern_mutex_unlocking_done(ls->eip)) {
		assert(ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = false;
	}

	/**********************************************************************
	 * Exercise our will upon the guest kernel
	 **********************************************************************/

	/* Some checks before invoking the arbiter. First see if an operation of
	 * ours is already in-flight. */
	if (s->schedule_in_flight) {
		if (s->schedule_in_flight == s->cur_agent) {
			/* the in-flight schedule operation is cleared for
			 * landing. note that this may cause another one to
			 * be triggered again as soon as the context switcher
			 * and/or the timer handler finishes; it is up to the
			 * arbiter to decide this. */
			assert(ACTION(s, schedule_target));
			/* this condition should trigger in the middle of the
			 * switch, rather than after it finishes. (which is also
			 * why we leave the schedule_target flag turned on).
			 * the special case is for newly forked agents that are
			 * schedule targets - they won't exit timer or c-s above
			 * so here is where we have to clear it for them. */
			if (ACTION(s, just_forked)) {
				/* Interrupts are "probably" off, but that's why
				 * just_finished_reschedule is persistent. */
				lsprintf(DEV, "Finished flying to %d.\n",
					 s->cur_agent->tid);
				ACTION(s, schedule_target) = false;
				ACTION(s, just_forked) = false;
				s->schedule_in_flight = NULL;
				s->just_finished_reschedule = true;
			} else {
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s));
			}
			/* The schedule_in_flight flag itself is cleared above,
			 * along with schedule_target. Sometimes sched_recover
			 * sets in_flight and needs it not cleared here. */
		} else {
			/* An undesirable thread has been context-switched away
			 * from either from an interrupt handler (timer/kbd) or
			 * of its own accord. We need to wait for it to get back
			 * to its own execution before triggering an interrupt
			 * on it; in the former case, this will be just after it
			 * irets; in the latter, just after the c-s returns. */
			if (kern_timer_exiting(ls->eip) ||
			     (!HANDLING_INTERRUPT(s) &&
			      kern_context_switch_exiting(ls->eip)) ||
			     ACTION(s, just_forked)) {
				/* an undesirable agent just got switched to;
				 * keep the pending schedule in the air. */
				// XXX: this seems to get taken too soon? change
				// it somehow to cause_.._immediately. and then
				// see the asserts/comments in the action
				// handling_timer sections above.
				/* some kernels (pathos) still have interrupts
				 * off or scheduler locked at this point; so
				 * properties of !R */
				if (interrupts_enabled(ls->cpu0) &&
				    kern_ready_for_timer_interrupt(ls->cpu0)) {
					lsprintf(INFO, "keeping schedule in-"
						 "flight at 0x%x\n", ls->eip);
					cause_timer_interrupt(ls->cpu0);
					s->entering_timer = true;
					s->delayed_in_flight = false;
				} else {
					lsprintf(INFO, "Want to keep schedule "
						 "in-flight at 0x%x; have to "
						 "delay\n", ls->eip);
					s->delayed_in_flight = true;
				}
				/* If this was the special case where the
				 * undesirable thread was just forked, keeping
				 * the schedule in flight will cause it to do a
				 * normal context switch. So just_forked is no
				 * longer needed. */
				ACTION(s, just_forked) = false;
			} else if (s->delayed_in_flight &&
				   interrupts_enabled(ls->cpu0) &&
				   kern_ready_for_timer_interrupt(ls->cpu0)) {
				lsprintf(INFO, "Delayed in-flight timer tick "
					 "at 0x%x\n", ls->eip);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
				s->delayed_in_flight = false;
			} else {
				/* they'd better not have "escaped" */
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s) ||
				       !interrupts_enabled(ls->cpu0) ||
				       !kern_ready_for_timer_interrupt(ls->cpu0));
			}
		}
		/* in any case we have no more decisions to make here */
		return;
	} else if (ACTION(s, just_forked)) {
		ACTION(s, just_forked) = false;
		s->just_finished_reschedule = true;
	}
	assert(!s->schedule_in_flight);

	/* Can't do anything before the test actually starts. */
	if (ls->test.current_test == NULL) {
		return;
	}

	/* XXX TODO: This will "leak" an undesirable thread to execute an
	 * instruction if the timer/kbd handler is an interrupt gate, so check
	 * also if we're about to iret and then examine the eflags on the
	 * stack. Also, "sti" and "popf" are interesting, so check for those.
	 * Also, do trap gates enable interrupts if they were off? o_O */
	if (!interrupts_enabled(ls->cpu0)) {
		return;
	}

	/* If a schedule operation is just finishing, we should allow the thread
	 * to get back to its own execution before making another choice. Note
	 * that when we previously decided to interrupt the thread, it will have
	 * executed the single instruction we made the choice at then taken the
	 * interrupt, so we return to the next instruction, not the same one. */
	if (ACTION(s, schedule_target)) {
		return;
	}

	/* TODO: have an extra mode which will allow us to preempt the timer
	 * handler. */
	if (HANDLING_INTERRUPT(s) || !kern_ready_for_timer_interrupt(ls->cpu0)) {
		return;
	}

	/* As kernel_specifics.h says, no preempting during mutex unblocking. */
	if (ACTION(s, mutex_unlocking)) {
		return;
	}

	/* Okay, are we at a choice point? */
	bool voluntary;
	bool just_finished_reschedule = s->just_finished_reschedule;
	s->just_finished_reschedule = false;
	/* TODO: arbiter may also want to see the trace_entry_t */
	if (arbiter_interested(ls, just_finished_reschedule, &voluntary)) {
		struct agent *a;
		bool our_choice;
		/* TODO: as an optimisation (in serialisation state / etc), the
		 * arbiter may return NULL if there was only one possible
		 * choice. */
		if (arbiter_choose(ls, &a, &our_choice)) {
			/* Effect the choice that was made... */
			if (a != s->cur_agent) {
				lsprintf(CHOICE, "from agent %d, arbiter chose "
					 "%d at 0x%x (called at 0x%x)\n",
					 s->cur_agent->tid, a->tid, ls->eip,
					 (unsigned int)READ_STACK(ls->cpu0, 0));
				set_schedule_target(s, a);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
			}
			/* Record the choice that was just made. */
			if (ls->test.test_ever_caused &&
			    ls->test.start_population != s->most_agents_ever) {
				save_setjmp(&ls->save, ls, a->tid, our_choice,
					    false, voluntary);
			}
		} else {
			lsprintf(BUG, "no agent was chosen at eip 0x%x\n",
				 ls->eip);
		}
	}
	/* XXX TODO: it may be that not every timer interrupt triggers a context
	 * switch, so we should watch out if a handler doesn't enter the c-s. */
}
Example #27
void
assert_ints_enabled(void)
{
	ASSERT(!interrupts_unleashed || interrupts_enabled());
}
Example #28
/*ARGSUSED*/
void
do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int newipl, oldipl = cpu->cpu_pri;
	uint_t vector;
	caddr_t newsp;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

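	/* bump the per-CPU interrupt stamp (a 16-bit counter of interrupts
	 * taken on this CPU) */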
	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		dosoftint(rp);
		ASSERT(!interrupts_enabled());
		return;
	}

	/*
	 * Raise the interrupt priority.
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;
	cpu->cpu_pri = newipl;
	vector = rp->r_trapno;
#ifdef TRAPTRACE
	ttp->ttr_vector = vector;
#endif	/* TRAPTRACE */
	if (newipl > LOCK_LEVEL) {
		/*
		 * High priority interrupts run on this cpu's interrupt stack.
		 */
		if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, dispatch_hilevel, vector, 0);
		} else { /* already on the interrupt stack */
			dispatch_hilevel(vector, 0);
		}
		(void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
	} else {
		/*
		 * Run this interrupt in a separate thread.
		 */
		newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
		switch_sp_and_call(newsp, dispatch_hardint, vector, oldipl);
	}

#if !defined(__xpv)
	/*
	 * Deliver any pending soft interrupts.
	 */
	if (cpu->cpu_softinfo.st_pending)
		dosoftint(rp);
#endif	/* !__xpv */
}
Example #29
void
test_wakeup(int all)
{
	Tid ret;
	long ii;
	static Tid child[NTHREADS];
	unintr_printf("starting wakeup test\n");

	done = 0;
	nr_sleeping = 0;

	queue = wait_queue_create();
	assert(queue);

	/* initial thread sleep and wake up tests */
	ret = thread_sleep(NULL);
	assert(ret == THREAD_INVALID);
	unintr_printf("initial thread returns from sleep(NULL)\n");

	ret = thread_sleep(queue);
	assert(ret == THREAD_NONE);
	unintr_printf("initial thread returns from sleep(NONE)\n");

	ret = thread_wakeup(NULL, 0);
	assert(ret == 0);
	ret = thread_wakeup(queue, 1);
	assert(ret == 0);

	/* create all threads */
	for (ii = 0; ii < NTHREADS; ii++) {
		child[ii] = thread_create((void (*)(void *))test_wakeup_thread,
					  (void *)ii);
		assert(thread_ret_ok(child[ii]));
	}
out:
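	/* note: the gotos below jump back up to this label, re-polling until
	 * enough threads have gone to sleep before issuing the wakeup */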
	while (__sync_fetch_and_add(&done, 0) < NTHREADS) {
		if (all) {
			/* wait until all threads have slept */
			if (__sync_fetch_and_add(&nr_sleeping, 0) < NTHREADS) {
				goto out;
			}
			/* we will wake up all threads in the thread_wakeup
			 * call below so set nr_sleeping to 0 */
			nr_sleeping = 0;
		} else {
			/* wait until at least one thread has slept */
			if (__sync_fetch_and_add(&nr_sleeping, 0) < 1) {
				goto out;
			}
			/* wake up one thread in the wakeup call below */
			__sync_fetch_and_add(&nr_sleeping, -1);
		}
		/* spin for 5 ms. this allows testing that the sleeping thread
		 * sleeps for at least 5 ms. */
		spin(5000);

		/* tests thread_wakeup */
		assert(interrupts_enabled());
		ret = thread_wakeup(queue, all);
		assert(interrupts_enabled());
		assert(ret >= 0);
		assert(all ? ret == NTHREADS : ret == 1);
	}
	/* we expect nr_sleeping is 0 at this point */
	assert(nr_sleeping == 0);
	assert(interrupts_enabled());
	wait_queue_destroy(queue);
	unintr_printf("wakeup test done\n");
}
Example #30
/*
 * Interrupt service routine, called with interrupts disabled.
 */
void
apix_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int vector = rp->r_trapno, newipl, oldipl = cpu->cpu_pri, ret;
	apix_vector_t *vecp = NULL;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_cpuid = cpu->cpu_id;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = (uchar_t)oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		/*
		 * It might be the case that when an interrupt is triggered,
		 * the spl is raised to high by splhigh(). Later when do_splx()
		 * is called to restore the spl, both hardware and software
		 * interrupt pending flags are checked and a SOFTINT is faked
		 * accordingly.
		 */
		(void) apix_do_pending_hilevel(cpu, rp);
		(void) apix_do_pending_hardint(cpu, rp);
		(void) apix_do_softint(rp);
		ASSERT(!interrupts_enabled());
#ifdef TRAPTRACE
	ttp->ttr_vector = T_SOFTINT;
#endif
		return;
	}

	/*
	 * Send EOI to local APIC
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = (uchar_t)newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;

	vector = rp->r_trapno;
	vecp = xv_vector(cpu->cpu_id, vector);
#ifdef TRAPTRACE
	ttp->ttr_vector = (short)vector;
#endif	/* TRAPTRACE */

	/*
	 * Direct dispatch for IPI, MSI, MSI-X
	 */
	if (vecp && vecp->v_type != APIX_TYPE_FIXED &&
	    newipl > MAX(oldipl, cpu->cpu_base_spl)) {
		caddr_t newsp;

		if (newipl > LOCK_LEVEL) {
			if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp)
			    == 0) {
				newsp = cpu->cpu_intr_stack;
				switch_sp_and_call(newsp, apix_dispatch_hilevel,
				    vector, 0);
			} else {
				apix_dispatch_hilevel(vector, 0);
			}
			(void) apix_hilevel_intr_epilog(cpu, oldipl);
		} else {
			newsp = apix_intr_thread_prolog(cpu, newipl,
			    (caddr_t)rp);
			switch_sp_and_call(newsp, apix_dispatch_lowlevel,
			    vector, oldipl);
		}
	} else {
		/* Add to per-pil pending queue */
		apix_add_pending_hardint(vector);
		if (newipl <= MAX(oldipl, cpu->cpu_base_spl) ||
		    !apixs[cpu->cpu_id]->x_intr_pending)
			return;
	}

	if (apix_do_pending_hilevel(cpu, rp) < 0)
		return;

	do {
		ret = apix_do_pending_hardint(cpu, rp);

		/*
		 * Deliver any pending soft interrupts.
		 */
		(void) apix_do_softint(rp);
	} while (!ret && LOWLEVEL_PENDING(cpu));
}