Example 1
/* The user mem heap tracking can only work for a single address space. We want
 * to pay attention to the userspace program under test, not the shell or init
 * or idle or anything like that. Figure out what that process's cr3 is. */
static bool ignore_user_access(struct ls_state *ls)
{
	unsigned int current_tid = ls->sched.cur_agent->tid;
	unsigned int cr3 = GET_CPU_ATTR(ls->cpu0, cr3);

	if (!testing_userspace()) {
		/* Don't attempt to track user accesses for kernelspace tests.
		 * Tests like vanish_vanish require multiple user cr3s, which
		 * we don't support when tracking user accesses. When doing a
		 * userspace test, we need to do the below cr3 assertion, but
		 * when doing a kernel test we cannot, so instead we have to
		 * ignore all user accesses entirely. */
		return true;
	} else if (current_tid == kern_get_init_tid() ||
	    current_tid == kern_get_shell_tid() ||
	    (kern_has_idle() && current_tid == kern_get_idle_tid())) {
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_THUNDERBIRDS) {
		ls->user_mem.cr3 = USER_CR3_WAITING_FOR_EXEC;
		ls->user_mem.cr3_tid = current_tid;
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_EXEC) {
		/* must wait for a trip through kernelspace; see below */
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_EXEC_HAPPENED) {
		/* recognized non-shell-non-idle-non-init user process has been
		 * through exec and back. hopefully its new cr3 is permanent. */
		assert(cr3 != USER_CR3_WAITING_FOR_EXEC);
		assert(cr3 != USER_CR3_EXEC_HAPPENED);
		ls->user_mem.cr3 = cr3;
		lsprintf(DEV, "Registered cr3 value 0x%x for userspace "
			 "tid %d.\n", cr3, current_tid);
		return false;
	} else if (ls->user_mem.cr3 != cr3) {
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Memory tracking for "
			 "more than 1 user address space is unsupported!\n");
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Already tracking for "
			 "cr3 0x%x, belonging to tid %d; current cr3 0x%x, "
			 "current tid %d\n", ls->user_mem.cr3,
			 ls->user_mem.cr3_tid, cr3, current_tid);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "If you're trying to "
			 "run vanish_vanish, make sure TESTING_USERSPACE=0.\n");
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Otherwise, make sure "
			 "your test case doesn't fork().\n" COLOUR_DEFAULT);
		assert(0);
		return false;
	} else {
		return false;
	}
}
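
The three-stage handshake above relies on sentinel values stored in user_mem.cr3 that can never collide with a real cr3. A minimal sketch of how those sentinels might be defined (the names come from the code above, but the numeric values and per-value comments are assumptions; the real definitions live elsewhere in the codebase):

/* Hypothetical sketch: real cr3 values are page-directory physical addresses
 * (page-aligned, hence never equal to small constants), so low integers are
 * safe to use as "not a real cr3 yet" sentinels. */
#define USER_CR3_WAITING_FOR_THUNDERBIRDS 0x1 /* no user thread observed yet */
#define USER_CR3_WAITING_FOR_EXEC         0x2 /* user thread seen, but its cr3
                                               * will change when it execs */
#define USER_CR3_EXEC_HAPPENED            0x3 /* exec observed (presumably set
                                               * by a kernel exec hook); the
                                               * next user cr3 is permanent */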
Example 2
// TODO: some way of telling when actually ... use readline to know
static bool anybody_alive(conf_object_t *cpu, struct test_state *t,
			  struct sched_state *s)
{
	struct agent *shell;

	if (t->test_ever_caused) {
		if (t->start_population == s->most_agents_ever) {
			/* Then the shell hasn't even spawned it yet. */
			return true;
		} else if (t->start_population != s->num_agents) {
			/* Shell's descendants are still alive. */
			assert(t->start_population < s->num_agents);
			return true;
		}
	}

	/* Now we are either before the beginning of the test case, or waiting
	 * for the shell and init to clean up the last remains. Either way, wait
	 * for shell and init to finish switching back and forth until both of
	 * them are suitably blocked. */

	/* In atomic scheduler paths, both threads might be off the runqueue
	 * (i.e., one going to sleep and switching to the other). Since we
	 * assume the scheduler is sane, this condition should hold then. */
	if (!kern_ready_for_timer_interrupt(cpu) || !interrupts_enabled(cpu)) {
		return true;
	}

	/* Find the shell, whether it's currently runnable or descheduled. */
	if ((shell = agent_by_tid_or_null(&s->rq, kern_get_shell_tid())) ||
	    (shell = agent_by_tid_or_null(&s->dq, kern_get_shell_tid()))) {
		if (shell->action.readlining) {
			if (kern_has_idle()) {
				return s->cur_agent->tid != kern_get_idle_tid();
			} else {
				return (Q_GET_SIZE(&s->rq) != 0 ||
					Q_GET_SIZE(&s->sq) != 0);
			}
		} else {
			return true;
		}
	}

	/* If we get here, the shell wasn't even created yet..! */
	return true;
}
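
For context, agent_by_tid_or_null() is the non-asserting lookup used above to probe the RQ and DQ. A minimal sketch of what it presumably does — the Q_FOREACH iterator, the "nobe" link name, and the struct agent_q type are made-up stand-ins for the project's real queue machinery:

/* Hypothetical sketch: linear scan of a queue for a tid; NULL if absent. */
static struct agent *agent_by_tid_or_null_sketch(struct agent_q *q, int tid)
{
	struct agent *a;
	Q_FOREACH(a, q, nobe) {      /* made-up iterator macro */
		if (a->tid == tid)
			return a;
	}
	return NULL;
}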
Example 3
void sched_update(struct ls_state *ls)
{
	struct sched_state *s = &ls->sched;
	int old_tid = s->cur_agent->tid;
	int new_tid;

	/* wait until the guest is ready */
	if (!s->guest_init_done) {
		if (kern_sched_init_done(ls->eip)) {
			s->guest_init_done = true;
			/* Deprecated since kern_get_current_tid went away. */
			// assert(old_tid == new_tid && "init tid mismatch");
		} else {
			return;
		}
	}

	/* The Importance of Being Assertive, A Trivial Style Guideline for
	 * Serious Programmers, by Ben Blum */
	if (s->entering_timer) {
		assert(ls->eip == kern_get_timer_wrap_begin() &&
		       "simics is a clown and tried to delay our interrupt :<");
		s->entering_timer = false;
	} else {
		if (kern_timer_entering(ls->eip)) {
			lsprintf(DEV, "A timer tick that wasn't ours (0x%x).\n",
				 (int)READ_STACK(ls->cpu0, 0));
			ls->eip = avoid_timer_interrupt_immediately(ls->cpu0);
		}
	}

	/**********************************************************************
	 * Update scheduler state.
	 **********************************************************************/

	if (kern_thread_switch(ls->cpu0, ls->eip, &new_tid) &&
	    new_tid != old_tid) {
		/*
		 * So, fork needs to be handled twice, both here and below in the
		 * runnable case. And for kernels that trigger both, both places will
		 * need to have a check for whether the newly forked thread exists
		 * already.
		 *
		 * Sleep and vanish actually only need to happen here. They should
		 * check both the rq and the dq, 'cause there's no telling where the
		 * thread got moved to before. As for the descheduling case, that needs
		 * to check for a new type of action flag "asleep" or "vanished" (and
		 * I guess using last_vanished_agent might work), and probably just
		 * assert that that condition holds if the thread couldn't be found
		 * for the normal descheduling case.
		 */
		/* Has to be handled before updating cur_agent, of course. */
		handle_sleep(s);
		handle_vanish(s);
		handle_unsleep(s, new_tid);

		/* Careful! On some kernels, the trigger for a new agent forking
		 * (where it first gets added to the RQ) may happen AFTER its
		 * tcb is set to be the currently running thread. This would
		 * cause this case to be reached before agent_fork() is called,
		 * so agent_by_tid would fail. Instead, we have an option to
		 * find it later. (see the kern_thread_runnable case below.) */
		struct agent *next = agent_by_tid_or_null(&s->rq, new_tid);
		if (next == NULL) next = agent_by_tid_or_null(&s->dq, new_tid);

		if (next != NULL) {
			lsprintf(DEV, "switched threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		/* This fork check is for kernels which context switch to a
		 * newly-forked thread before adding it to the runqueue - and
		 * possibly won't do so at all (if current_extra_runnable). We
		 * need to do agent_fork now. (agent_fork *also* needs to be
		 * handled below, for kernels which don't c-s to the new thread
		 * immediately.) */
		} else if (handle_fork(s, new_tid, false)) {
			next = agent_by_tid_or_null(&s->dq, new_tid);
			assert(next != NULL && "Newly forked thread not on DQ");
			lsprintf(DEV, "switching threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		} else {
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Couldn't find "
				 "new thread %d; current %d; did you forget to "
				 "tell_landslide_forking()?\n" COLOUR_DEFAULT,
				 new_tid, s->cur_agent->tid);
			assert(0);
		}
		/* Some debug info to help the students. */
		if (s->cur_agent->tid == kern_get_init_tid()) {
			lsprintf(DEV, "Now running init.\n");
		} else if (s->cur_agent->tid == kern_get_shell_tid()) {
			lsprintf(DEV, "Now running shell.\n");
		} else if (kern_has_idle() &&
			   s->cur_agent->tid == kern_get_idle_tid()) {
			lsprintf(DEV, "Now idling.\n");
		}
	}

	s->current_extra_runnable = kern_current_extra_runnable(ls->cpu0);

	int target_tid;
	int mutex_addr;

	/* Timer interrupt handling. */
	if (kern_timer_entering(ls->eip)) {
		// XXX: same as the comment in the below condition.
		if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
			assert(!ACTION(s, handling_timer));
		} else {
			lsprintf(DEV, "WARNING: allowing a nested timer on "
				 "tid %d's stack\n", s->cur_agent->tid);
		}
		ACTION(s, handling_timer) = true;
		lsprintf(INFO, "%d timer enter from 0x%x\n", s->cur_agent->tid,
		         (unsigned int)READ_STACK(ls->cpu0, 0));
	} else if (kern_timer_exiting(ls->eip)) {
		if (ACTION(s, handling_timer)) {
			// XXX: This condition is a hack to compensate for when
			// simics "sometimes", when keeping a schedule-in-
			// flight, takes the caused timer interrupt immediately,
			// even before the iret.
			if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
				ACTION(s, handling_timer) = false;
				s->just_finished_reschedule = true;
			}
			/* If the schedule target was in a timer interrupt when we
			 * decided to schedule him, then now is when the operation
			 * finishes landing. (otherwise, see below)
			 * FIXME: should this be inside the above if statement? */
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		} else {
			lsprintf(INFO, "WARNING: exiting a non-timer interrupt "
				 "through a path shared with the timer..? (from 0x%x, #%d)\n", (int)READ_STACK(ls->cpu0, 0), (int)READ_STACK(ls->cpu0, -2));
		}
	/* Context switching. */
	} else if (kern_context_switch_entering(ls->eip)) {
		/* It -is- possible for a context switch to interrupt a
		 * context switch if a timer goes off before c-s disables
		 * interrupts. TODO: if we care, make this an int counter. */
		ACTION(s, context_switch) = true;
		/* Maybe update the voluntary resched trace. See schedule.h */
		if (!ACTION(s, handling_timer)) {
			lsprintf(DEV, "Voluntary resched tid ");
			print_agent(DEV, s->cur_agent);
			printf(DEV, "\n");
			s->voluntary_resched_tid = s->cur_agent->tid;
			if (s->voluntary_resched_stack != NULL)
				MM_FREE(s->voluntary_resched_stack);
			s->voluntary_resched_stack =
				stack_trace(ls->cpu0, ls->eip, s->cur_agent->tid);
		}
	} else if (kern_context_switch_exiting(ls->eip)) {
		assert(ACTION(s, cs_free_pass) || ACTION(s, context_switch));
		ACTION(s, context_switch) = false;
		ACTION(s, cs_free_pass) = false;
		/* For threads that context switched of their own accord. */
		if (!HANDLING_INTERRUPT(s)) {
			s->just_finished_reschedule = true;
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		}
	/* Lifecycle. */
	} else if (kern_forking(ls->eip)) {
		assert_no_action(s, "forking");
		ACTION(s, forking) = true;
	} else if (kern_sleeping(ls->eip)) {
		assert_no_action(s, "sleeping");
		ACTION(s, sleeping) = true;
	} else if (kern_vanishing(ls->eip)) {
		assert_no_action(s, "vanishing");
		ACTION(s, vanishing) = true;
	} else if (kern_readline_enter(ls->eip)) {
		assert_no_action(s, "readlining");
		ACTION(s, readlining) = true;
	} else if (kern_readline_exit(ls->eip)) {
		assert(ACTION(s, readlining));
		ACTION(s, readlining) = false;
	/* Runnable state change (incl. consequences of fork, vanish, sleep). */
	} else if (kern_thread_runnable(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to become runnable. Was it just spawned? */
		if (!handle_fork(s, target_tid, true)) {
			agent_wake(s, target_tid);
		}
	} else if (kern_thread_descheduling(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to deschedule. Is it vanishing/sleeping? */
		agent_deschedule(s, target_tid);
	/* Mutex tracking and noob deadlock detection */
	} else if (kern_mutex_locking(ls->cpu0, ls->eip, &mutex_addr)) {
		//assert(!ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = true;
		s->cur_agent->blocked_on_addr = mutex_addr;
	} else if (kern_mutex_blocking(ls->cpu0, ls->eip, &target_tid)) {
		/* Possibly not the case - if this thread entered mutex_lock,
		 * then switched and someone took it, these would be set already
		 * assert(s->cur_agent->blocked_on == NULL);
		 * assert(s->cur_agent->blocked_on_tid == -1); */
		lsprintf(DEV, "mutex: on 0x%x tid %d blocks, owned by %d\n",
			 s->cur_agent->blocked_on_addr, s->cur_agent->tid,
			 target_tid);
		s->cur_agent->blocked_on_tid = target_tid;
		if (deadlocked(s)) {
			lsprintf(BUG, COLOUR_BOLD COLOUR_RED "DEADLOCK! ");
			print_deadlock(BUG, s->cur_agent);
			printf(BUG, "\n");
			found_a_bug(ls);
		}
	} else if (kern_mutex_locking_done(ls->eip)) {
		//assert(ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = false;
		/* Save the mutex's address before clearing it, so we can still
		 * tell which waiters on the RQ are blocked on this mutex.
		 * (mutex_addr is not set on this branch of the if-chain.) */
		int locked_addr = s->cur_agent->blocked_on_addr;
		s->cur_agent->blocked_on = NULL;
		s->cur_agent->blocked_on_tid = -1;
		s->cur_agent->blocked_on_addr = -1;
		/* no need to check for deadlock; this can't create a cycle. */
		mutex_block_others(&s->rq, locked_addr, s->cur_agent,
				   s->cur_agent->tid);
	} else if (kern_mutex_unlocking(ls->cpu0, ls->eip, &mutex_addr)) {
		/* It's allowed to have a mutex_unlock call inside a mutex_lock
		 * (and it can happen), or mutex_lock inside of mutex_lock, but
		 * not the other way around. */
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = true;
		mutex_block_others(&s->rq, mutex_addr, NULL, -1);
	} else if (kern_mutex_unlocking_done(ls->eip)) {
		assert(ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = false;
	}

	/**********************************************************************
	 * Exercise our will upon the guest kernel
	 **********************************************************************/

	/* Some checks before invoking the arbiter. First see if an operation of
	 * ours is already in-flight. */
	if (s->schedule_in_flight) {
		if (s->schedule_in_flight == s->cur_agent) {
			/* the in-flight schedule operation is cleared for
			 * landing. note that this may cause another one to
			 * be triggered again as soon as the context switcher
			 * and/or the timer handler finishes; it is up to the
			 * arbiter to decide this. */
			assert(ACTION(s, schedule_target));
			/* this condition should trigger in the middle of the
			 * switch, rather than after it finishes. (which is also
			 * why we leave the schedule_target flag turned on).
			 * the special case is for newly forked agents that are
			 * schedule targets - they won't exit timer or c-s above
			 * so here is where we have to clear it for them. */
			if (ACTION(s, just_forked)) {
				/* Interrupts are "probably" off, but that's why
				 * just_finished_reschedule is persistent. */
				lsprintf(DEV, "Finished flying to %d.\n",
					 s->cur_agent->tid);
				ACTION(s, schedule_target) = false;
				ACTION(s, just_forked) = false;
				s->schedule_in_flight = NULL;
				s->just_finished_reschedule = true;
			} else {
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s));
			}
			/* The schedule_in_flight flag itself is cleared above,
			 * along with schedule_target. Sometimes sched_recover
			 * sets in_flight and needs it not cleared here. */
		} else {
			/* An undesirable thread has been context-switched
			 * away, either from an interrupt handler (timer/kbd)
			 * or of its own accord. We need to wait for it to get
			 * back to its own execution before triggering an
			 * interrupt on it; in the former case, this will be
			 * just after it irets; in the latter, just after the
			 * c-s returns. */
			if (kern_timer_exiting(ls->eip) ||
			     (!HANDLING_INTERRUPT(s) &&
			      kern_context_switch_exiting(ls->eip)) ||
			     ACTION(s, just_forked)) {
				/* an undesirable agent just got switched to;
				 * keep the pending schedule in the air. */
				// XXX: this seems to get taken too soon? change
				// it somehow to cause_.._immediately. and then
				// see the asserts/comments in the action
				// handling_timer sections above.
				/* some kernels (pathos) still have interrupts
				 * off or scheduler locked at this point; so
				 * properties of !R */
				if (interrupts_enabled(ls->cpu0) &&
				    kern_ready_for_timer_interrupt(ls->cpu0)) {
					lsprintf(INFO, "keeping schedule in-"
						 "flight at 0x%x\n", ls->eip);
					cause_timer_interrupt(ls->cpu0);
					s->entering_timer = true;
					s->delayed_in_flight = false;
				} else {
					lsprintf(INFO, "Want to keep schedule "
						 "in-flight at 0x%x; have to "
						 "delay\n", ls->eip);
					s->delayed_in_flight = true;
				}
				/* If this was the special case where the
				 * undesirable thread was just forked, keeping
				 * the schedule in flight will cause it to do a
				 * normal context switch. So just_forked is no
				 * longer needed. */
				ACTION(s, just_forked) = false;
			} else if (s->delayed_in_flight &&
				   interrupts_enabled(ls->cpu0) &&
				   kern_ready_for_timer_interrupt(ls->cpu0)) {
				lsprintf(INFO, "Delayed in-flight timer tick "
					 "at 0x%x\n", ls->eip);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
				s->delayed_in_flight = false;
			} else {
				/* they'd better not have "escaped" */
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s) ||
				       !interrupts_enabled(ls->cpu0) ||
				       !kern_ready_for_timer_interrupt(ls->cpu0));
			}
		}
		/* in any case we have no more decisions to make here */
		return;
	} else if (ACTION(s, just_forked)) {
		ACTION(s, just_forked) = false;
		s->just_finished_reschedule = true;
	}
	assert(!s->schedule_in_flight);

	/* Can't do anything before the test actually starts. */
	if (ls->test.current_test == NULL) {
		return;
	}

	/* XXX TODO: This will "leak" an undesirable thread to execute an
	 * instruction if the timer/kbd handler is an interrupt gate, so check
	 * also if we're about to iret and then examine the eflags on the
	 * stack. Also, "sti" and "popf" are interesting, so check for those.
	 * Also, do trap gates enable interrupts if they were off? o_O */
	if (!interrupts_enabled(ls->cpu0)) {
		return;
	}

	/* If a schedule operation is just finishing, we should allow the thread
	 * to get back to its own execution before making another choice. Note
	 * that when we previously decided to interrupt the thread, it will have
	 * executed the single instruction we made the choice at, then taken the
	 * interrupt, so we return to the next instruction, not the same one. */
	if (ACTION(s, schedule_target)) {
		return;
	}

	/* TODO: have an extra mode which will allow us to preempt the timer
	 * handler. */
	if (HANDLING_INTERRUPT(s) || !kern_ready_for_timer_interrupt(ls->cpu0)) {
		return;
	}

	/* As kernel_specifics.h says, no preempting during mutex unblocking. */
	if (ACTION(s, mutex_unlocking)) {
		return;
	}

	/* Okay, are we at a choice point? */
	bool voluntary;
	bool just_finished_reschedule = s->just_finished_reschedule;
	s->just_finished_reschedule = false;
	/* TODO: arbiter may also want to see the trace_entry_t */
	if (arbiter_interested(ls, just_finished_reschedule, &voluntary)) {
		struct agent *a;
		bool our_choice;
		/* TODO: as an optimisation (in serialisation state / etc), the
		 * arbiter may return NULL if there was only one possible
		 * choice. */
		if (arbiter_choose(ls, &a, &our_choice)) {
			/* Effect the choice that was made... */
			if (a != s->cur_agent) {
				lsprintf(CHOICE, "from agent %d, arbiter chose "
					 "%d at 0x%x (called at 0x%x)\n",
					 s->cur_agent->tid, a->tid, ls->eip,
					 (unsigned int)READ_STACK(ls->cpu0, 0));
				set_schedule_target(s, a);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
			}
			/* Record the choice that was just made. */
			if (ls->test.test_ever_caused &&
			    ls->test.start_population != s->most_agents_ever) {
				save_setjmp(&ls->save, ls, a->tid, our_choice,
					    false, voluntary);
			}
		} else {
			lsprintf(BUG, "no agent was chosen at eip 0x%x\n",
				 ls->eip);
		}
	}
	/* XXX TODO: it may be that not every timer interrupt triggers a context
	 * switch, so we should watch out if a handler doesn't enter the c-s. */
}
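
One piece the mutex-tracking branches above lean on is deadlocked(). Since it is called immediately after cur_agent records a new blocked_on_tid edge, any new cycle in the blocked-on graph must pass through cur_agent, so a simple chain walk suffices. A minimal sketch under that assumption (field and helper names are taken from the code above; the real implementation may differ):

/* Hypothetical sketch of the "noob deadlock detection": follow who is
 * blocked on whom, starting from the current agent. Because this runs
 * right after cur_agent blocks, any cycle must include cur_agent, so
 * revisiting its tid means deadlock; running off the chain means not. */
static bool deadlocked_sketch(struct sched_state *s)
{
	int tid = s->cur_agent->blocked_on_tid;
	while (tid != -1) {
		if (tid == s->cur_agent->tid)
			return true;
		/* Blocked threads may sit on either queue. */
		struct agent *a = agent_by_tid_or_null(&s->rq, tid);
		if (a == NULL)
			a = agent_by_tid_or_null(&s->dq, tid);
		if (a == NULL)
			return false; /* chain ended at an unknown thread */
		tid = a->blocked_on_tid;
	}
	return false; /* chain ended at a thread that isn't blocked */
}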