Code example #1
File: param.c  Project: franfyh/structureanalysis
void param_read_from_text(const char *path, int use_dna_params, param_t p)
{
  DIR *cwd = opendir(".");
  if (!cwd)
    die("param_read_from_text: cannot open current directory");
  if (chdir(path))
    die("param_read_from_text: cannot change directories to %s", path);
  p->use_dna_params = use_dna_params;
#define READ_STACK(x) read_stack(#x, p->use_dna_params, &p->x)
  READ_STACK(coaxial);
  READ_STACK(coaxstack);
  READ_STACK(stack);
  READ_STACK(tstack);
  READ_STACK(tstackcoax);
  READ_STACK(tstackh);
  READ_STACK(tstacki);
  READ_STACK(tstacki1n);
  READ_STACK(tstacki23);
  READ_STACK(tstackm);
#undef READ_STACK
  read_loop(p);
  read_miscloop(p);
  read_triloop(p);
  read_tloop(p);
  read_hexaloop(p);
  read_dangle(p->use_dna_params, &p->dangle_3p, &p->dangle_5p);
  read_int11(p->use_dna_params, &p->int11);
  read_int22(p->use_dna_params, &p->int22);
  read_int21(p->use_dna_params, &p->int21);
  if (fchdir(dirfd(cwd)))
    die("param_read_from_text: cannot cd back to original working directory");
  closedir(cwd);
}
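Note that READ_STACK here is a local convenience macro, defined and immediately #undef'd inside the function, and is unrelated to the simulator-side READ_STACK used in the examples below. It relies on the preprocessor's stringizing operator so that a single token names both the parameter table on disk and the corresponding struct field; the first invocation, for instance, expands to the following call:

/* Expansion of READ_STACK(coaxial) under the #define above: */
read_stack("coaxial", p->use_dna_params, &p->coaxial);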
Code example #2
/* Mode: 0 == read; 1 == write */
bool user_rwlock_lock_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr, bool *write) {
#ifdef USER_RWLOCK_LOCK_ENTER
	if (eip == USER_RWLOCK_LOCK_ENTER) {
		*addr = READ_STACK(cpu, 1);
		*write = READ_STACK(cpu, 2) != 0;
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
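In this and the remaining examples, READ_STACK(cpu, n) is a simulator helper that reads the n-th word above the stack pointer at a function's entry point, i.e. its n-th argument (offset 0 being the return address; see the "READ_STACK already multiplies" comment in example #11). A minimal sketch of what such a macro could look like, built from the READ_MEMORY, GET_CPU_ATTR, and WORD_SIZE helpers that appear in the other examples; the actual project definition may differ:

/* Hypothetical definition -- not copied from the project headers. */
#define READ_STACK(cpu, offset) \
	READ_MEMORY((cpu), GET_CPU_ATTR((cpu), esp) + ((offset) * WORD_SIZE))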
Code example #3
bool user_mm_realloc_entering(conf_object_t *cpu, unsigned int eip,
			      unsigned int *orig_base, unsigned int *size)
{
#ifdef USER_MM_REALLOC_ENTER
	if (eip == USER_MM_REALLOC_ENTER) {
		*orig_base = READ_STACK(cpu, 1);
		*size = READ_STACK(cpu, 2);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
Code example #4
File: x86.c  Project: bblum/landslide-old
// FIXME: make this as intelligent as stack_trace.
bool within_function(conf_object_t *cpu, int eip, int func, int func_end)
{
	if (eip >= func && eip < func_end)
		return true;

	eip = READ_STACK(cpu, 0);

	if (eip >= func && eip < func_end)
		return true;

	int stop_ebp = 0;
	int ebp = GET_CPU_ATTR(cpu, ebp);
	int rabbit = ebp;
	int frame_count = 0;

	while (ebp != stop_ebp && (unsigned)ebp < USER_MEM_START && frame_count++ < 1024) {
		/* Test eip against given range. */
		eip = READ_MEMORY(cpu, ebp + WORD_SIZE);
		if (eip >= func && eip < func_end)
			return true;

		/* Advance ebp and rabbit. Rabbit must go first to set stop_ebp
		 * accurately. */
		// XXX XXX XXX Actually fix the cycle detection - read from
		// rabbit not ebp; and get rid of the frame counter.
		// Fix this same bug in stack trace function below.
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		ebp = READ_MEMORY(cpu, ebp);
	}

	return false;
}
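The "rabbit" pointer above is tortoise-and-hare cycle detection for the frame-pointer chain: the fast pointer is meant to advance two frames per loop so that, if a corrupted stack links back on itself, the two pointers meet and stop_ebp terminates the walk (the XXX comment notes that the rabbit is currently advanced from ebp rather than from itself, which is why the frame_count backstop exists). For reference, a self-contained sketch of the intended two-speed traversal on an ordinary linked structure, not Landslide code:

#include <stdbool.h>
#include <stddef.h>

struct frame { struct frame *next; };

/* Floyd-style cycle check: the fast pointer moves two links per step,
 * the slow pointer one; they can only ever meet if there is a cycle. */
static bool chain_has_cycle(const struct frame *head)
{
	const struct frame *slow = head;
	const struct frame *fast = head;
	while (fast != NULL && fast->next != NULL) {
		slow = slow->next;
		fast = fast->next->next;
		if (slow == fast)
			return true;
	}
	return false;
}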
Code example #5
bool user_report_end_fail(conf_object_t *cpu, unsigned int eip)
{
#ifdef USER_REPORT_END_ENTER
	return eip == USER_REPORT_END_ENTER &&
		READ_STACK(cpu, 1) == USER_REPORT_END_FAIL_VAL;
#else
	return false;
#endif
}
Code example #6
bool user_sem_signal_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr) {
#ifdef USER_SEM_SIGNAL_ENTER
	if (eip == USER_SEM_SIGNAL_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
Code example #7
bool user_cond_broadcast_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr) {
#ifdef USER_COND_BROADCAST_ENTER
	if (eip == USER_COND_BROADCAST_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
Code example #8
bool user_mutex_destroy_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr) {
#ifdef USER_MUTEX_DESTROY_ENTER
	if (eip == USER_MUTEX_DESTROY_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
Code example #9
/* The following two functions look funny because of two possible names. */
bool user_mutex_trylock_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr) {
#ifdef USER_MUTEX_TRYLOCK_ENTER
	if (eip == USER_MUTEX_TRYLOCK_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
#ifdef USER_MUTEX_TRY_LOCK_ENTER
	if (eip == USER_MUTEX_TRY_LOCK_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
#endif
}
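The two branches above differ only in which macro names the entry point. Assuming the build defines at most one of the two symbols, the same logic could be written without nesting by testing them with defined(); a sketch of that alternative, not the project's actual code:

/* Equivalent sketch using #if defined()/#elif, assuming at most one of
 * the two symbols is defined by the build configuration. */
bool user_mutex_trylock_entering(conf_object_t *cpu, unsigned int eip, unsigned int *addr) {
#if defined(USER_MUTEX_TRYLOCK_ENTER)
	if (eip == USER_MUTEX_TRYLOCK_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	}
	return false;
#elif defined(USER_MUTEX_TRY_LOCK_ENTER)
	if (eip == USER_MUTEX_TRY_LOCK_ENTER) {
		*addr = READ_STACK(cpu, 1);
		return true;
	}
	return false;
#else
	return false;
#endif
}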
Code example #10
bool user_mm_free_entering(conf_object_t *cpu, unsigned int eip, unsigned int *base)
{
#ifdef USER_MM_FREE_ENTER
	if (eip == USER_MM_FREE_ENTER) {
		*base = READ_STACK(cpu, 1);
		return true;
	} else {
		return false;
	}
#else
	return false;
#endif
}
Code example #11
File: x86.c  Project: bblum/landslide-old
/* Caller has to free the return value. */
char *stack_trace(conf_object_t *cpu, int eip, int tid)
{
	char *buf = MM_XMALLOC(MAX_TRACE_LEN, char);
	int pos = 0, old_pos;
	int stack_offset = 0; /* Counts by 1 - READ_STACK already multiplies */

	ADD_STR(buf, pos, MAX_TRACE_LEN, "TID%d at 0x%.8x in ", tid, eip);
	ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);

	int stop_ebp = 0;
	int ebp = GET_CPU_ATTR(cpu, ebp);
	int rabbit = ebp;
	int frame_count = 0;

	while (ebp != 0 && (unsigned)ebp < USER_MEM_START && frame_count++ < 1024) {
		bool extra_frame;

		do {
			int eip_offset;
			bool iret_block = false;

			extra_frame = false;
			/* at the beginning or end of a function, there is no
			 * frame, but a return address is still on the stack. */
			if (function_eip_offset(eip, &eip_offset)) {
				if (eip_offset == 0) {
					extra_frame = true;
				} else if (eip_offset == 1 &&
				           READ_BYTE(cpu, eip - 1)
				           == OPCODE_PUSH_EBP) {
					stack_offset++;
					extra_frame = true;
				}
			}
			if (!extra_frame) {
				int opcode = READ_BYTE(cpu, eip);
				if (opcode == OPCODE_RET) {
					extra_frame = true;
				} else if (opcode == OPCODE_IRET) {
					iret_block = true;
					extra_frame = true;
				}
			}
			if (extra_frame) {
				eip = READ_STACK(cpu, stack_offset);
				ADD_STR(buf, pos, MAX_TRACE_LEN, "%s0x%.8x in ",
					STACK_TRACE_SEPARATOR, eip);
				ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);
				if (iret_block)
					stack_offset += IRET_BLOCK_WORDS;
				else
				stack_offset++;
			}
		} while (extra_frame);

		/* pushed return address behind the base pointer */
		eip = READ_MEMORY(cpu, ebp + WORD_SIZE);
		stack_offset = ebp + 2;
		ADD_STR(buf, pos, MAX_TRACE_LEN, "%s0x%.8x in ",
			STACK_TRACE_SEPARATOR, eip);
		old_pos = pos;
		ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);
		/* special-case termination condition */
		if (pos - old_pos >= strlen(ENTRY_POINT) &&
		    strncmp(buf + old_pos, ENTRY_POINT,
		            strlen(ENTRY_POINT)) == 0) {
			break;
		}

		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		ebp = READ_MEMORY(cpu, ebp);
	}

	char *buf2 = MM_XSTRDUP(buf); /* truncate to save space */
	MM_FREE(buf);

	return buf2;
}
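As the leading comment notes, stack_trace returns a heap-allocated string that the caller must release. A hypothetical caller sketch, using the project's lsprintf and MM_FREE wrappers as they appear in the other examples ('ls' stands in for a struct ls_state as in example #12):

/* Hypothetical usage -- the caller owns and frees the returned buffer. */
char *trace = stack_trace(ls->cpu0, ls->eip, ls->sched.cur_agent->tid);
lsprintf(DEV, "%s\n", trace);
MM_FREE(trace);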
Code example #12
File: schedule.c  Project: jinlee/masters
void sched_update(struct ls_state *ls)
{
	struct sched_state *s = &ls->sched;
	int old_tid = s->cur_agent->tid;
	int new_tid;

	/* wait until the guest is ready */
	if (!s->guest_init_done) {
		if (kern_sched_init_done(ls->eip)) {
			s->guest_init_done = true;
			/* Deprecated since kern_get_current_tid went away. */
			// assert(old_tid == new_tid && "init tid mismatch");
		} else {
			return;
		}
	}

	/* The Importance of Being Assertive, A Trivial Style Guideline for
	 * Serious Programmers, by Ben Blum */
	if (s->entering_timer) {
		assert(ls->eip == kern_get_timer_wrap_begin() &&
		       "simics is a clown and tried to delay our interrupt :<");
		s->entering_timer = false;
	} else {
		if (kern_timer_entering(ls->eip)) {
			lsprintf(DEV, "A timer tick that wasn't ours (0x%x).\n",
				 (int)READ_STACK(ls->cpu0, 0));
			ls->eip = avoid_timer_interrupt_immediately(ls->cpu0);
		}
	}

	/**********************************************************************
	 * Update scheduler state.
	 **********************************************************************/

	if (kern_thread_switch(ls->cpu0, ls->eip, &new_tid) && new_tid != old_tid) {
		/*
		 * So, fork needs to be handled twice, both here and below in the
		 * runnable case. And for kernels that trigger both, both places will
		 * need to have a check for whether the newly forked thread exists
		 * already.
		 *
		 * Sleep and vanish actually only need to happen here. They should
		 * check both the rq and the dq, 'cause there's no telling where the
		 * thread got moved to before. As for the descheduling case, that needs
		 * to check for a new type of action flag "asleep" or "vanished" (and
		 * I guess using last_vanished_agent might work), and probably just
		 * assert that that condition holds if the thread couldn't be found
		 * for the normal descheduling case.
		 */
		/* Has to be handled before updating cur_agent, of course. */
		handle_sleep(s);
		handle_vanish(s);
		handle_unsleep(s, new_tid);

		/* Careful! On some kernels, the trigger for a new agent forking
		 * (where it first gets added to the RQ) may happen AFTER its
		 * tcb is set to be the currently running thread. This would
		 * cause this case to be reached before agent_fork() is called,
		 * so agent_by_tid would fail. Instead, we have an option to
		 * find it later. (see the kern_thread_runnable case below.) */
		struct agent *next = agent_by_tid_or_null(&s->rq, new_tid);
		if (next == NULL) next = agent_by_tid_or_null(&s->dq, new_tid);

		if (next != NULL) {
			lsprintf(DEV, "switched threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		/* This fork check is for kernels which context switch to a
		 * newly-forked thread before adding it to the runqueue - and
		 * possibly won't do so at all (if current_extra_runnable). We
		 * need to do agent_fork now. (agent_fork *also* needs to be
		 * handled below, for kernels which don't c-s to the new thread
		 * immediately.) */
		} else if (handle_fork(s, new_tid, false)) {
			next = agent_by_tid_or_null(&s->dq, new_tid);
			assert(next != NULL && "Newly forked thread not on DQ");
			lsprintf(DEV, "switching threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		} else {
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Couldn't find "
				 "new thread %d; current %d; did you forget to "
				 "tell_landslide_forking()?\n" COLOUR_DEFAULT,
				 new_tid, s->cur_agent->tid);
			assert(0);
		}
		/* Some debug info to help the studence. */
		if (s->cur_agent->tid == kern_get_init_tid()) {
			lsprintf(DEV, "Now running init.\n");
		} else if (s->cur_agent->tid == kern_get_shell_tid()) {
			lsprintf(DEV, "Now running shell.\n");
		} else if (kern_has_idle() &&
			   s->cur_agent->tid == kern_get_idle_tid()) {
			lsprintf(DEV, "Now idling.\n");
		}
	}

	s->current_extra_runnable = kern_current_extra_runnable(ls->cpu0);

	int target_tid;
	int mutex_addr;

	/* Timer interrupt handling. */
	if (kern_timer_entering(ls->eip)) {
		// XXX: same as the comment in the below condition.
		if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
			assert(!ACTION(s, handling_timer));
		} else {
			lsprintf(DEV, "WARNING: allowing a nested timer on "
				 "tid %d's stack\n", s->cur_agent->tid);
		}
		ACTION(s, handling_timer) = true;
		lsprintf(INFO, "%d timer enter from 0x%x\n", s->cur_agent->tid,
		         (unsigned int)READ_STACK(ls->cpu0, 0));
	} else if (kern_timer_exiting(ls->eip)) {
		if (ACTION(s, handling_timer)) {
			// XXX: This condition is a hack to compensate for when
			// simics "sometimes", when keeping a schedule-in-
			// flight, takes the caused timer interrupt immediately,
			// even before the iret.
			if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
				ACTION(s, handling_timer) = false;
				s->just_finished_reschedule = true;
			}
			/* If the schedule target was in a timer interrupt when we
			 * decided to schedule him, then now is when the operation
			 * finishes landing. (otherwise, see below)
			 * FIXME: should this be inside the above if statement? */
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		} else {
			lsprintf(INFO, "WARNING: exiting a non-timer interrupt "
				 "through a path shared with the timer..? (from 0x%x, #%d)\n", (int)READ_STACK(ls->cpu0, 0), (int)READ_STACK(ls->cpu0, -2));
		}
	/* Context switching. */
	} else if (kern_context_switch_entering(ls->eip)) {
		/* It -is- possible for a context switch to interrupt a
		 * context switch if a timer goes off before c-s disables
		 * interrupts. TODO: if we care, make this an int counter. */
		ACTION(s, context_switch) = true;
		/* Maybe update the voluntary resched trace. See schedule.h */
		if (!ACTION(s, handling_timer)) {
			lsprintf(DEV, "Voluntary resched tid ");
			print_agent(DEV, s->cur_agent);
			printf(DEV, "\n");
			s->voluntary_resched_tid = s->cur_agent->tid;
			if (s->voluntary_resched_stack != NULL)
				MM_FREE(s->voluntary_resched_stack);
			s->voluntary_resched_stack =
				stack_trace(ls->cpu0, ls->eip, s->cur_agent->tid);
		}
	} else if (kern_context_switch_exiting(ls->eip)) {
		assert(ACTION(s, cs_free_pass) || ACTION(s, context_switch));
		ACTION(s, context_switch) = false;
		ACTION(s, cs_free_pass) = false;
		/* For threads that context switched of their own accord. */
		if (!HANDLING_INTERRUPT(s)) {
			s->just_finished_reschedule = true;
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		}
	/* Lifecycle. */
	} else if (kern_forking(ls->eip)) {
		assert_no_action(s, "forking");
		ACTION(s, forking) = true;
	} else if (kern_sleeping(ls->eip)) {
		assert_no_action(s, "sleeping");
		ACTION(s, sleeping) = true;
	} else if (kern_vanishing(ls->eip)) {
		assert_no_action(s, "vanishing");
		ACTION(s, vanishing) = true;
	} else if (kern_readline_enter(ls->eip)) {
		assert_no_action(s, "readlining");
		ACTION(s, readlining) = true;
	} else if (kern_readline_exit(ls->eip)) {
		assert(ACTION(s, readlining));
		ACTION(s, readlining) = false;
	/* Runnable state change (incl. consequences of fork, vanish, sleep). */
	} else if (kern_thread_runnable(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to become runnable. Was it just spawned? */
		if (!handle_fork(s, target_tid, true)) {
			agent_wake(s, target_tid);
		}
	} else if (kern_thread_descheduling(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to deschedule. Is it vanishing/sleeping? */
		agent_deschedule(s, target_tid);
	/* Mutex tracking and noob deadlock detection */
	} else if (kern_mutex_locking(ls->cpu0, ls->eip, &mutex_addr)) {
		//assert(!ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = true;
		s->cur_agent->blocked_on_addr = mutex_addr;
	} else if (kern_mutex_blocking(ls->cpu0, ls->eip, &target_tid)) {
		/* Possibly not the case - if this thread entered mutex_lock,
		 * then switched and someone took it, these would be set already
		 * assert(s->cur_agent->blocked_on == NULL);
		 * assert(s->cur_agent->blocked_on_tid == -1); */
		lsprintf(DEV, "mutex: on 0x%x tid %d blocks, owned by %d\n",
			 s->cur_agent->blocked_on_addr, s->cur_agent->tid,
			 target_tid);
		s->cur_agent->blocked_on_tid = target_tid;
		if (deadlocked(s)) {
			lsprintf(BUG, COLOUR_BOLD COLOUR_RED "DEADLOCK! ");
			print_deadlock(BUG, s->cur_agent);
			printf(BUG, "\n");
			found_a_bug(ls);
		}
	} else if (kern_mutex_locking_done(ls->eip)) {
		//assert(ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = false;
		s->cur_agent->blocked_on = NULL;
		s->cur_agent->blocked_on_tid = -1;
		s->cur_agent->blocked_on_addr = -1;
		/* no need to check for deadlock; this can't create a cycle. */
		mutex_block_others(&s->rq, mutex_addr, s->cur_agent,
				   s->cur_agent->tid);
	} else if (kern_mutex_unlocking(ls->cpu0, ls->eip, &mutex_addr)) {
		/* It's allowed to have a mutex_unlock call inside a mutex_lock
		 * (and it can happen), or mutex_lock inside of mutex_lock, but
		 * not the other way around. */
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = true;
		mutex_block_others(&s->rq, mutex_addr, NULL, -1);
	} else if (kern_mutex_unlocking_done(ls->eip)) {
		assert(ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = false;
	}

	/**********************************************************************
	 * Exercise our will upon the guest kernel
	 **********************************************************************/

	/* Some checks before invoking the arbiter. First see if an operation of
	 * ours is already in-flight. */
	if (s->schedule_in_flight) {
		if (s->schedule_in_flight == s->cur_agent) {
			/* the in-flight schedule operation is cleared for
			 * landing. note that this may cause another one to
			 * be triggered again as soon as the context switcher
			 * and/or the timer handler finishes; it is up to the
			 * arbiter to decide this. */
			assert(ACTION(s, schedule_target));
			/* this condition should trigger in the middle of the
			 * switch, rather than after it finishes. (which is also
			 * why we leave the schedule_target flag turned on).
			 * the special case is for newly forked agents that are
			 * schedule targets - they won't exit timer or c-s above
			 * so here is where we have to clear it for them. */
			if (ACTION(s, just_forked)) {
				/* Interrupts are "probably" off, but that's why
				 * just_finished_reschedule is persistent. */
				lsprintf(DEV, "Finished flying to %d.\n",
					 s->cur_agent->tid);
				ACTION(s, schedule_target) = false;
				ACTION(s, just_forked) = false;
				s->schedule_in_flight = NULL;
				s->just_finished_reschedule = true;
			} else {
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s));
			}
			/* The schedule_in_flight flag itself is cleared above,
			 * along with schedule_target. Sometimes sched_recover
			 * sets in_flight and needs it not cleared here. */
		} else {
			/* An undesirable thread has been context-switched away
			 * from either from an interrupt handler (timer/kbd) or
			 * of its own accord. We need to wait for it to get back
			 * to its own execution before triggering an interrupt
			 * on it; in the former case, this will be just after it
			 * irets; in the latter, just after the c-s returns. */
			if (kern_timer_exiting(ls->eip) ||
			     (!HANDLING_INTERRUPT(s) &&
			      kern_context_switch_exiting(ls->eip)) ||
			     ACTION(s, just_forked)) {
				/* an undesirable agent just got switched to;
				 * keep the pending schedule in the air. */
				// XXX: this seems to get taken too soon? change
				// it somehow to cause_.._immediately. and then
				// see the asserts/comments in the action
				// handling_timer sections above.
				/* some kernels (pathos) still have interrupts
				 * off or scheduler locked at this point; so
				 * properties of !R */
				if (interrupts_enabled(ls->cpu0) &&
				    kern_ready_for_timer_interrupt(ls->cpu0)) {
					lsprintf(INFO, "keeping schedule in-"
						 "flight at 0x%x\n", ls->eip);
					cause_timer_interrupt(ls->cpu0);
					s->entering_timer = true;
					s->delayed_in_flight = false;
				} else {
					lsprintf(INFO, "Want to keep schedule "
						 "in-flight at 0x%x; have to "
						 "delay\n", ls->eip);
					s->delayed_in_flight = true;
				}
				/* If this was the special case where the
				 * undesirable thread was just forked, keeping
				 * the schedule in flight will cause it to do a
				 * normal context switch. So just_forked is no
				 * longer needed. */
				ACTION(s, just_forked) = false;
			} else if (s->delayed_in_flight &&
				   interrupts_enabled(ls->cpu0) &&
				   kern_ready_for_timer_interrupt(ls->cpu0)) {
				lsprintf(INFO, "Delayed in-flight timer tick "
					 "at 0x%x\n", ls->eip);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
				s->delayed_in_flight = false;
			} else {
				/* they'd better not have "escaped" */
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s) ||
				       !interrupts_enabled(ls->cpu0) ||
				       !kern_ready_for_timer_interrupt(ls->cpu0));
			}
		}
		/* in any case we have no more decisions to make here */
		return;
	} else if (ACTION(s, just_forked)) {
		ACTION(s, just_forked) = false;
		s->just_finished_reschedule = true;
	}
	assert(!s->schedule_in_flight);

	/* Can't do anything before the test actually starts. */
	if (ls->test.current_test == NULL) {
		return;
	}

	/* XXX TODO: This will "leak" an undesirable thread to execute an
	 * instruction if the timer/kbd handler is an interrupt gate, so check
	 * also if we're about to iret and then examine the eflags on the
	 * stack. Also, "sti" and "popf" are interesting, so check for those.
	 * Also, do trap gates enable interrupts if they were off? o_O */
	if (!interrupts_enabled(ls->cpu0)) {
		return;
	}

	/* If a schedule operation is just finishing, we should allow the thread
	 * to get back to its own execution before making another choice. Note
	 * that when we previously decided to interrupt the thread, it will have
	 * executed the single instruction we made the choice at then taken the
	 * interrupt, so we return to the next instruction, not the same one. */
	if (ACTION(s, schedule_target)) {
		return;
	}

	/* TODO: have an extra mode which will allow us to preempt the timer
	 * handler. */
	if (HANDLING_INTERRUPT(s) || !kern_ready_for_timer_interrupt(ls->cpu0)) {
		return;
	}

	/* As kernel_specifics.h says, no preempting during mutex unblocking. */
	if (ACTION(s, mutex_unlocking)) {
		return;
	}

	/* Okay, are we at a choice point? */
	bool voluntary;
	bool just_finished_reschedule = s->just_finished_reschedule;
	s->just_finished_reschedule = false;
	/* TODO: arbiter may also want to see the trace_entry_t */
	if (arbiter_interested(ls, just_finished_reschedule, &voluntary)) {
		struct agent *a;
		bool our_choice;
		/* TODO: as an optimisation (in serialisation state / etc), the
		 * arbiter may return NULL if there was only one possible
		 * choice. */
		if (arbiter_choose(ls, &a, &our_choice)) {
			/* Effect the choice that was made... */
			if (a != s->cur_agent) {
				lsprintf(CHOICE, "from agent %d, arbiter chose "
					 "%d at 0x%x (called at 0x%x)\n",
					 s->cur_agent->tid, a->tid, ls->eip,
					 (unsigned int)READ_STACK(ls->cpu0, 0));
				set_schedule_target(s, a);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
			}
			/* Record the choice that was just made. */
			if (ls->test.test_ever_caused &&
			    ls->test.start_population != s->most_agents_ever) {
				save_setjmp(&ls->save, ls, a->tid, our_choice,
					    false, voluntary);
			}
		} else {
			lsprintf(BUG, "no agent was chosen at eip 0x%x\n",
				 ls->eip);
		}
	}
	/* XXX TODO: it may be that not every timer interrupt triggers a context
	 * switch, so we should watch out if a handler doesn't enter the c-s. */
}
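sched_update toggles per-thread flags through an ACTION(s, ...) macro. Comparing with example #13, where the same flags are written out as ls->sched.cur_agent->action.handling_timer and ...action.user_txn, the macro presumably abbreviates the current agent's action member; a hedged guess at its shape:

/* Inferred from how the flags are spelled out in example #13 --
 * not copied from the project headers. */
#define ACTION(s, act) ((s)->cur_agent->action.act)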
Code example #13
File: arbiter.c  Project: bblum/landslide
bool arbiter_interested(struct ls_state *ls, bool just_finished_reschedule,
			bool *voluntary, bool *need_handle_sleep, bool *data_race,
			bool *joined, bool *xbegin)
{
	*voluntary = false;
	*need_handle_sleep = false;
	*data_race = false;
	*joined = false;
	*xbegin = false;

	/* Attempt to see if a "voluntary" reschedule is just ending - did the
	 * last thread context switch not because of a timer?
	 * Also make sure to ignore null switches (timer-driven or not). */
	if (ls->sched.last_agent != NULL &&
	    !ls->sched.last_agent->action.handling_timer &&
	    ls->sched.last_agent != ls->sched.cur_agent &&
	    just_finished_reschedule) {
		lsprintf(DEV, "a voluntary reschedule: ");
		print_agent(DEV, ls->sched.last_agent);
		printf(DEV, " to ");
		print_agent(DEV, ls->sched.cur_agent);
		printf(DEV, "\n");
#ifndef PINTOS_KERNEL
		/* Pintos includes a semaphore implementation which can go
		 * around its anti-paradise-lost while loop a full time without
		 * interrupts coming back on. So, there can be a voluntary
		 * reschedule sequence where an uninterruptible, blocked thread
		 * gets jammed in the middle of this transition. Issue #165. */
		if (ls->save.next_tid != ls->sched.last_agent->tid) {
			ASSERT_ONE_THREAD_PER_PP(ls);
		}
#endif
		assert(ls->sched.voluntary_resched_tid != TID_NONE);
		*voluntary = true;
		return true;
	/* is the kernel idling, e.g. waiting for keyboard input? */
	} else if (ls->instruction_text[0] == OPCODE_HLT) {
		lskprintf(INFO, "What are you waiting for? (HLT state)\n");
		*need_handle_sleep = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* Skip the instructions before the test case itself gets started. In
	 * many kernels' cases this will be redundant, but just in case. */
	} else if (!ls->test.test_ever_caused ||
		   ls->test.start_population == ls->sched.most_agents_ever) {
		return false;
	/* check for data races */
	} else if (suspected_data_race(ls)
		   /* if xchg-blocked, need NOT set DR PP. other case below. */
		   && !XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)
#ifdef DR_PPS_RESPECT_WITHIN_FUNCTIONS
		   // NB. The use of KERNEL_MEMORY here used to be !testing_userspace.
		   // I needed to change it to implement preempt-everywhere mode,
		   // to handle the case of userspace shms in deschedule() syscall.
		   // Not entirely sure of all implications of this change.
		   && ((!KERNEL_MEMORY(ls->eip) && user_within_functions(ls)) ||
		      (KERNEL_MEMORY(ls->eip) && kern_within_functions(ls)))
#endif
#ifndef HTM_WEAK_ATOMICITY
		   && !ls->sched.cur_agent->action.user_txn
#endif
		   ) {
		*data_race = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* user-mode-only preemption points */
	} else if (testing_userspace()) {
		unsigned int mutex_addr;
		if (KERNEL_MEMORY(ls->eip)) {
#ifdef GUEST_YIELD_ENTER
#ifndef GUEST_YIELD_EXIT
			STATIC_ASSERT(false && "missing guest yield exit");
#endif
			if ((ls->eip == GUEST_YIELD_ENTER &&
			     READ_STACK(ls->cpu0, 1) == ls->sched.cur_agent->tid) ||
			    (ls->eip == GUEST_YIELD_EXIT &&
			     ((signed int)GET_CPU_ATTR(ls->cpu0, eax)) < 0)) {
				/* Busted yield. Pretend it was yield -1. */
				ASSERT_ONE_THREAD_PER_PP(ls);
				return true;
			}
#endif
			return false;
		} else if (XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)) {
			/* User thread is blocked on an "xchg-continue" mutex.
			 * Analogous to HLT state -- need to preempt it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* under strong atomicity, if for whatever reason a txn
			 * blocks, there's no way it should ever succeed */
			if (ls->sched.cur_agent->action.user_txn) {
				abort_transaction(ls->sched.cur_agent->tid,
						  ls->save.current, _XABORT_CAPACITY);
				ls->end_branch_early = true;
				return false;
			}
#endif
			return true;
#ifndef PINTOS_KERNEL
		} else if (!check_user_address_space(ls)) {
			return false;
#endif
		} else if ((user_mutex_lock_entering(ls->cpu0, ls->eip, &mutex_addr) ||
			    user_mutex_unlock_exiting(ls->eip)) &&
			   user_within_functions(ls)) {
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* by the equivalence proof, it's sound to skip this pp
			 * because if anything were to conflict with it, it'd be
			 * the same as if the txn aborted to begin with */
			if (ls->sched.cur_agent->action.user_txn) {
				return false;
			}
			/* on other hand, under weak memory maybe the user needs
			 * this mutex to protect against some non-txnal code */
#endif
			return true;
#ifdef USER_MAKE_RUNNABLE_EXIT
		} else if (ls->eip == USER_MAKE_RUNNABLE_EXIT) {
			/* i think the reference kernel version i have might
			 * predate the make runnable misbehave mode, because it
			 * seems not to be putting yield pps on it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			return true;
#endif
#ifdef TRUSTED_THR_JOIN
		} else if (user_thr_join_exiting(ls->eip)) {
			/* don't respect within functions, obv; this pp is for
			 * happens-before purposes, not scheduling, anyway */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*joined = true;
			return true;
#ifndef USER_MAKE_RUNNABLE_EXIT
		} else if (true) {
			assert(0 && "need mkrun pp for trusted join soundness");
#endif
#endif
		} else if (user_xbegin_entering(ls->eip) ||
			   user_xend_entering(ls->eip)) {
			/* Have to disrespect within functions to properly
			 * respect htm-blocking if there's contention. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*xbegin = user_xbegin_entering(ls->eip);
			return true;
		} else {
			return false;
		}
	/* kernel-mode-only preemption points */
#ifdef PINTOS_KERNEL
	} else if ((ls->eip == GUEST_SEMA_DOWN_ENTER || ls->eip == GUEST_SEMA_UP_EXIT) && kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else if ((ls->eip == GUEST_CLI_ENTER || ls->eip == GUEST_STI_EXIT) &&
		   !ls->sched.cur_agent->action.kern_mutex_locking &&
		   !ls->sched.cur_agent->action.kern_mutex_unlocking &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
#endif
	} else if (kern_decision_point(ls->eip) &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else {
		return false;
	}
}