Example #1
bool user_mutex_trylock_exiting(conf_object_t *cpu, unsigned int eip, bool *succeeded) {
#ifdef USER_MUTEX_TRYLOCK_EXIT
	if (eip == USER_MUTEX_TRYLOCK_EXIT) {
		*succeeded = GET_CPU_ATTR(cpu, eax) == TRYLOCK_SUCCESS_VAL;
		return true;
	} else {
		return false;
	}
#else
#ifdef USER_MUTEX_TRYLOCK_ENTER
	STATIC_ASSERT(false && "MUTEX_TRYLOCK ENTER but not EXIT defined");
#endif
#ifdef USER_MUTEX_TRY_LOCK_EXIT
	if (eip == USER_MUTEX_TRY_LOCK_EXIT) {
		*succeeded = GET_CPU_ATTR(cpu, eax) == TRYLOCK_SUCCESS_VAL;
		return true;
	} else {
		return false;
	}
#else
#ifdef USER_MUTEX_TRY_LOCK_ENTER
	STATIC_ASSERT(false && "MUTEX_TRY_LOCK ENTER but not EXIT defined");
#endif
	return false;
#endif
#endif
}
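
A hypothetical call site (not from the repository, offered only as a sketch) shows how the out-parameter above is consumed; "ls" is the per-step state object that appears in the later examples:

bool succeeded;
if (user_mutex_trylock_exiting(ls->cpu0, ls->eip, &succeeded)) {
	/* The exit annotation fired; eax told us whether the lock was taken. */
	lsprintf(DEV, "trylock returned %s at 0x%x\n",
		 succeeded ? "success" : "failure", ls->eip);
}
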
Example #2
File: x86.c Project: bblum/landslide-old
// FIXME: make this as intelligent as stack_trace.
bool within_function(conf_object_t *cpu, int eip, int func, int func_end)
{
	if (eip >= func && eip < func_end)
		return true;

	eip = READ_STACK(cpu, 0);

	if (eip >= func && eip < func_end)
		return true;

	int stop_ebp = 0;
	int ebp = GET_CPU_ATTR(cpu, ebp);
	int rabbit = ebp;
	int frame_count = 0;

	while (ebp != stop_ebp && (unsigned)ebp < USER_MEM_START && frame_count++ < 1024) {
		/* Test eip against given range. */
		eip = READ_MEMORY(cpu, ebp + WORD_SIZE);
		if (eip >= func && eip < func_end)
			return true;

		/* Advance ebp and rabbit. Rabbit must go first to set stop_ebp
		 * accurately. */
		// XXX XXX XXX Actually fix the cycle detection - read from
		// rabbit not ebp; and get rid of the frame counter.
		// Fix this same bug in stack trace function below.
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		ebp = READ_MEMORY(cpu, ebp);
	}

	return false;
}
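
The XXX comment above is accurate: because the rabbit re-reads from ebp rather than from itself, it never gets more than one link ahead, so a longer cycle in the frame-pointer chain is only caught by the frame counter. A minimal sketch of the intended tortoise-and-hare detection, reusing the READ_MEMORY and USER_MEM_START conventions from this page (an assumption, not repository code):

/* Detect a loop in the saved-ebp chain without needing a frame counter. */
static bool ebp_chain_has_cycle(conf_object_t *cpu, int ebp)
{
	int rabbit = ebp;
	while (ebp != 0 && (unsigned)ebp < USER_MEM_START) {
		/* Rabbit takes two steps per iteration; if it runs off the
		 * end of the chain, the chain cannot be circular. */
		for (int i = 0; i < 2; i++) {
			rabbit = READ_MEMORY(cpu, rabbit);
			if (rabbit == 0 || (unsigned)rabbit >= USER_MEM_START)
				return false;
		}
		ebp = READ_MEMORY(cpu, ebp); /* tortoise takes one step */
		if (rabbit == ebp)
			return true; /* they met, so the chain loops */
	}
	return false;
}
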
Example #3
/* Are we currently in the address space associated with the test program? */
bool check_user_address_space(struct ls_state *ls)
{
	return ls->user_mem.cr3 != USER_CR3_WAITING_FOR_THUNDERBIRDS &&
		ls->user_mem.cr3 != USER_CR3_WAITING_FOR_EXEC &&
		ls->user_mem.cr3 != USER_CR3_EXEC_HAPPENED &&
		ls->user_mem.cr3 == GET_CPU_ATTR(ls->cpu0, cr3);
}
Example #4
File: x86.c Project: bblum/landslide-old
void cause_timer_interrupt(conf_object_t *cpu)
{
	lsprintf(DEV, "tick! (0x%x)\n", (int)GET_CPU_ATTR(cpu, eip));

	if (GET_CPU_ATTR(cpu, pending_vector_valid)) {
		SET_CPU_ATTR(cpu, pending_vector,
			     GET_CPU_ATTR(cpu, pending_vector)
			     | TIMER_INTERRUPT_NUMBER);
	} else {
		SET_CPU_ATTR(cpu, pending_vector, TIMER_INTERRUPT_NUMBER);
		SET_CPU_ATTR(cpu, pending_vector_valid, 1);
	}

	SET_CPU_ATTR(cpu, pending_interrupt, 1);
	/* Causes Simics to flush whatever pipeline, implicit or not, would
	 * otherwise let more instructions get executed before the interrupt
	 * is taken. */
	SIM_run_unrestricted(cpu, cause_timer_interrupt_soviet_style, NULL);
}
Example #5
File: x86.c Project: bblum/landslide-old
/* two possible methods for causing a timer interrupt - the lolol version crafts
 * an iret stack frame by hand and changes the cpu's registers manually; the
 * other way just manipulates the cpu's interrupt pending flags to make it do
 * the interrupt itself. */
int cause_timer_interrupt_immediately(conf_object_t *cpu)
{
	int esp = GET_CPU_ATTR(cpu, esp);
	int eip = GET_CPU_ATTR(cpu, eip);
	int eflags = GET_CPU_ATTR(cpu, eflags);
	int handler = kern_get_timer_wrap_begin();

	lsprintf(DEV, "tock! (0x%x)\n", eip);

	/* 12 is the size of an IRET frame only when already in kernel mode. */
	SET_CPU_ATTR(cpu, esp, esp - 12);
	esp = esp - 12; /* "oh, I can do common subexpression elimination!" */
	SIM_write_phys_memory(cpu, esp + 8, eflags, 4);
	SIM_write_phys_memory(cpu, esp + 4, KERNEL_SEGSEL_CS, 4);
	SIM_write_phys_memory(cpu, esp + 0, eip, 4);
	SET_CPU_ATTR(cpu, eip, handler);

	return handler;
}
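
For reference, the three words written above form the same-privilege iret frame that x86 expects, lowest address first; a cross-privilege interrupt additionally pushes SS and ESP, which is why 12 bytes only suffices when already in kernel mode. A sketch of the layout (architectural fact, not repository code):

/* x86 same-privilege iret frame, as crafted by hand above. */
struct iret_frame_kernel {
	unsigned int eip;    /* esp + 0: where iret resumes execution */
	unsigned int cs;     /* esp + 4: KERNEL_SEGSEL_CS */
	unsigned int eflags; /* esp + 8: saved flags */
};
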
Example #6
/* The user mem heap tracking can only work for a single address space. We want
 * to pay attention to the userspace program under test, not the shell or init
 * or idle or anything like that. Figure out what that process's cr3 is. */
static bool ignore_user_access(struct ls_state *ls)
{
	unsigned int current_tid = ls->sched.cur_agent->tid;
	unsigned int cr3 = GET_CPU_ATTR(ls->cpu0, cr3);

	if (!testing_userspace()) {
		/* Don't attempt to track user accesses for kernelspace tests.
		 * Tests like vanish_vanish require multiple user cr3s, which
		 * we don't support when tracking user accesses. When doing a
		 * userspace test, we need to do the below cr3 assertion, but
		 * when doing a kernel test we cannot, so instead we have to
		 * ignore all user accesses entirely. */
		return true;
	} else if (current_tid == kern_get_init_tid() ||
	    current_tid == kern_get_shell_tid() ||
	    (kern_has_idle() && current_tid == kern_get_idle_tid())) {
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_THUNDERBIRDS) {
		ls->user_mem.cr3 = USER_CR3_WAITING_FOR_EXEC;
		ls->user_mem.cr3_tid = current_tid;
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_EXEC) {
		/* must wait for a trip through kernelspace; see below */
		return true;
	} else if (ls->user_mem.cr3 == USER_CR3_EXEC_HAPPENED) {
		/* recognized non-shell-non-idle-non-init user process has been
		 * through exec and back. hopefully its new cr3 is permanent. */
		assert(cr3 != USER_CR3_WAITING_FOR_EXEC);
		assert(cr3 != USER_CR3_EXEC_HAPPENED);
		ls->user_mem.cr3 = cr3;
		lsprintf(DEV, "Registered cr3 value 0x%x for userspace "
			 "tid %d.\n", cr3, current_tid);
		return false;
	} else if (ls->user_mem.cr3 != cr3) {
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Memory tracking for "
			 "more than 1 user address space is unsupported!\n");
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Already tracking for "
			 "cr3 0x%x, belonging to tid %d; current cr3 0x%x, "
			 "current tid %d\n", ls->user_mem.cr3,
			 ls->user_mem.cr3_tid, cr3, current_tid);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "If you're trying to "
			 "run vanish_vanish, make sure TESTING_USERSPACE=0.\n");
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Otherwise, make sure "
			 "your test case doesn't fork().\n" COLOUR_DEFAULT);
		assert(0);
		return false;
	} else {
		return false;
	}
}
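
The sentinel comparisons above implement a small state machine for latching onto the test process's address space. A sketch of the life cycle as the branches read it (the enum is illustrative only; the real USER_CR3_* sentinel values are defined elsewhere in the repository, and the step from WAITING_FOR_EXEC to EXEC_HAPPENED presumably occurs in a kernel exec hook not shown here):

/* Illustrative only: mirrors the branch structure above. */
enum cr3_track_state {
	WAITING_FOR_THUNDERBIRDS, /* nothing seen; the first qualifying user
	                           * access moves to WAITING_FOR_EXEC */
	WAITING_FOR_EXEC,         /* candidate tid recorded; keep ignoring
	                           * accesses until the process execs */
	EXEC_HAPPENED,            /* exec done; the next user access latches
	                           * the real cr3 and tracking begins */
	TRACKING,                 /* user_mem.cr3 holds the process's cr3;
	                           * any other cr3 value is a fatal error */
};
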
Example #7
File: schedule.c Project: jinlee/masters
static struct agent *agent_by_tid(struct agent_q *q, int tid)
{
	struct agent *a = agent_by_tid_or_null(q, tid);
	if (a == NULL) {
		conf_object_t *cpu = SIM_get_object("cpu0");
		char *stack = stack_trace(cpu, GET_CPU_ATTR(cpu, eip), -1);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "TID %d isn't in the "
			 "right queue; probably incorrect annotations?\n", tid);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Current stack: %s\n"
			 COLOUR_DEFAULT, stack);
		assert(0);
	}
	return a;
}
Example #8
bool user_mm_realloc_exiting(conf_object_t *cpu, unsigned int eip, unsigned int *base)
{
#ifdef USER_MM_REALLOC_EXIT
	if (eip == USER_MM_REALLOC_EXIT) {
		*base = GET_CPU_ATTR(cpu, eax);
		return true;
	} else {
		return false;
	}
#else
#ifdef USER_MM_REALLOC_ENTER
	STATIC_ASSERT(false && "user realloc enter but not exit defined");
#endif
	return false;
#endif
}
Example #9
File: x86.c Project: bblum/landslide-old
int avoid_timer_interrupt_immediately(conf_object_t *cpu)
{
	int buf = GET_CPU_ATTR(cpu, esp) -
		(CUSTOM_ASSEMBLY_CODES_SIZE + CUSTOM_ASSEMBLY_CODES_STACK);

	lsprintf(INFO, "Cuckoo!\n");

	STATIC_ASSERT(ARRAY_SIZE(custom_assembly_codes) ==
		      CUSTOM_ASSEMBLY_CODES_SIZE);
	for (int i = 0; i < CUSTOM_ASSEMBLY_CODES_SIZE; i++) {
		SIM_write_phys_memory(cpu, buf+i, custom_assembly_codes[i], 1);
	}

	SET_CPU_ATTR(cpu, eip, buf);
	return buf;
}
Example #10
File: schedule.c Project: jinlee/masters
/* A more user-friendly way of asserting NO_ACTION. */
static void assert_no_action(struct sched_state *s, const char *new_act)
{
	bool failed = false;
	CHECK_NOT_ACTION(failed, s, handling_timer);
	CHECK_NOT_ACTION(failed, s, context_switch);
	CHECK_NOT_ACTION(failed, s, forking);
	CHECK_NOT_ACTION(failed, s, sleeping);
	CHECK_NOT_ACTION(failed, s, vanishing);
	CHECK_NOT_ACTION(failed, s, readlining);
	if (failed) {
		conf_object_t *cpu = SIM_get_object("cpu0");
		char *stack = stack_trace(cpu, GET_CPU_ATTR(cpu, eip),
					  s->cur_agent->tid);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "While trying to do %s;"
			 " probably incorrect annotations?\n", new_act);
		lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Current stack: %s\n",
			 stack);
		assert(0);
	}
}
Example #11
File: schedule.c Project: jinlee/masters
static void agent_deschedule(struct sched_state *s, int tid)
{
	struct agent *a = agent_by_tid_or_null(&s->rq, tid);
	if (a != NULL) {
		Q_REMOVE(&s->rq, a, nobe);
		Q_INSERT_FRONT(&s->dq, a, nobe);
	/* If it's not on the runqueue, we must have already special-case moved
	 * it off in the thread-change event. */
	} else if (agent_by_tid_or_null(&s->sq, tid) == NULL) {
		/* Either it's on the sleep queue, or it vanished. */
		if (agent_by_tid_or_null(&s->dq, tid) != NULL) {
			conf_object_t *cpu = SIM_get_object("cpu0");
			char *stack = stack_trace(cpu, GET_CPU_ATTR(cpu, eip),
						  s->cur_agent->tid);
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "TID %d is "
				 "already off the runqueue at tell_off_rq(); "
				 "probably incorrect annotations?\n", tid);
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Current stack: %s\n"
				 COLOUR_DEFAULT, stack);
			assert(0);
		}
	}
}
Example #12
File: x86.c Project: bblum/landslide-old
/* Caller has to free the return value. */
char *stack_trace(conf_object_t *cpu, int eip, int tid)
{
	char *buf = MM_XMALLOC(MAX_TRACE_LEN, char);
	int pos = 0, old_pos;
	int stack_offset = 0; /* Counts by 1 - READ_STACK already multiplies */

	ADD_STR(buf, pos, MAX_TRACE_LEN, "TID%d at 0x%.8x in ", tid, eip);
	ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);

	int stop_ebp = 0;
	int ebp = GET_CPU_ATTR(cpu, ebp);
	int rabbit = ebp;
	int frame_count = 0;

	while (ebp != 0 && (unsigned)ebp < USER_MEM_START && frame_count++ < 1024) {
		bool extra_frame;

		do {
			int eip_offset;
			bool iret_block = false;

			extra_frame = false;
			/* at the beginning or end of a function, there is no
			 * frame, but a return address is still on the stack. */
			if (function_eip_offset(eip, &eip_offset)) {
				if (eip_offset == 0) {
					extra_frame = true;
				} else if (eip_offset == 1 &&
				           READ_BYTE(cpu, eip - 1)
				           == OPCODE_PUSH_EBP) {
					stack_offset++;
					extra_frame = true;
				}
			}
			if (!extra_frame) {
				int opcode = READ_BYTE(cpu, eip);
				if (opcode == OPCODE_RET) {
					extra_frame = true;
				} else if (opcode == OPCODE_IRET) {
					iret_block = true;
					extra_frame = true;
				}
			}
			if (extra_frame) {
				eip = READ_STACK(cpu, stack_offset);
				ADD_STR(buf, pos, MAX_TRACE_LEN, "%s0x%.8x in ",
					STACK_TRACE_SEPARATOR, eip);
				ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);
				if (iret_block)
					stack_offset += IRET_BLOCK_WORDS;
				else
					stack_offset++;
			}
		} while (extra_frame);

		/* pushed return address behind the base pointer */
		eip = READ_MEMORY(cpu, ebp + WORD_SIZE);
		stack_offset = ebp + 2;
		ADD_STR(buf, pos, MAX_TRACE_LEN, "%s0x%.8x in ",
			STACK_TRACE_SEPARATOR, eip);
		old_pos = pos;
		ADD_FRAME(buf, pos, MAX_TRACE_LEN, eip);
		/* special-case termination condition */
		if (pos - old_pos >= strlen(ENTRY_POINT) &&
		    strncmp(buf + old_pos, ENTRY_POINT,
		            strlen(ENTRY_POINT)) == 0) {
			break;
		}

		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		if (rabbit != stop_ebp) rabbit = READ_MEMORY(cpu, ebp);
		if (rabbit == ebp) stop_ebp = ebp;
		ebp = READ_MEMORY(cpu, ebp);
	}

	char *buf2 = MM_XSTRDUP(buf); /* truncate to save space */
	MM_FREE(buf);

	return buf2;
}
Example #13
File: x86.c Project: bblum/landslide-old
bool interrupts_enabled(conf_object_t *cpu)
{
	int eflags = GET_CPU_ATTR(cpu, eflags);
	return (eflags & EFL_IF) != 0;
}
Example #14
File: arbiter.c Project: bblum/landslide
bool arbiter_interested(struct ls_state *ls, bool just_finished_reschedule,
			bool *voluntary, bool *need_handle_sleep, bool *data_race,
			bool *joined, bool *xbegin)
{
	*voluntary = false;
	*need_handle_sleep = false;
	*data_race = false;
	*joined = false;
	*xbegin = false;

	/* Attempt to see if a "voluntary" reschedule is just ending - did the
	 * last thread context switch not because of a timer?
	 * Also make sure to ignore null switches (timer-driven or not). */
	if (ls->sched.last_agent != NULL &&
	    !ls->sched.last_agent->action.handling_timer &&
	    ls->sched.last_agent != ls->sched.cur_agent &&
	    just_finished_reschedule) {
		lsprintf(DEV, "a voluntary reschedule: ");
		print_agent(DEV, ls->sched.last_agent);
		printf(DEV, " to ");
		print_agent(DEV, ls->sched.cur_agent);
		printf(DEV, "\n");
#ifndef PINTOS_KERNEL
		/* Pintos includes a semaphore implementation which can go
		 * around its anti-paradise-lost while loop a full time without
		 * interrupts coming back on. So, there can be a voluntary
		 * reschedule sequence where an uninterruptible, blocked thread
		 * gets jammed in the middle of this transition. Issue #165. */
		if (ls->save.next_tid != ls->sched.last_agent->tid) {
			ASSERT_ONE_THREAD_PER_PP(ls);
		}
#endif
		assert(ls->sched.voluntary_resched_tid != TID_NONE);
		*voluntary = true;
		return true;
	/* is the kernel idling, e.g. waiting for keyboard input? */
	} else if (ls->instruction_text[0] == OPCODE_HLT) {
		lskprintf(INFO, "What are you waiting for? (HLT state)\n");
		*need_handle_sleep = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* Skip the instructions before the test case itself gets started. In
	 * many kernels' cases this will be redundant, but just in case. */
	} else if (!ls->test.test_ever_caused ||
		   ls->test.start_population == ls->sched.most_agents_ever) {
		return false;
	/* check for data races */
	} else if (suspected_data_race(ls)
		   /* if xchg-blocked, need NOT set DR PP. other case below. */
		   && !XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)
#ifdef DR_PPS_RESPECT_WITHIN_FUNCTIONS
		   // NB. The use of KERNEL_MEMORY here used to be !testing_userspace.
		   // I needed to change it to implement preempt-everywhere mode,
		   // to handle the case of userspace shms in deschedule() syscall.
		   // Not entirely sure of all implications of this change.
		   && ((!KERNEL_MEMORY(ls->eip) && user_within_functions(ls)) ||
		      (KERNEL_MEMORY(ls->eip) && kern_within_functions(ls)))
#endif
#ifndef HTM_WEAK_ATOMICITY
		   && !ls->sched.cur_agent->action.user_txn
#endif
		   ) {
		*data_race = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* user-mode-only preemption points */
	} else if (testing_userspace()) {
		unsigned int mutex_addr;
		if (KERNEL_MEMORY(ls->eip)) {
#ifdef GUEST_YIELD_ENTER
#ifndef GUEST_YIELD_EXIT
			STATIC_ASSERT(false && "missing guest yield exit");
#endif
			if ((ls->eip == GUEST_YIELD_ENTER &&
			     READ_STACK(ls->cpu0, 1) == ls->sched.cur_agent->tid) ||
			    (ls->eip == GUEST_YIELD_EXIT &&
			     ((signed int)GET_CPU_ATTR(ls->cpu0, eax)) < 0)) {
				/* Busted yield. Pretend it was yield -1. */
				ASSERT_ONE_THREAD_PER_PP(ls);
				return true;
			}
#endif
			return false;
		} else if (XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)) {
			/* User thread is blocked on an "xchg-continue" mutex.
			 * Analogous to HLT state -- need to preempt it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* under strong atomicity, if for whatever reason a txn
			 * blocks, there's no way it should ever succeed */
			if (ls->sched.cur_agent->action.user_txn) {
				abort_transaction(ls->sched.cur_agent->tid,
						  ls->save.current, _XABORT_CAPACITY);
				ls->end_branch_early = true;
				return false;
			}
#endif
			return true;
#ifndef PINTOS_KERNEL
		} else if (!check_user_address_space(ls)) {
			return false;
#endif
		} else if ((user_mutex_lock_entering(ls->cpu0, ls->eip, &mutex_addr) ||
			    user_mutex_unlock_exiting(ls->eip)) &&
			   user_within_functions(ls)) {
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* by the equivalence proof, it's sound to skip this pp
			 * because if anything were to conflict with it, it'd be
			 * the same as if the txn aborted to begin with */
			if (ls->sched.cur_agent->action.user_txn) {
				return false;
			}
			/* on other hand, under weak memory maybe the user needs
			 * this mutex to protect against some non-txnal code */
#endif
			return true;
#ifdef USER_MAKE_RUNNABLE_EXIT
		} else if (ls->eip == USER_MAKE_RUNNABLE_EXIT) {
			/* i think the reference kernel version i have might
			 * predate the make runnable misbehave mode, because it
			 * seems not to be putting yield pps on it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			return true;
#endif
#ifdef TRUSTED_THR_JOIN
		} else if (user_thr_join_exiting(ls->eip)) {
			/* don't respect within functions, obv; this pp is for
			 * happens-before purposes, not scheduling, anyway */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*joined = true;
			return true;
#ifndef USER_MAKE_RUNNABLE_EXIT
		} else if (true) {
			assert(0 && "need mkrun pp for trusted join soundness");
#endif
#endif
		} else if (user_xbegin_entering(ls->eip) ||
			   user_xend_entering(ls->eip)) {
			/* Have to disrespect within functions to properly
			 * respect htm-blocking if there's contention. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*xbegin = user_xbegin_entering(ls->eip);
			return true;
		} else {
			return false;
		}
	/* kernel-mode-only preemption points */
#ifdef PINTOS_KERNEL
	} else if ((ls->eip == GUEST_SEMA_DOWN_ENTER || ls->eip == GUEST_SEMA_UP_EXIT) && kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else if ((ls->eip == GUEST_CLI_ENTER || ls->eip == GUEST_STI_EXIT) &&
		   !ls->sched.cur_agent->action.kern_mutex_locking &&
		   !ls->sched.cur_agent->action.kern_mutex_unlocking &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
#endif
	} else if (kern_decision_point(ls->eip) &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else {
		return false;
	}
}
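
A hypothetical driver loop (not from the repository, sketched only to show the interface) consumes the flag out-parameters, learning both that the current instruction is a preemption point and why:

bool voluntary, need_handle_sleep, data_race, joined, xbegin;
if (arbiter_interested(ls, just_finished_reschedule, &voluntary,
		       &need_handle_sleep, &data_race, &joined, &xbegin)) {
	/* This instruction is a preemption point; a model checker would
	 * checkpoint here and later explore other thread interleavings. */
}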