Code example #1
File: memory.c  Project: Jarvishappy/landslide
static void mem_exit_bad_place(struct ls_state *ls, bool in_kernel, unsigned int base)
{
	struct mem_state *m = in_kernel ? &ls->kern_mem : &ls->user_mem;

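	/* We should be returning from a malloc() call here -- not from free()
	 * and not from the allocator's init routine. */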
	assert(m->in_alloc && "attempt to exit malloc without being in!");
	assert(!m->in_free && "attempt to exit malloc while in free!");
	assert(!m->in_mm_init && "attempt to exit malloc while in init!");

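	/* Log allocations belonging to the address space under test. */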
	if (in_kernel != testing_userspace()) {
		lsprintf(DEV, "Malloc [0x%x | %d]\n", base, m->alloc_request_size);
	}

	if (in_kernel) {
		assert(KERNEL_MEMORY(base));
	} else {
		assert(base == 0 || USER_MEMORY(base));
	}

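	/* A zero base means the allocator ran out of memory; otherwise record
	 * the new chunk (base, length, id, allocating stack trace) in the
	 * heap tracking structure. */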
	if (base == 0) {
		lsprintf(INFO, "%s seems to be out of memory.\n", K_STR(in_kernel));
	} else {
		struct chunk *chunk = MM_XMALLOC(1, struct chunk);
		chunk->base = base;
		chunk->len = m->alloc_request_size;
		chunk->id = m->heap_next_id;
		chunk->malloc_trace = stack_trace(ls);
		chunk->free_trace = NULL;

		m->heap_size += m->alloc_request_size;
		assert(m->heap_next_id != INT_MAX && "need a wider type");
		m->heap_next_id++;
		insert_chunk(&m->heap, chunk, false);
	}

	m->in_alloc = false;
}
Code example #2
File: symtable.c  Project: Jarvishappy/landslide
unsigned int symtable_lookup_data(char *buf, unsigned int maxlen, unsigned int addr)
{
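	/* With no symbol table available, fall back to printing the raw
	 * address. */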
	conf_object_t *table = get_symtable();
	if (table == NULL) {
		return scnprintf(buf, maxlen, GLOBAL_COLOUR "global0x%.8x"
				 COLOUR_DEFAULT, addr);
	}

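	/* Query the symbol table object for information about the data symbol
	 * at addr; a non-list result means the lookup found nothing usable. */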
	attr_value_t idx = SIM_make_attr_integer(addr);
	attr_value_t result = SIM_get_attribute_idx(table, "data_at", &idx);
	if (!SIM_attr_is_list(result)) {
		SIM_free_attribute(idx);
		if (KERNEL_MEMORY(addr)) {
			return scnprintf(buf, maxlen, GLOBAL_COLOUR
					 "<kernel global0x%.8x>" COLOUR_DEFAULT, addr);
		} else {
			return scnprintf(buf, maxlen, "<user global0x%x>", addr);
		}
	}
	assert(SIM_attr_list_size(result) >= 4);

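	/* The result list carries the symbol's name, its type, and the offset
	 * of addr within that symbol. */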
	const char *globalname = SIM_attr_string(SIM_attr_list_item(result, 1));
	const char *typename = SIM_attr_string(SIM_attr_list_item(result, 2));
	unsigned int offset = SIM_attr_integer(SIM_attr_list_item(result, 3));

	unsigned int ret = scnprintf(buf, maxlen, GLOBAL_COLOUR "%s", globalname);
	if (offset != 0) {
		ret += scnprintf(buf+ret, maxlen-ret, "+%d", offset);
	}
	ret += scnprintf(buf+ret, maxlen-ret, GLOBAL_INFO_COLOUR
			 " (%s at 0x%.8x)" COLOUR_DEFAULT, typename, addr);

	SIM_free_attribute(result);
	SIM_free_attribute(idx);
	return ret;
}
Code example #3
File: arbiter.c  Project: bblum/landslide
bool arbiter_interested(struct ls_state *ls, bool just_finished_reschedule,
			bool *voluntary, bool *need_handle_sleep, bool *data_race,
			bool *joined, bool *xbegin)
{
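	/* Clear all out-parameters; the matching one is set below when a
	 * preemption point of that kind is identified. */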
	*voluntary = false;
	*need_handle_sleep = false;
	*data_race = false;
	*joined = false;
	*xbegin = false;

	/* Attempt to see if a "voluntary" reschedule is just ending - did the
	 * last thread context switch not because of a timer?
	 * Also make sure to ignore null switches (timer-driven or not). */
	if (ls->sched.last_agent != NULL &&
	    !ls->sched.last_agent->action.handling_timer &&
	    ls->sched.last_agent != ls->sched.cur_agent &&
	    just_finished_reschedule) {
		lsprintf(DEV, "a voluntary reschedule: ");
		print_agent(DEV, ls->sched.last_agent);
		printf(DEV, " to ");
		print_agent(DEV, ls->sched.cur_agent);
		printf(DEV, "\n");
#ifndef PINTOS_KERNEL
		/* Pintos includes a semaphore implementation which can go
		 * around its anti-paradise-lost while loop a full time without
		 * interrupts coming back on. So, there can be a voluntary
		 * reschedule sequence where an uninterruptible, blocked thread
		 * gets jammed in the middle of this transition. Issue #165. */
		if (ls->save.next_tid != ls->sched.last_agent->tid) {
			ASSERT_ONE_THREAD_PER_PP(ls);
		}
#endif
		assert(ls->sched.voluntary_resched_tid != TID_NONE);
		*voluntary = true;
		return true;
	/* is the kernel idling, e.g. waiting for keyboard input? */
	} else if (ls->instruction_text[0] == OPCODE_HLT) {
		lskprintf(INFO, "What are you waiting for? (HLT state)\n");
		*need_handle_sleep = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* Skip the instructions before the test case itself gets started. In
	 * many kernels' cases this will be redundant, but just in case. */
	} else if (!ls->test.test_ever_caused ||
		   ls->test.start_population == ls->sched.most_agents_ever) {
		return false;
	/* check for data races */
	} else if (suspected_data_race(ls)
		   /* if xchg-blocked, need NOT set DR PP. other case below. */
		   && !XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)
#ifdef DR_PPS_RESPECT_WITHIN_FUNCTIONS
		   // NB. The use of KERNEL_MEMORY here used to be !testing_userspace.
		   // I needed to change it to implement preempt-everywhere mode,
		   // to handle the case of userspace shms in deschedule() syscall.
		   // Not entirely sure of all implications of this change.
		   && ((!KERNEL_MEMORY(ls->eip) && user_within_functions(ls)) ||
		      (KERNEL_MEMORY(ls->eip) && kern_within_functions(ls)))
#endif
#ifndef HTM_WEAK_ATOMICITY
		   && !ls->sched.cur_agent->action.user_txn
#endif
		   ) {
		*data_race = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* user-mode-only preemption points */
	} else if (testing_userspace()) {
		unsigned int mutex_addr;
		if (KERNEL_MEMORY(ls->eip)) {
#ifdef GUEST_YIELD_ENTER
#ifndef GUEST_YIELD_EXIT
			STATIC_ASSERT(false && "missing guest yield exit");
#endif
			if ((ls->eip == GUEST_YIELD_ENTER &&
			     READ_STACK(ls->cpu0, 1) == ls->sched.cur_agent->tid) ||
			    (ls->eip == GUEST_YIELD_EXIT &&
			     ((signed int)GET_CPU_ATTR(ls->cpu0, eax)) < 0)) {
				/* Busted yield. Pretend it was yield -1. */
				ASSERT_ONE_THREAD_PER_PP(ls);
				return true;
			}
#endif
			return false;
		} else if (XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)) {
			/* User thread is blocked on an "xchg-continue" mutex.
			 * Analogous to HLT state -- need to preempt it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* under strong atomicity, if for whatever reason a txn
			 * blocks, there's no way it should ever succeed */
			if (ls->sched.cur_agent->action.user_txn) {
				abort_transaction(ls->sched.cur_agent->tid,
						  ls->save.current, _XABORT_CAPACITY);
				ls->end_branch_early = true;
				return false;
			}
#endif
			return true;
#ifndef PINTOS_KERNEL
		} else if (!check_user_address_space(ls)) {
			return false;
#endif
		} else if ((user_mutex_lock_entering(ls->cpu0, ls->eip, &mutex_addr) ||
			    user_mutex_unlock_exiting(ls->eip)) &&
			   user_within_functions(ls)) {
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* by the equivalence proof, it's sound to skip this pp
			 * because if anything were to conflict with it, it'd be
			 * the same as if the txn aborted to begin with */
			if (ls->sched.cur_agent->action.user_txn) {
				return false;
			}
			/* on other hand, under weak memory maybe the user needs
			 * this mutex to protect against some non-txnal code */
#endif
			return true;
#ifdef USER_MAKE_RUNNABLE_EXIT
		} else if (ls->eip == USER_MAKE_RUNNABLE_EXIT) {
			/* i think the reference kernel version i have might
			 * predate the make runnable misbehave mode, because it
			 * seems not to be putting yield pps on it.*/
			ASSERT_ONE_THREAD_PER_PP(ls);
			return true;
#endif
#ifdef TRUSTED_THR_JOIN
		} else if (user_thr_join_exiting(ls->eip)) {
			/* don't respect within functions, obv; this pp is for
			 * happens-before purposes, not scheduling, anyway */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*joined = true;
			return true;
#ifndef USER_MAKE_RUNNABLE_EXIT
		} else if (true) {
			assert(0 && "need mkrun pp for trusted join soundness");
#endif
#endif
		} else if (user_xbegin_entering(ls->eip) ||
			   user_xend_entering(ls->eip)) {
			/* Have to disrespect within functions to properly
			 * respect htm-blocking if there's contention. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*xbegin = user_xbegin_entering(ls->eip);
			return true;
		} else {
			return false;
		}
	/* kernel-mode-only preemption points */
#ifdef PINTOS_KERNEL
	} else if ((ls->eip == GUEST_SEMA_DOWN_ENTER || ls->eip == GUEST_SEMA_UP_EXIT) && kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else if ((ls->eip == GUEST_CLI_ENTER || ls->eip == GUEST_STI_EXIT) &&
		   !ls->sched.cur_agent->action.kern_mutex_locking &&
		   !ls->sched.cur_agent->action.kern_mutex_unlocking &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
#endif
	} else if (kern_decision_point(ls->eip) &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else {
		return false;
	}
}