/* Looks up the source location (function, file, line) for a code address via
 * the Simics symbol table.
 *
 * eip:  code address to resolve.
 * func: out-param; on success receives a malloced function-name string.
 * file: out-param; on success receives a malloced filename string, or NULL if
 *       the address resolved to the "unknown assembly" placeholder file.
 * line: out-param; on success receives the source line number.
 *
 * Returns true on success, in which case the caller owns (and must free) the
 * strings returned through func/file; returns false if no symbol table is
 * available or the lookup produced no usable result, in which case no
 * out-params are written. */
bool symtable_lookup(unsigned int eip, char **func, char **file, int *line)
{
	conf_object_t *table = get_symtable();
	if (table == NULL) {
		return false;
	}
	/* Query the table's "source_at" attribute, indexed by the address. */
	attr_value_t idx = SIM_make_attr_integer(eip);
	attr_value_t result = SIM_get_attribute_idx(table, "source_at", &idx);
	if (!SIM_attr_is_list(result)) {
		/* NOTE(review): only idx is freed here, not result — presumably
		 * a non-list result holds nothing to free, but confirm against
		 * the Simics attr_value_t ownership rules. */
		SIM_free_attribute(idx);
		return false;
	}
	/* Expected list layout: [0] = filename, [1] = line, [2] = function. */
	assert(SIM_attr_list_size(result) >= 3);
	/* Copy out the function name and line number. However, need to
	 * do some checks on the filename before copying it out as well. */
	if (testing_userspace() && eip == GUEST_CONTEXT_SWITCH_ENTER) {
		/* Special-cased address: report a friendly pseudo-name instead
		 * of whatever kernel symbol the table would give. */
		*func = MM_XSTRDUP("[context switch]");
#ifdef GUEST_HLT_EXIT
	} else if (testing_userspace() && eip == GUEST_HLT_EXIT) {
		*func = MM_XSTRDUP("[kernel idle]");
#endif
	} else {
		*func = MM_XSTRDUP(SIM_attr_string(SIM_attr_list_item(result, 2)));
	}
	const char *maybe_file = SIM_attr_string(SIM_attr_list_item(result, 0));
	*line = SIM_attr_integer(SIM_attr_list_item(result, 1));
	/* A hack to make the filenames shorter: strip everything up to and
	 * including the well-known build-directory prefix, if present. */
	if (strstr(maybe_file, LIKELY_DIR) != NULL) {
		maybe_file = strstr(maybe_file, LIKELY_DIR) + strlen(LIKELY_DIR);
	}
	/* The symbol table will claim that unknown assembly comes from
	 * 410kern/boot/head.S. Print an 'unknown' message instead.
	 * NOTE(review): the length passed is strlen(maybe_file), not
	 * strlen(UNKNOWN_FILE) — so any proper prefix of UNKNOWN_FILE (and the
	 * empty string) also matches. Verify this is intentional. */
	if (strncmp(maybe_file, UNKNOWN_FILE, strlen(maybe_file)) == 0) {
		*file = NULL;
	} else {
		*file = MM_XSTRDUP(maybe_file);
	}
	SIM_free_attribute(result);
	SIM_free_attribute(idx);
	return true;
}
/* The user mem heap tracking can only work for a single address space. We want * to pay attention to the userspace program under test, not the shell or init * or idle or anything like that. Figure out what that process's cr3 is. */ static bool ignore_user_access(struct ls_state *ls) { unsigned int current_tid = ls->sched.cur_agent->tid; unsigned int cr3 = GET_CPU_ATTR(ls->cpu0, cr3);; if (!testing_userspace()) { /* Don't attempt to track user accesses for kernelspace tests. * Tests like vanish_vanish require multiple user cr3s, which * we don't support when tracking user accesses. When doing a * userspace test, we need to do the below cr3 assertion, but * when doing a kernel test we cannot, so instead we have to * ignore all user accesses entirely. */ return true; } else if (current_tid == kern_get_init_tid() || current_tid == kern_get_shell_tid() || (kern_has_idle() && current_tid == kern_get_idle_tid())) { return true; } else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_THUNDERBIRDS) { ls->user_mem.cr3 = USER_CR3_WAITING_FOR_EXEC; ls->user_mem.cr3_tid = current_tid; return true; } else if (ls->user_mem.cr3 == USER_CR3_WAITING_FOR_EXEC) { /* must wait for a trip through kernelspace; see below */ return true; } else if (ls->user_mem.cr3 == USER_CR3_EXEC_HAPPENED) { /* recognized non-shell-non-idle-non-init user process has been * through exec and back. hopefully its new cr3 is permanent. 
*/ assert(cr3 != USER_CR3_WAITING_FOR_EXEC); assert(cr3 != USER_CR3_EXEC_HAPPENED); ls->user_mem.cr3 = cr3; lsprintf(DEV, "Registered cr3 value 0x%x for userspace " "tid %d.\n", cr3, current_tid); return false; } else if (ls->user_mem.cr3 != cr3) { lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Memory tracking for " "more than 1 user address space is unsupported!\n"); lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Already tracking for " "cr3 0x%x, belonging to tid %d; current cr3 0x%x, " "current tid %d\n", ls->user_mem.cr3, ls->user_mem.cr3_tid, cr3, current_tid); lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "If you're trying to " "run vanish_vanish, make sure TESTING_USERSPACE=0.\n"); lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Otherwise, make sure " "your test case doesn't fork().\n" COLOUR_DEFAULT); assert(0); return false; } else { return false; } }
/* Handles returning from a guest malloc call: validates the tracker state,
 * sanity-checks the returned address against the expected address space, and
 * (unless the allocation failed) records a new heap chunk for it.
 *
 * ls:        simulation state (used for stack traces and cpu/heap state).
 * in_kernel: true if this was a kernel-heap malloc, false for user heap.
 * base:      address malloc returned to the guest; 0 means out-of-memory. */
static void mem_exit_bad_place(struct ls_state *ls, bool in_kernel, unsigned int base)
{
	struct mem_state *mem = in_kernel ? &ls->kern_mem : &ls->user_mem;

	/* We must be exiting a malloc that we saw entered, and must not be
	 * nested inside a free or heap-init at the same time. */
	assert(mem->in_alloc && "attempt to exit malloc without being in!");
	assert(!mem->in_free && "attempt to exit malloc while in free!");
	assert(!mem->in_mm_init && "attempt to exit malloc while in init!");

	if (in_kernel != testing_userspace()) {
		lsprintf(DEV, "Malloc [0x%x | %d]\n", base, mem->alloc_request_size);
	}

	/* The returned block must live in the matching address space
	 * (a failed user allocation may legitimately return 0). */
	if (in_kernel) {
		assert(KERNEL_MEMORY(base));
	} else {
		assert(base == 0 || USER_MEMORY(base));
	}

	if (base != 0) {
		/* Record the freshly-allocated chunk in the heap tracker. */
		struct chunk *fresh = MM_XMALLOC(1, struct chunk);
		fresh->base         = base;
		fresh->len          = mem->alloc_request_size;
		fresh->id           = mem->heap_next_id;
		fresh->malloc_trace = stack_trace(ls);
		fresh->free_trace   = NULL;

		mem->heap_size += mem->alloc_request_size;
		/* Chunk ids are monotonically increasing; guard the counter. */
		assert(mem->heap_next_id != INT_MAX && "need a wider type");
		mem->heap_next_id++;
		insert_chunk(&mem->heap, fresh, false);
	} else {
		lsprintf(INFO, "%s seems to be out of memory.\n", K_STR(in_kernel));
	}

	mem->in_alloc = false;
}
/* Decides whether the arbiter wants to insert a preemption point (PP) at the
 * current instruction.
 *
 * ls:                       current simulation state.
 * just_finished_reschedule: true if a context switch just completed.
 * voluntary/need_handle_sleep/data_race/joined/xbegin: out-flags describing
 *     WHY the arbiter is interested; all are cleared first and at most one
 *     relevant flag is set before returning true.
 *
 * Returns true if this instruction is a preemption point. */
bool arbiter_interested(struct ls_state *ls, bool just_finished_reschedule,
			bool *voluntary, bool *need_handle_sleep, bool *data_race,
			bool *joined, bool *xbegin)
{
	/* Clear all out-flags up front so callers see exactly one reason. */
	*voluntary = false;
	*need_handle_sleep = false;
	*data_race = false;
	*joined = false;
	*xbegin = false;

	/* Attempt to see if a "voluntary" reschedule is just ending - did the
	 * last thread context switch not because of a timer?
	 * Also make sure to ignore null switches (timer-driven or not). */
	if (ls->sched.last_agent != NULL &&
	    !ls->sched.last_agent->action.handling_timer &&
	    ls->sched.last_agent != ls->sched.cur_agent &&
	    just_finished_reschedule) {
		lsprintf(DEV, "a voluntary reschedule: ");
		print_agent(DEV, ls->sched.last_agent);
		printf(DEV, " to ");
		print_agent(DEV, ls->sched.cur_agent);
		printf(DEV, "\n");
#ifndef PINTOS_KERNEL
		/* Pintos includes a semaphore implementation which can go
		 * around its anti-paradise-lost while loop a full time without
		 * interrupts coming back on. So, there can be a voluntary
		 * reschedule sequence where an uninterruptible, blocked thread
		 * gets jammed in the middle of this transition. Issue #165. */
		if (ls->save.next_tid != ls->sched.last_agent->tid) {
			ASSERT_ONE_THREAD_PER_PP(ls);
		}
#endif
		assert(ls->sched.voluntary_resched_tid != TID_NONE);
		*voluntary = true;
		return true;
	/* is the kernel idling, e.g. waiting for keyboard input? */
	} else if (ls->instruction_text[0] == OPCODE_HLT) {
		lskprintf(INFO, "What are you waiting for? (HLT state)\n");
		*need_handle_sleep = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* Skip the instructions before the test case itself gets started. In
	 * many kernels' cases this will be redundant, but just in case. */
	} else if (!ls->test.test_ever_caused ||
		   ls->test.start_population == ls->sched.most_agents_ever) {
		return false;
	/* check for data races */
	} else if (suspected_data_race(ls)
		   /* if xchg-blocked, need NOT set DR PP. other case below. */
		   && !XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)
#ifdef DR_PPS_RESPECT_WITHIN_FUNCTIONS
		   // NB. The use of KERNEL_MEMORY here used to be !testing_userspace.
		   // I needed to change it to implement preempt-everywhere mode,
		   // to handle the case of userspace shms in deschedule() syscall.
		   // Not entirely sure of all implications of this change.
		   && ((!KERNEL_MEMORY(ls->eip) && user_within_functions(ls)) ||
		       (KERNEL_MEMORY(ls->eip) && kern_within_functions(ls)))
#endif
#ifndef HTM_WEAK_ATOMICITY
		   && !ls->sched.cur_agent->action.user_txn
#endif
		   ) {
		*data_race = true;
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	/* user-mode-only preemption points */
	} else if (testing_userspace()) {
		unsigned int mutex_addr;
		if (KERNEL_MEMORY(ls->eip)) {
#ifdef GUEST_YIELD_ENTER
#ifndef GUEST_YIELD_EXIT
			STATIC_ASSERT(false && "missing guest yield exit");
#endif
			/* Detect a yield to self (by tid on the stack) or a
			 * yield that returned an error (negative eax). */
			if ((ls->eip == GUEST_YIELD_ENTER &&
			     READ_STACK(ls->cpu0, 1) == ls->sched.cur_agent->tid) ||
			    (ls->eip == GUEST_YIELD_EXIT &&
			     ((signed int)GET_CPU_ATTR(ls->cpu0, eax)) < 0)) {
				/* Busted yield. Pretend it was yield -1. */
				ASSERT_ONE_THREAD_PER_PP(ls);
				return true;
			}
#endif
			/* Otherwise, kernel addresses are not user PPs. */
			return false;
		} else if (XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)) {
			/* User thread is blocked on an "xchg-continue" mutex.
			 * Analogous to HLT state -- need to preempt it. */
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* under strong atomicity, if for whatever reason a txn
			 * blocks, there's no way it should ever succeed */
			if (ls->sched.cur_agent->action.user_txn) {
				abort_transaction(ls->sched.cur_agent->tid,
						  ls->save.current, _XABORT_CAPACITY);
				ls->end_branch_early = true;
				return false;
			}
#endif
			return true;
#ifndef PINTOS_KERNEL
		} else if (!check_user_address_space(ls)) {
			return false;
#endif
		} else if ((user_mutex_lock_entering(ls->cpu0, ls->eip, &mutex_addr) ||
			    user_mutex_unlock_exiting(ls->eip)) &&
			   user_within_functions(ls)) {
			ASSERT_ONE_THREAD_PER_PP(ls);
#ifndef HTM_WEAK_ATOMICITY
			/* by the equivalence proof, it's sound to skip this pp
			 * because if anything were to conflict with it, it'd be
			 * the same as if the txn aborted to begin with */
			if (ls->sched.cur_agent->action.user_txn) {
				return false;
			}
			/* on other hand, under weak memory maybe the user needs
			 * this mutex to protect against some non-txnal code */
#endif
			return true;
#ifdef USER_MAKE_RUNNABLE_EXIT
		} else if (ls->eip == USER_MAKE_RUNNABLE_EXIT) {
			/* i think the reference kernel version i have might
			 * predate the make runnable misbehave mode, because it
			 * seems not to be putting yield pps on it.*/
			ASSERT_ONE_THREAD_PER_PP(ls);
			return true;
#endif
#ifdef TRUSTED_THR_JOIN
		} else if (user_thr_join_exiting(ls->eip)) {
			/* don't respect within functions, obv; this pp is for
			 * happens-before purposes, not scheduling, anyway */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*joined = true;
			return true;
#ifndef USER_MAKE_RUNNABLE_EXIT
		/* Compile-time guard: TRUSTED_THR_JOIN without
		 * USER_MAKE_RUNNABLE_EXIT always trips this assert. */
		} else if (true) {
			assert(0 && "need mkrun pp for trusted join soundness");
#endif
#endif
		} else if (user_xbegin_entering(ls->eip) ||
			   user_xend_entering(ls->eip)) {
			/* Have to disrespect within functions to properly
			 * respect htm-blocking if there's contention. */
			ASSERT_ONE_THREAD_PER_PP(ls);
			*xbegin = user_xbegin_entering(ls->eip);
			return true;
		} else {
			return false;
		}
	/* kernel-mode-only preemption points */
#ifdef PINTOS_KERNEL
	} else if ((ls->eip == GUEST_SEMA_DOWN_ENTER ||
		    ls->eip == GUEST_SEMA_UP_EXIT) &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else if ((ls->eip == GUEST_CLI_ENTER || ls->eip == GUEST_STI_EXIT) &&
		   !ls->sched.cur_agent->action.kern_mutex_locking &&
		   !ls->sched.cur_agent->action.kern_mutex_unlocking &&
		   kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
#endif
	} else if (kern_decision_point(ls->eip) && kern_within_functions(ls)) {
		ASSERT_ONE_THREAD_PER_PP(ls);
		return true;
	} else {
		return false;
	}
}