static DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid) { int i; tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID); for (i = 1; i < DRD_N_THREADS; i++) { if (s_threadinfo[i].vg_thread_exists == False && s_threadinfo[i].posix_thread_exists == False && s_threadinfo[i].detached_posix_thread == False) { s_threadinfo[i].vg_thread_exists = True; s_threadinfo[i].vg_threadid = tid; s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID; s_threadinfo[i].stack_min = 0; s_threadinfo[i].stack_min_min = 0; s_threadinfo[i].stack_startup = 0; s_threadinfo[i].stack_max = 0; s_threadinfo[i].is_recording = True; s_threadinfo[i].synchr_nesting = 0; if (s_threadinfo[i].first != 0) VG_(printf)("drd thread id = %d\n", i); tl_assert(s_threadinfo[i].first == 0); tl_assert(s_threadinfo[i].last == 0); return i; } } tl_assert(False); return DRD_INVALID_THREADID; }
/**
 * Print a detailed report of a detected data race.
 *
 * @param err  Valgrind error record; only its ExeContext is used here.
 * @param dri  Data race information: address, size, access type and the
 *             Valgrind thread ID of the accessing thread.
 */
static void drd_report_data_race2(Error* const err,
                                  const DataRaceErrInfo* const dri)
{
   AddrInfo ai;
   Char descr1[256];
   Char descr2[256];

   tl_assert(dri);
   tl_assert(dri->addr);
   tl_assert(dri->size > 0);

   descr1[0] = 0;
   descr2[0] = 0;
   VG_(get_data_description)(descr1, descr2, sizeof(descr1), dri->addr);
   /* NOTE: ai is only read further down when descr1 stayed empty, which is
      exactly the case in which describe_addr() has filled it in here. */
   if (descr1[0] == 0)
   {
      describe_addr(dri->addr, dri->size, &ai);
   }
   VG_(message)(Vg_UserMsg,
                "Conflicting %s by %s at 0x%08lx size %ld",
                dri->access_type == eStore ? "store" : "load",
                thread_get_name(VgThreadIdToDrdThreadId(dri->tid)),
                dri->addr,
                dri->size);
   VG_(pp_ExeContext)(VG_(get_error_where)(err));
   if (descr1[0])
   {
      /* Symbolic description of the data object was available. */
      VG_(message)(Vg_UserMsg, "%s", descr1);
      VG_(message)(Vg_UserMsg, "%s", descr2);
   }
   else if (ai.akind == eMallocd && ai.lastchange)
   {
      /* Heap block: report the offset inside the block plus the
         allocation stack trace. */
      VG_(message)(Vg_UserMsg,
                   "Address 0x%lx is at offset %ld from 0x%lx."
                   " Allocation context:",
                   dri->addr,
                   ai.rwoffset,
                   dri->addr - ai.rwoffset);
      VG_(pp_ExeContext)(ai.lastchange);
   }
   else
   {
      VG_(message)(Vg_UserMsg, "Allocation context: unknown.");
   }
   /* Finally show the segments of other threads that conflict with this
      access. */
   thread_report_conflicting_segments(VgThreadIdToDrdThreadId(dri->tid),
                                      dri->addr, dri->size,
                                      dri->access_type);
}
/**
 * Record that the Valgrind thread vg_tid has become the running thread.
 * Does nothing when vg_tid is already the running thread.
 */
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (s_vg_running_tid != vg_tid)
   {
      thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
   }

   /* Postcondition: both the Valgrind and DRD running-thread IDs are set. */
   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}
/**
 * Instrumentation callback for a store of 'size' bytes at 'addr'.
 * Records the access and reports a race when the store conflicts with an
 * access by another thread and is not suppressed.
 */
VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* Expensive consistency check, hence only compiled in on demand. */
   tl_assert(thread_get_running_tid()
             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
#endif
   /* Guard clauses below keep the original short-circuit evaluation order. */
   if (! running_thread_is_recording())
      return;
   if (! s_drd_check_stack_accesses && thread_address_on_stack(addr))
      return;
   if (! bm_access_store_triggers_conflict(addr, addr + size))
      return;
   if (drd_is_suppressed(addr, addr + size))
      return;

   drd_report_race(addr, size, eStore);
}
/** Allocate the first segment for a thread. Call this just after * pthread_create(). */ DrdThreadId thread_post_create(const ThreadId vg_created) { const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created); tl_assert(0 <= (int)created && created < DRD_N_THREADS && created != DRD_INVALID_THREADID); s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created); s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max; s_threadinfo[created].stack_min = s_threadinfo[created].stack_max; s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max; s_threadinfo[created].stack_size = VG_(thread_get_stack_size)(vg_created); tl_assert(s_threadinfo[created].stack_max != 0); return created; }
/**
 * Assign a DRD thread ID to the soon-to-run Valgrind thread vg_created and
 * give it its first segment, whose vector clock derives from 'creator'.
 * Call this before the created thread starts executing.
 */
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
   DrdThreadId created;

   /* The created thread must not yet be known to DRD. */
   tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
   created = VgThreadIdToNewDrdThreadId(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* The new thread must not own any segments yet. */
   tl_assert(s_threadinfo[created].first == 0);
   tl_assert(s_threadinfo[created].last == 0);
   thread_append_segment(created, sg_new(creator, created));

   return created;
}
static void drd_pre_thread_create(const ThreadId creator, const ThreadId created) { const DrdThreadId drd_creator = VgThreadIdToDrdThreadId(creator); tl_assert(created != VG_INVALID_THREADID); thread_pre_create(drd_creator, created); if (IsValidDrdThreadId(drd_creator)) { thread_new_segment(drd_creator); } if (s_drd_trace_fork_join) { VG_(message)(Vg_DebugMsg, "drd_pre_thread_create creator = %d/%d, created = %d", creator, drd_creator, created); } }
/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId vg_tid)
{
   DrdThreadId drd_tid;

   tl_assert(VG_(get_running_tid)() == vg_tid);

   drd_tid = VgThreadIdToDrdThreadId(vg_tid);
   if (s_drd_trace_fork_join)
   {
      VG_(message)(Vg_DebugMsg,
                   "drd_thread_finished tid = %d/%d%s",
                   vg_tid,
                   drd_tid,
                   thread_get_joinable(drd_tid)
                   ? "" : " (which is a detached thread)");
   }
   if (s_show_stack_usage)
   {
      /* Peak stack usage = top of stack minus the lowest address ever
         touched on it. */
      const SizeT total = thread_get_stack_size(drd_tid);
      const SizeT used
         = thread_get_stack_max(drd_tid) - thread_get_stack_min_min(drd_tid);
      VG_(message)(Vg_UserMsg,
                   "thread %d/%d%s finished and used %ld bytes out of %ld"
                   " on its stack. Margin: %ld bytes.",
                   vg_tid,
                   drd_tid,
                   thread_get_joinable(drd_tid)
                   ? "" : " (which is a detached thread)",
                   used, total, total - used);
   }
   /* Forget all accesses in the finished thread's stack region. */
   drd_stop_using_mem(thread_get_stack_min(drd_tid),
                      thread_get_stack_max(drd_tid)
                      - thread_get_stack_min(drd_tid),
                      True);
   thread_stop_recording(drd_tid);
   thread_finished(drd_tid);
}
/**
 * Instrumentation callback for a store of 'size' bytes at 'addr'.
 * Records the store in the running thread's segment bitmap and reports a
 * race when it conflicts with the danger set.
 */
VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size)
{
   Segment* seg;

#if 0
   /* Consistency check, disabled because it is too expensive. */
   tl_assert(thread_get_running_tid()
             == VgThreadIdToDrdThreadId(VG_(get_running_tid())));
#endif

   if (! running_thread_is_recording())
      return;

   if (range_any_is_traced(addr, size))
   {
      drd_trace_mem_access(addr, size, eStore);
   }

   /* Record the access first, then test it against the danger set. */
   seg = running_thread_get_segment();
   bm_access_range_store(seg->bm, addr, addr + size);
   if (bm_store_has_conflict_with(thread_get_danger_set(),
                                  addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}
/**
 * Dispatch a client request issued by the program under test.
 *
 * @param tid  Valgrind thread ID of the requesting thread.
 * @param arg  arg[0] is the request code; arg[1..] are request-specific.
 * @param ret  Out: value returned to the client (0 unless set below).
 * @return True when the request was recognized and handled; asserts on
 *         unknown request codes.
 */
static Bool drd_handle_client_request(ThreadId tid, UWord* arg, UWord* ret)
{
   UWord result = 0;

   switch (arg[0])
   {
   case VG_USERREQ__GET_THREAD_SELF:
      result = tid;
      break;
   case VG_USERREQ__SET_THREAD_NAME:
      /* arg[1] is a printf-style format string, arg[2] its argument. */
      thread_set_name_fmt(VgThreadIdToDrdThreadId(VG_(get_running_tid)()),
                          (char*)arg[1], arg[2]);
      break;
   case VG_USERREQ__DRD_START_SUPPRESSION:
      /* Suppress race reports on [arg[1], arg[1] + arg[2][. */
      drd_start_suppression(arg[1], arg[1] + arg[2], "client");
      break;
   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      drd_finish_suppression(arg[1], arg[1] + arg[2]);
      break;
   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
      /* Ignore accesses between the current SP and the stack start. */
      thread_set_stack_startup(thread_get_running_tid(),
                               VG_(get_SP)(VG_(get_running_tid)()));
      break;
   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__DRD_START_RECORDING:
      thread_start_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__DRD_STOP_RECORDING:
      thread_stop_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__SET_PTHREADID:
      /* arg[1] is the pthread_t of the calling thread. */
      thread_set_pthreadid(thread_get_running_tid(), arg[1]);
      break;
   case VG_USERREQ__SET_JOINABLE:
      thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
      break;
   case VG_USERREQ__POST_THREAD_JOIN:
      tl_assert(arg[1]);
      drd_post_thread_join(thread_get_running_tid(),
                           PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__PRE_MUTEX_INIT:
      drd_pre_mutex_init(arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__POST_MUTEX_DESTROY:
      drd_post_mutex_destroy(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_PTHREAD_MUTEX_LOCK:
      drd_pre_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__POST_PTHREAD_MUTEX_LOCK:
      drd_post_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__PRE_PTHREAD_MUTEX_UNLOCK:
      drd_pre_mutex_unlock(thread_get_running_tid(), arg[1], arg[3]);
      break;
   case VG_USERREQ__SPIN_INIT_OR_UNLOCK:
      drd_spin_init_or_unlock(arg[1], arg[2]);
      break;
   case VG_USERREQ__POST_PTHREAD_COND_INIT:
      drd_post_cond_init(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_PTHREAD_COND_DESTROY:
      drd_pre_cond_destroy(arg[1]);
      break;
   case VG_USERREQ__PRE_PTHREAD_COND_WAIT:
      drd_pre_cond_wait(arg[1]/*cond*/, arg[2]/*cond_size*/, arg[3]/*mutex*/);
      break;
   case VG_USERREQ__POST_PTHREAD_COND_WAIT:
      drd_post_cond_wait(arg[1]/*cond*/, arg[3]/*mutex*/, arg[4]/*mutex_size*/);
      break;
   case VG_USERREQ__PRE_PTHREAD_COND_SIGNAL:
      drd_pre_cond_signal(arg[1]);
      break;
   case VG_USERREQ__PRE_PTHREAD_COND_BROADCAST:
      drd_pre_cond_broadcast(arg[1]);
      break;
   default:
      VG_(message)(Vg_DebugMsg,
                   "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
      return False;
   }

   *ret = result;
   return True;
}
/**
 * Dispatch a client request issued by the program under test.
 *
 * Most PRE_/POST_ pairs bracket a pthreads call: thread_enter_synchr() /
 * thread_leave_synchr() maintain a per-thread nesting counter so that the
 * DRD action runs only at the outermost nesting level (counter == 0).
 *
 * @param vg_tid  Valgrind thread ID of the requesting thread.
 * @param arg     arg[0] is the request code; arg[1..] are request-specific.
 * @param ret     Out: value returned to the client (0 unless set below).
 * @return True when the request was recognized and handled; asserts on
 *         unknown request codes.
 */
static Bool drd_handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
   UWord result = 0;
   const DrdThreadId drd_tid = thread_get_running_tid();

   tl_assert(vg_tid == VG_(get_running_tid()));
   tl_assert(VgThreadIdToDrdThreadId(vg_tid) == drd_tid);

   switch (arg[0])
   {
   case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
      result = vg_tid;
      break;
   case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
      result = drd_tid;
      break;
   case VG_USERREQ__DRD_START_SUPPRESSION:
      /* Suppress race reports on [arg[1], arg[1] + arg[2][. */
      drd_start_suppression(arg[1], arg[1] + arg[2], "client");
      break;
   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      drd_finish_suppression(arg[1], arg[1] + arg[2]);
      break;
   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
   {
      const Addr topmost_sp = highest_used_stack_address(vg_tid);
#if 0
      /* Disabled debugging code: dump the current stack trace. */
      UInt nframes;
      const UInt n_ips = 20;
      Addr ips[n_ips], sps[n_ips], fps[n_ips];
      Char desc[128];
      unsigned i;

      nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, fps, 0);
      VG_(message)(Vg_DebugMsg, "thread %d/%d", vg_tid, drd_tid);
      for (i = 0; i < nframes; i++)
      {
         VG_(describe_IP)(ips[i], desc, sizeof(desc));
         VG_(message)(Vg_DebugMsg, "[%2d] sp 0x%09lx fp 0x%09lx ip %s",
                      i, sps[i], fps[i], desc);
      }
#endif
      /* Ignore accesses below the current SP and suppress the region
         between the highest used stack address and the stack top. */
      thread_set_stack_startup(drd_tid, VG_(get_SP)(vg_tid));
      drd_start_suppression(topmost_sp, VG_(thread_get_stack_max)(vg_tid),
                            "stack top");
      break;
   }
   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__DRD_START_TRACE_ADDR:
      drd_start_tracing_address_range(arg[1], arg[1] + arg[2]);
      break;
   case VG_USERREQ__DRD_STOP_TRACE_ADDR:
      drd_stop_tracing_address_range(arg[1], arg[1] + arg[2]);
      break;
   case VG_USERREQ__DRD_STOP_RECORDING:
      thread_stop_recording(drd_tid);
      break;
   case VG_USERREQ__DRD_START_RECORDING:
      thread_start_recording(drd_tid);
      break;
   case VG_USERREQ__SET_PTHREADID:
      // pthread_self() returns 0 for programs not linked with libpthread.so.
      if (arg[1] != INVALID_POSIX_THREADID)
         thread_set_pthreadid(drd_tid, arg[1]);
      break;
   case VG_USERREQ__SET_JOINABLE:
      thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
      break;
   case VG_USERREQ__POST_THREAD_JOIN:
      tl_assert(arg[1]);
      drd_post_thread_join(drd_tid, PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__PRE_THREAD_CANCEL:
      tl_assert(arg[1]);
      drd_pre_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]));
      break;
   case VG_USERREQ__POST_THREAD_CANCEL:
      tl_assert(arg[1]);
      drd_post_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]), arg[2]);
      break;
   /* Mutex operations. */
   case VG_USERREQ__PRE_MUTEX_INIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_mutex_init(arg[1], arg[2]);
      break;
   case VG_USERREQ__POST_MUTEX_INIT:
      thread_leave_synchr(drd_tid);
      break;
   case VG_USERREQ__PRE_MUTEX_DESTROY:
      thread_enter_synchr(drd_tid);
      break;
   case VG_USERREQ__POST_MUTEX_DESTROY:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_post_mutex_destroy(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_MUTEX_LOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_mutex_lock(arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__POST_MUTEX_LOCK:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_post_mutex_lock(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_MUTEX_UNLOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_mutex_unlock(arg[1], arg[2]);
      break;
   case VG_USERREQ__POST_MUTEX_UNLOCK:
      thread_leave_synchr(drd_tid);
      break;
   /* Spinlock operations. */
   case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_spin_init_or_unlock(arg[1]);
      break;
   case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
      thread_leave_synchr(drd_tid);
      break;
   /* Condition variable operations. */
   case VG_USERREQ__PRE_COND_INIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_cond_init(arg[1]);
      break;
   case VG_USERREQ__POST_COND_INIT:
      thread_leave_synchr(drd_tid);
      break;
   case VG_USERREQ__PRE_COND_DESTROY:
      thread_enter_synchr(drd_tid);
      break;
   case VG_USERREQ__POST_COND_DESTROY:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_post_cond_destroy(arg[1]);
      break;
   case VG_USERREQ__PRE_COND_WAIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_cond_wait(arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__POST_COND_WAIT:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_post_cond_wait(arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__PRE_COND_SIGNAL:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_cond_signal(arg[1]);
      break;
   case VG_USERREQ__POST_COND_SIGNAL:
      thread_leave_synchr(drd_tid);
      break;
   case VG_USERREQ__PRE_COND_BROADCAST:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_pre_cond_broadcast(arg[1]);
      break;
   case VG_USERREQ__POST_COND_BROADCAST:
      thread_leave_synchr(drd_tid);
      break;
   /* Semaphore operations. */
   case VG_USERREQ__PRE_SEM_INIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_semaphore_init(arg[1], arg[2], arg[3]);
      break;
   case VG_USERREQ__POST_SEM_INIT:
      thread_leave_synchr(drd_tid);
      break;
   case VG_USERREQ__PRE_SEM_DESTROY:
      thread_enter_synchr(drd_tid);
      break;
   case VG_USERREQ__POST_SEM_DESTROY:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_semaphore_destroy(arg[1]);
      break;
   case VG_USERREQ__PRE_SEM_WAIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_semaphore_pre_wait(drd_tid, arg[1]);
      break;
   case VG_USERREQ__POST_SEM_WAIT:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_semaphore_post_wait(drd_tid, arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_SEM_POST:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_semaphore_pre_post(drd_tid, arg[1]);
      break;
   case VG_USERREQ__POST_SEM_POST:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_semaphore_post_post(drd_tid, arg[1], arg[2]);
      break;
   /* Barrier operations. */
   case VG_USERREQ__PRE_BARRIER_INIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_barrier_init(arg[1], arg[2], arg[3], arg[4]);
      break;
   case VG_USERREQ__POST_BARRIER_INIT:
      thread_leave_synchr(drd_tid);
      break;
   case VG_USERREQ__PRE_BARRIER_DESTROY:
      thread_enter_synchr(drd_tid);
      break;
   case VG_USERREQ__POST_BARRIER_DESTROY:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_barrier_destroy(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_BARRIER_WAIT:
      if (thread_enter_synchr(drd_tid) == 0)
         drd_barrier_pre_wait(drd_tid, arg[1], arg[2]);
      break;
   case VG_USERREQ__POST_BARRIER_WAIT:
      if (thread_leave_synchr(drd_tid) == 0)
         drd_barrier_post_wait(drd_tid, arg[1], arg[2], arg[3]);
      break;
   /* Reader-writer lock operations. */
   case VG_USERREQ__PRE_RWLOCK_INIT:
      rwlock_pre_init(arg[1]);
      break;
   case VG_USERREQ__POST_RWLOCK_DESTROY:
      rwlock_post_destroy(arg[1]);
      break;
   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         rwlock_pre_rdlock(arg[1]);
      break;
   case VG_USERREQ__POST_RWLOCK_RDLOCK:
      if (thread_leave_synchr(drd_tid) == 0)
         rwlock_post_rdlock(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         rwlock_pre_wrlock(arg[1]);
      break;
   case VG_USERREQ__POST_RWLOCK_WRLOCK:
      if (thread_leave_synchr(drd_tid) == 0)
         rwlock_post_wrlock(arg[1], arg[2]);
      break;
   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
      if (thread_enter_synchr(drd_tid) == 0)
         rwlock_pre_unlock(arg[1]);
      break;
   case VG_USERREQ__POST_RWLOCK_UNLOCK:
      thread_leave_synchr(drd_tid);
      break;
   default:
      VG_(message)(Vg_DebugMsg,
                   "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
      return False;
   }

   *ret = result;
   return True;
}