/* Report a lock-acquisition-order violation observed by thread 'thr':
   'shouldbe_earlier_lk' should have been taken before
   'shouldbe_later_lk', but history shows the opposite order.  The
   three ExeContexts record where the required-earlier, required-later
   and actually-earlier acquisitions happened.  Only callable when
   lock-order tracking is enabled.
   NOTE(review): a second, differently-typed definition of this same
   symbol appears later in this file (Addr-based parameters) — only
   one of the two can be compiled into a single translation unit. */
void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec )
{
   XError xerr;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(clo_track_lockorders) );
   init_XError(&xerr);
   xerr.tag = XE_LockOrder;
   xerr.XE.LockOrder.thr = thr;
   /* Take persistent (LockP) copies of both locks so the eventual
      report stays valid even if the client destroys them later. */
   xerr.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xerr.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xerr.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xerr.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xerr.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_LockOrder, 0, NULL, &xerr );
}
/* Report a data race by thread 'thr' on 'data_addr', an access of
   'szB' bytes (1/2/4/8), reading or writing per 'isWrite'.  The
   h1_ct* arguments describe the historical conflicting thread and the
   segment bounds of its access, when known (may be NULL).  Most of
   the expensive detail (address description, conflicting-access info)
   is deliberately deferred to HG_(update_extra), since most reports
   are duplicates and get discarded. */
void HG_(record_error_Race) ( Thread* thr, 
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
     /* Debug-only trace of the section kind; disabled by 'if (0)'. */
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
     /* SectGOT is required on arm-linux */
     if (sect == Vg_SectGOT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   /* Historical conflicting-thread info is cheap (already computed by
      the caller), so it is recorded eagerly. */
   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid, XE_Race, data_addr, NULL, &xe );
}
/* Report that thread 'thr' attempted to unlock address 'lock_ga',
   which does not correspond to any lock Helgrind knows about. */
void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xerr;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xerr);
   xerr.tag = XE_UnlockBogus;
   xerr.XE.UnlockBogus.lock_ga = lock_ga;
   xerr.XE.UnlockBogus.thr     = thr;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_UnlockBogus, 0, NULL, &xerr );
}
/* Report a one-off miscellaneous error described by 'errstr' (which
   is copied into the string table, so the caller keeps ownership of
   its buffer).
   Fix: unlike the sibling HG_(record_error_Misc_w_aux), this variant
   previously never set the XE.Misc auxiliary fields, silently relying
   on init_XError's defaults.  Set them to NULL explicitly so the two
   entry points populate the union identically. */
void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = NULL;  /* no auxiliary text for this variant */
   xe.XE.Misc.auxctx = NULL;  /* no auxiliary context either */
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_Misc, 0, NULL, &xe );
}
/* Report that thread 'thr' unlocked lock 'lk' while it was not held
   by anybody.
   Fix: every other call to mk_LockP_from_LockN in this file (see
   record_error_LockOrder and record_error_UnlockForeign) passes the
   explicit 'allowed_to_be_invalid' flag as a second argument; this
   call site passed only one argument, which is inconsistent with the
   two-argument helper used elsewhere.  Pass False, matching the
   siblings. */
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr = thr;
   /* Persistent (LockP) copy, so the report survives lock
      destruction by the client. */
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_UnlockUnlocked, 0, NULL, &xe );
}
/* Report a miscellaneous error described by 'errstr', with optional
   auxiliary text 'auxstr' and auxiliary context 'auxctx' (both may be
   NULL).  Strings are duplicated into the string table, so callers
   keep ownership of their buffers. */
void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xerr;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xerr);
   xerr.tag = XE_Misc;
   xerr.XE.Misc.thr    = thr;
   xerr.XE.Misc.errstr = string_table_strdup(errstr);
   /* The auxiliary string is optional; only copy it when present. */
   if (auxstr) {
      xerr.XE.Misc.auxstr = string_table_strdup(auxstr);
   } else {
      xerr.XE.Misc.auxstr = NULL;
   }
   xerr.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_Misc, 0, NULL, &xerr );
}
/* Report that a pthread_* function 'fnname' called by thread 'thr'
   unexpectedly failed with error code 'err' (described by 'errstr').
   Both strings are copied into the string table; callers retain
   ownership of their buffers. */
void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
                                     Word err, HChar* errstr )
{
   XError xerr;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xerr);
   xerr.tag = XE_PthAPIerror;
   xerr.XE.PthAPIerror.thr    = thr;
   xerr.XE.PthAPIerror.err    = err;
   xerr.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xerr.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_PthAPIerror, 0, NULL, &xerr );
}
/* Report that thread 'thr' unlocked lock 'lk' which is currently held
   by a different thread, 'owner'. */
void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xerr;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xerr);
   xerr.tag = XE_UnlockForeign;
   xerr.XE.UnlockForeign.thr   = thr;
   xerr.XE.UnlockForeign.owner = owner;
   /* Persistent copy; the report must outlive the client's lock. */
   xerr.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_UnlockForeign, 0, NULL, &xerr );
}
/* Report a lock-acquisition-order violation: the lock at guest
   address 'before_ga' should have been acquired before the one at
   'after_ga'; 'before_ec'/'after_ec' record the acquisition sites.
   Silently does nothing when lock-order tracking is disabled.
   NOTE(review): this is a SECOND definition of
   HG_(record_error_LockOrder) — an earlier definition in this file
   takes Lock*/ExeContext* parameters and asserts (rather than tests)
   clo_track_lockorders.  The two appear to come from different
   versions of the file; only one can be kept in a compiled
   translation unit.  Confirm which version is intended. */
void HG_(record_error_LockOrder)( Thread* thr, Addr before_ga, Addr after_ga,
                                  ExeContext* before_ec, ExeContext* after_ec )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   /* Lock-order tracking may be switched off; bail out quietly. */
   if (!HG_(clo_track_lockorders))
      return;
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.before_ga = before_ga;
   xe.XE.LockOrder.before_ec = before_ec;
   xe.XE.LockOrder.after_ga  = after_ga;
   xe.XE.LockOrder.after_ec  = after_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid, XE_LockOrder, 0, NULL, &xe );
}