/* Record a lock-order violation for 'thr': the pair of locks was
   observed being acquired in an order that contradicts an earlier
   observed acquisition order.  Locks are converted to their persistent
   (LockP) form before being stored in the error record. */
void HG_(record_error_LockOrder)( Thread* thr,
                                  Lock* shouldbe_earlier_lk,
                                  Lock* shouldbe_later_lk,
                                  ExeContext* shouldbe_earlier_ec,
                                  ExeContext* shouldbe_later_ec,
                                  ExeContext* actual_earlier_ec )
{
   XError err;
   tl_assert( HG_(is_sane_Thread)(thr) );
   /* Lock-order tracking must be enabled for this error to arise. */
   tl_assert( HG_(clo_track_lockorders) );
   init_XError(&err);
   err.tag = XE_LockOrder;
   err.XE.LockOrder.thr = thr;
   err.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   err.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   err.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   err.XE.LockOrder.shouldbe_later_ec = shouldbe_later_ec;
   err.XE.LockOrder.actual_earlier_ec = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &err );
}
/* Record an attempt by 'thr' to unlock lock 'lk' when it was not held
   by anybody.  The lock is converted to its persistent (LockP) form
   before being stored in the error record. */
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr  = thr;
   /* BUGFIX: pass the 'allowed_to_be_invalid' flag, as every other
      mk_LockP_from_LockN call site in this file does; the one-argument
      call did not match the function's signature. */
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}
/* Record an attempt by 'thr' to unlock lock 'lk' while it is held by a
   different thread, 'owner'.  The lock is converted to its persistent
   (LockP) form before being stored in the error record. */
void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError err;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&err);
   err.tag = XE_UnlockForeign;
   err.XE.UnlockForeign.thr   = thr;
   err.XE.UnlockForeign.owner = owner;
   err.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &err );
}