/*
 * cc_op  add/sub/mul  adc/sbb  shl/Shl/sar   tmp = cond(cc_op(cc_dep1, cc_dep2))
 *        and/or/xor   inc/dec  rol/ror       tmp = cond(cc_op(cc_dep1, 0))
 *
 * The taintness of tmp depends on taintness of both args.
 * (we can't handle and(cc_dep1, 0) which gives an untainted result)
 *
 * cf. valgrind guest_x86_defs.h
 */
/* Instrument an Ist_WrTmp whose rhs is an Iex_CCall (clean helper call).
   Only x86g_calculate_condition is tracked for taint; any other callee
   falls through to a generic "else" helper.  Emits a dirty-helper call
   into sb_out that propagates taint from cc_dep1/cc_dep2 into tmp. */
void instrument_WrTmp_CCall(IRStmt* st, IRSB* sb_out)
{
    IRTemp tmp = st->Ist.WrTmp.tmp;          /* destination temporary */
    IRExpr* data = st->Ist.WrTmp.data;       /* the Iex_CCall expression */
    IRCallee* cee = data->Iex.CCall.cee;
    IRExpr** args = data->Iex.CCall.args;
    IRDirty* di;

    if (VG_(strcmp)(cee->name, "x86g_calculate_condition") == 0)
    {
        /* Argument layout of x86g_calculate_condition:
           args[0] = cond, args[1] = cc_op, args[2..3] = cc_dep1/cc_dep2. */
        IRExpr* cond = args[0];
        IRExpr* cc_op = args[1];
        IRExpr* cc_dep1 = args[2];
        IRExpr* cc_dep2 = args[3];

        /* cond is always a U32 constant; the others are atoms (RdTmp or
           Const) and, when constant, must be U32. */
        tl_assert(cond->tag == Iex_Const && cond->Iex.Const.con->tag == Ico_U32);
        tl_assert(isIRAtom(cc_op));
        tl_assert(isIRAtom(cc_dep1));
        tl_assert(isIRAtom(cc_dep2));
        if (cc_op->tag == Iex_Const) tl_assert(cc_op->Iex.Const.con->tag == Ico_U32);
        if (cc_dep1->tag == Iex_Const) tl_assert(cc_dep1->Iex.Const.con->tag == Ico_U32);
        if (cc_dep2->tag == Iex_Const) tl_assert(cc_dep2->Iex.Const.con->tag == Ico_U32);

        // typeOf(x86g_calculate_condition) == typeOf(tmp) == I32

        /* Pass both the *identity* of the dep temporaries (IRTemp_INVALID for
           constants) — so the helper can look up their taint — and their
           run-time *values* (via assignNew_HWord for temporaries, or the
           constant itself). */
        di = unsafeIRDirty_0_N(0,
                "helper_instrument_WrTmp_CCall_x86g_calculate_condition",
                VG_(fnptr_to_fnentry)(helper_instrument_WrTmp_CCall_x86g_calculate_condition),
                mkIRExprVec_7(
                    mkIRExpr_HWord(tmp),
                    mkIRExpr_HWord((cc_dep1->tag == Iex_RdTmp) ? cc_dep1->Iex.RdTmp.tmp : IRTemp_INVALID),
                    mkIRExpr_HWord((cc_dep2->tag == Iex_RdTmp) ? cc_dep2->Iex.RdTmp.tmp : IRTemp_INVALID),
                    mkIRExpr_HWord(cond->Iex.Const.con->Ico.U32),
                    (cc_op->tag == Iex_RdTmp)   ? assignNew_HWord(sb_out, cc_op)   : mkIRExpr_HWord(cc_op->Iex.Const.con->Ico.U32),
                    (cc_dep1->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, cc_dep1) : mkIRExpr_HWord(cc_dep1->Iex.Const.con->Ico.U32),
                    (cc_dep2->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, cc_dep2) : mkIRExpr_HWord(cc_dep2->Iex.Const.con->Ico.U32))
                );
        addStmtToIRSB(sb_out, IRStmt_Dirty(di));
    }
    else
    {
        /* Unknown clean-helper callee: fall back to the generic handler. */
        di = unsafeIRDirty_0_N(0,
                "helper_instrument_WrTmp_CCall_else",
                VG_(fnptr_to_fnentry)(helper_instrument_WrTmp_CCall_else),
                mkIRExprVec_0()
                );
        addStmtToIRSB(sb_out, IRStmt_Dirty(di));
    }
}
/* lk_instrument: Lackey-style instrumentation pass.  Copies sbIn into a new
   superblock, inserting dirty-helper calls to count superblocks, guest
   instructions, IR statements, Jcc outcomes (--basic-counts), to record
   per-type load/store/ALU details (--detailed-counts), and to buffer memory
   access events for later flushing (--trace-mem). */
static IRSB* lk_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   HChar      fnname[100];
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;            /* address/len of last IMark seen */
   UInt       ilen  = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_basic_counts) {
      /* Count this superblock. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                              VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                              mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_sbs) {
      /* Print this superblock's address. */
      di = unsafeIRDirty_0_N( 0, "trace_superblock",
                              VG_(fnptr_to_fnentry)( &trace_superblock ),
                              mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) ) );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      events_used = 0;   /* reset the per-SB event buffer */
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (clo_basic_counts) {
         /* Count one VEX statement. */
         di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                 VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                 mkIRExprVec_0() );
         addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
      }

      switch (st->tag) {
         /* Statements with no memory access: copy through verbatim. */
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            if (clo_basic_counts) {
               /* Needed to be able to check for inverted condition in
                  Ist_Exit */
               iaddr = st->Ist.IMark.addr;
               ilen  = st->Ist.IMark.len;
               /* Count guest instruction. */
               di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                       VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                       mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

               /* An unconditional branch to a known destination in the
                * guest's instructions can be represented, in the IRSB to
                * instrument, by the VEX statements that are the
                * translation of that known destination. This feature is
                * called 'SB chasing' and can be influenced by command
                * line option --vex-guest-chase-thresh.
                *
                * To get an accurate count of the calls to a specific
                * function, taking SB chasing into account, we need to
                * check for each guest instruction (Ist_IMark) if it is
                * the entry point of a function.
                */
               tl_assert(clo_fnname);
               tl_assert(clo_fnname[0]);
               if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                            fnname, sizeof(fnname))
                   && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                  di = unsafeIRDirty_0_N( 0, "add_one_func_call",
                                          VG_(fnptr_to_fnentry)( &add_one_func_call ),
                                          mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads. See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            if (clo_detailed_counts) {
               IRExpr* expr = st->Ist.WrTmp.data;
               IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
               tl_assert(type != Ity_INVALID);
               switch (expr->tag) {
                  case Iex_Load:
                     instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                     break;
                  case Iex_Unop:
                  case Iex_Binop:
                  case Iex_Triop:
                  case Iex_Qop:
                  case Iex_ITE:
                     instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                     break;
                  default:
                     break;
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_StoreG: {
            /* Guarded store: pass the guard through to the event/detail. */
            IRStoreG* sg = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw_guarded( sbOut, sg->addr,
                                    sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LoadG: {
            /* Guarded load: the *loaded* type (pre-widening) is what hits
               memory, so that is what we record. */
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dr_guarded( sbOut, lg->addr,
                                    sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I think
               that is the same behaviour as it was before IRCAS was
               introduced, since prior to that point, the Vex front ends
               would translate a lock-prefixed instruction into a (normal)
               read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem) {
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
                  /* flush events before LL, helps SC to succeed */
                  flushEvents(sbOut);
               }
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_basic_counts) {
               // The condition of a branch was inverted by VEX if a taken
               // branch is in fact a fall trough according to client address
               tl_assert(iaddr != 0);
               dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32
                                         : st->Ist.Exit.dst->Ico.U64;
               condition_inverted = (dst == iaddr + ilen);

               /* Count Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                          VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                          VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
               flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
               /* Count non-taken Jcc (only reached when the branch above
                  was not taken). */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)( &add_one_Jcc_untaken ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc_untaken ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

         default:
            ppIRStmt(st);
            tl_assert(0);
      }
   }

   if (clo_basic_counts) {
      /* Count this basic block. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                              VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                              mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
static IRSB* sh_instrument ( VgCallbackClosure* closure, IRSB* sbIn, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { IRDirty* di; Int i; IRSB* sbOut; IRType type; IRTypeEnv* tyenv = sbIn->tyenv; IRStmt* imarkst; char *fnname = global_fnname; if (gWordTy != hWordTy) { /* We don't currently support this case. */ VG_(tool_panic)("host/guest word size mismatch"); } /* Set up SB */ sbOut = deepCopyIRSBExceptStmts(sbIn); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) { addStmtToIRSB( sbOut, sbIn->stmts[i] ); i++; } if (clo_trace_mem) { events_used = 0; } for (/*use current i*/; i < sbIn->stmts_used; i++) { IRStmt* st = sbIn->stmts[i]; if (!st || st->tag == Ist_NoOp) continue; /* Prettyprint All IR statements Starting from main that valgrind has generated */ /*if(clo_trace_mem){ ppIRStmt(st); VG_(printf)("\n"); } */ switch (st->tag) { case Ist_NoOp: case Ist_AbiHint: case Ist_PutI: case Ist_MBE: break; case Ist_Put: if(clo_trace_mem){ Int reg_no = st->Ist.Put.offset; if(reg_no == layout->offset_SP || reg_no == 20){ IRExpr* data = st->Ist.Put.data; if(data->tag == Iex_RdTmp){ /* Add registerwrite instrumentation to IRSBout */ addEvent_RegW( sbOut, fnname, reg_no, data); } } } addStmtToIRSB( sbOut, st ); break; case Ist_IMark: imarkst = st; if (VG_(get_fnname_if_entry)( st->Ist.IMark.addr, fnname, 100)){ //VG_(printf)("-- %s --\n",fnname); if(0 == VG_(strcmp)(fnname, "main")) { di = unsafeIRDirty_0_N( 0, "trace_debug", VG_(fnptr_to_fnentry)(trace_debug), mkIRExprVec_0() ); addStmtToIRSB( sbOut, IRStmt_Dirty(di) ); //VG_(printf)("SP:%d\n",layout->offset_SP); clo_trace_mem = True; } if(clo_trace_mem){ addEvent_FnEntry(sbOut, fnname); } } if (clo_trace_mem) { // WARNING: do not remove this function call, even if you // aren't interested in instruction reads. See the comment // above the function itself for more detail. 
addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ), st->Ist.IMark.len ); } addStmtToIRSB( sbOut, st ); break; case Ist_WrTmp: // Add a call to trace_load() if --trace-mem=yes. if (clo_trace_mem) { IRExpr* data = st->Ist.WrTmp.data; if (data->tag == Iex_Load) { addEvent_Dr( sbOut, fnname, data->Iex.Load.addr, sizeofIRType(data->Iex.Load.ty) ); } } addStmtToIRSB( sbOut, st ); break; case Ist_Store: if (clo_trace_mem) { IRExpr* data = st->Ist.Store.data; addEvent_Dw( sbOut, fnname, st->Ist.Store.addr, sizeofIRType(typeOfIRExpr(tyenv, data)) ); } addStmtToIRSB( sbOut, st ); break; case Ist_Dirty: { if (clo_trace_mem) { Int dsize; IRDirty* d = st->Ist.Dirty.details; if (d->mFx != Ifx_None) { // This dirty helper accesses memory. Collect the details. tl_assert(d->mAddr != NULL); tl_assert(d->mSize != 0); dsize = d->mSize; if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) addEvent_Dr( sbOut, fnname, d->mAddr, dsize ); if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) addEvent_Dw( sbOut, fnname, d->mAddr, dsize ); } else { tl_assert(d->mAddr == NULL); tl_assert(d->mSize == 0); } } addStmtToIRSB( sbOut, st ); break; } case Ist_CAS: { /* We treat it as a read and a write of the location. I think that is the same behaviour as it was before IRCAS was introduced, since prior to that point, the Vex front ends would translate a lock-prefixed instruction into a (normal) read followed by a (normal) write. 
*/ Int dataSize; IRType dataTy; IRCAS* cas = st->Ist.CAS.details; tl_assert(cas->addr != NULL); tl_assert(cas->dataLo != NULL); dataTy = typeOfIRExpr(tyenv, cas->dataLo); dataSize = sizeofIRType(dataTy); if (clo_trace_mem) { addEvent_Dr( sbOut, fnname, cas->addr, dataSize ); addEvent_Dw( sbOut, fnname, cas->addr, dataSize ); } addStmtToIRSB( sbOut, st ); break; } case Ist_LLSC: { IRType dataTy; if (st->Ist.LLSC.storedata == NULL) { /* LL */ dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result); if (clo_trace_mem) addEvent_Dr( sbOut, fnname, st->Ist.LLSC.addr, sizeofIRType(dataTy) ); } else { /* SC */ dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata); if (clo_trace_mem) addEvent_Dw( sbOut, fnname, st->Ist.LLSC.addr, sizeofIRType(dataTy) ); } addStmtToIRSB( sbOut, st ); break; } case Ist_Exit: if (clo_trace_mem) { flushEvents(sbOut); } addStmtToIRSB( sbOut, st ); // Original statement break; default: tl_assert(0); } } if(clo_trace_mem){ if(sbIn->jumpkind == Ijk_Ret){ VG_(get_fnname)(imarkst->Ist.IMark.addr, fnname, 100); addEvent_FnExit(sbOut,fnname); } } if (clo_trace_mem) { /* At the end of the sbIn. Flush outstandings. */ flushEvents(sbOut); } return sbOut; }
/* This version of flushEvents avoids callbacks entirely, except when the
   number of outstanding events is enough to be flushed - in which case a
   call to flush_data() is made.  In all other cases, events are handled by
   creating IR to encode and store the memory access information to the
   array of outstanding events. */
/* NOTE(review): this function hard-codes a 64-bit host — host globals are
   addressed with mkU64/Iop_Add64 and pointers are cast through ULong.
   Confirm this tool is only built for 64-bit targets. */
static void flushEventsRange(IRSB* sb, Int start, Int size)
{
    // Conditionally call the flush method if there's not enough room for
    // all the new events.  This may flush an incomplete block.
    IRExpr *entries_addr = mkU64((ULong)&theEntries);
    IRExpr *entries = load(ENDIAN, Ity_I32, entries_addr);

    IRExpr *max_entries_addr = mkU64((ULong)&theMaxEntries);
    IRExpr *max_entries = load(ENDIAN, Ity_I32, max_entries_addr);

    /* Guarded dirty call: flush_data() runs only when
       theEntries + size > theMaxEntries. */
    IRDirty* di = unsafeIRDirty_0_N(0,
            "flush_data", VG_(fnptr_to_fnentry)( flush_data ),
            mkIRExprVec_0() );
    di->guard = binop(Iop_CmpLT32S, max_entries,
                      binop(Iop_Add32, entries, mkU32(size)));
    addStmtToIRSB( sb, IRStmt_Dirty(di) );

    // Reload entries since it might have been changed by the callback
    entries = load(ENDIAN, Ity_I32, entries_addr);

    // Initialize the first address where we'll write trace information.
    // This will be advanced in the loop.
    // addr = theBlock + entries * sizeof(MV_TraceAddr)
    IRExpr *addr = binop(Iop_Add64,
                         load(ENDIAN, Ity_I64, mkU64((ULong)&theBlock)),
                         unop(Iop_32Uto64,
                              binop(Iop_Mul32, entries,
                                    mkU32(sizeof(MV_TraceAddr)))));

    // Grab the thread id
    IRExpr *thread = load(ENDIAN, Ity_I32, mkU64((ULong)&theThread));

    Int i;
    for (i = start; i < start+size; i++) {
        Event* ev = &events[i];

        /* Pack event kind, access type and size into one 32-bit word. */
        uint32 type = 0;
        switch (ev->ekind) {
            case Event_Ir:
                type = MV_ShiftedInstr;
                break;
            case Event_Dr:
                type = MV_ShiftedRead;
                break;
            case Event_Dw:
            case Event_Dm:
                type = MV_ShiftedWrite;
                break;
            default:
                tl_assert(0);
        }

        type |= ev->type << MV_DataShift;
        type |= ((uint32)ev->size << MV_SizeShift);

        /* Fold the thread id in at run time; 'type' itself is a
           compile-time constant for this event. */
        IRExpr *data = binop(Iop_Or32, mkU32(type), thread);

        // Construct the address and store it
        IRStmt *store;

        store = IRStmt_Store(ENDIAN, addr, ev->addr);
        addStmtToIRSB( sb, store );

        // Advance to the type
        addr = binop(Iop_Add64, addr, mkU64(sizeof(uint64)));

        store = IRStmt_Store(ENDIAN, addr, data);
        addStmtToIRSB( sb, store );

        // Advance to the next entry
        addr = binop(Iop_Add64, addr,
                     mkU64(sizeof(MV_TraceAddr)-sizeof(uint64)));
    }

    // Store the new entry count
    IRStmt *entries_store =
        IRStmt_Store(ENDIAN, entries_addr,
                     binop(Iop_Add32, entries, mkU32(size)));
    addStmtToIRSB( sb, entries_store );
}
static IRSB* fz_instrument ( VgCallbackClosure* closure, IRSB* sb_in, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { Int i; IRSB* sb_out; IRDirty* di; if (gWordTy != hWordTy) { /* We don't currently support this case. */ VG_(tool_panic)("host/guest word size mismatch"); } /* Set up SB */ sb_out = deepCopyIRSBExceptStmts(sb_in); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sb_in->stmts_used && sb_in->stmts[i]->tag != Ist_IMark) { addStmtToIRSB(sb_out, sb_in->stmts[i]); i++; } di = unsafeIRDirty_0_N(0, "helper_instrument_superblock", VG_(fnptr_to_fnentry)(helper_instrument_superblock), mkIRExprVec_0() ); addStmtToIRSB(sb_out, IRStmt_Dirty(di)); for (/*use current i*/; i < sb_in->stmts_used; i++) { IRStmt* st = sb_in->stmts[i]; if (!st) continue; switch (st->tag) { case Ist_NoOp: case Ist_IMark: case Ist_AbiHint: case Ist_Dirty: case Ist_MBE: break; case Ist_Put: instrument_Put(st, sb_out); break; case Ist_PutI: instrument_PutI(st, sb_out); break; case Ist_WrTmp: instrument_WrTmp(st, sb_out); break; case Ist_Store: instrument_Store(st, sb_out); break; case Ist_CAS: addStmtToIRSB(sb_out, st); // dirty helpers use temporaries (oldHi, oldLo) defined in the instruction instrument_CAS(st, sb_out); break; case Ist_LLSC: instrument_LLSC(st, sb_out); break; case Ist_Exit: instrument_Exit(st, sb_out); break; } if (st->tag != Ist_CAS) { addStmtToIRSB(sb_out, st); } } // ppIRSB(sb_out); return sb_out; }