static IRBB* cg_instrument ( IRBB* bbIn, VexGuestLayout* layout,
                             Addr64 orig_addr_noredir, VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i, isize;
   IRStmt*    st;
   Addr64     cia; /* address of current insn */
   CgState    cgs;
   IRTypeEnv* tyenv = bbIn->tyenv;
   InstrInfo* curr_inode = NULL;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB, including copying of the where-next stuff. */
   cgs.bbOut           = emptyIRBB();
   cgs.bbOut->tyenv    = dopyIRTypeEnv(tyenv);
   tl_assert( isIRAtom(bbIn->next) );
   cgs.bbOut->next     = dopyIRExpr(bbIn->next);
   cgs.bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRBB( cgs.bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;

   // Set up running state and get block info
   cgs.events_used = 0;
   cgs.bbInfo      = get_BB_info(bbIn, (Addr)orig_addr_noredir);
   cgs.bbInfo_i    = 0;

   if (DEBUG_CG)
      VG_(printf)("\n\n---------- cg_instrument ----------\n");

   // Traverse the block, initialising inodes, adding events and flushing as
   // necessary.
   for (/*use current i*/; i < bbIn->stmts_used; i++) {

      st = bbIn->stmts[i];
      tl_assert(isFlatIRStmt(st));

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MFence:
            break;

         case Ist_IMark:
            cia   = st->Ist.IMark.addr;
            isize = st->Ist.IMark.len;

            // If Vex fails to decode an instruction, the size will be zero.
            // Pretend otherwise.
            if (isize == 0) isize = VG_MIN_INSTR_SZB;

            // Sanity-check size.
            tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB)
                       || VG_CLREQ_SZB == isize );

            // Get space for and init the inode, record it as the current one.
            // Subsequent Dr/Dw/Dm events from the same instruction will
            // also use it.
            curr_inode = setup_InstrInfo(&cgs, cia, isize);

            addEvent_Ir( &cgs, curr_inode );
            break;

         case Ist_Tmp: {
            IRExpr* data = st->Ist.Tmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* aexpr = data->Iex.Load.addr;
               // Note also, endianness info is ignored.  I guess
               // that's not interesting.
               addEvent_Dr( &cgs, curr_inode,
                            sizeofIRType(data->Iex.Load.ty), aexpr );
            }
            break;
         }

         case Ist_Store: {
            IRExpr* data  = st->Ist.Store.data;
            IRExpr* aexpr = st->Ist.Store.addr;
            addEvent_Dw( &cgs, curr_inode,
                         sizeofIRType(typeOfIRExpr(tyenv, data)), aexpr );
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               // Large (eg. 28B, 108B, 512B on x86) data-sized
               // instructions will be done inaccurately, but they're
               // very rare and this avoids errors from hitting more
               // than two cache lines in the simulation.
               if (dataSize > MIN_LINE_SIZE)
                  dataSize = MIN_LINE_SIZE;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( &cgs, curr_inode, dataSize, d->mAddr );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( &cgs, curr_inode, dataSize, d->mAddr );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         case Ist_Exit:
            /* We may never reach the next statement, so need to flush
               all outstanding transactions now. */
            flushEvents( &cgs );
            break;

         default:
            tl_assert(0);
            break;
      }

      /* Copy the original statement */
      addStmtToIRBB( cgs.bbOut, st );

      if (DEBUG_CG) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }
   }

   /* At the end of the bb.  Flush outstandings. */
   flushEvents( &cgs );

   /* done.  stay sane ... */
   tl_assert(cgs.bbInfo_i == cgs.bbInfo->n_instrs);

   if (DEBUG_CG) {
      VG_(printf)( "goto {");
      ppIRJumpKind(bbIn->jumpkind);
      VG_(printf)( "} ");
      ppIRExpr( bbIn->next );
      VG_(printf)( "}\n");
   }

   return cgs.bbOut;
}
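
/* For context: an instrumenter such as cg_instrument only runs once it has
   been handed to the Valgrind core.  The sketch below shows the usual
   registration pattern in a tool's pre_clo_init function.  It is an
   illustration, not the tool's actual code: the companion callback names
   (cg_post_clo_init, cg_fini) and the descriptive strings are assumptions
   here; only cg_instrument is taken from the listing above. */

static void cg_pre_clo_init ( void )
{
   /* Descriptive strings are placeholders, not taken from the original. */
   VG_(details_name)            ("Cachegrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a cache profiler");
   VG_(details_copyright_author)("GPL'd");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   /* Hand the three core callbacks to the Valgrind core: cg_instrument is
      the function shown above; cg_post_clo_init and cg_fini are assumed
      companion callbacks. */
   VG_(basic_tool_funcs)        (cg_post_clo_init, cg_instrument, cg_fini);
}

/* The tool additionally passes cg_pre_clo_init to
   VG_DETERMINE_INTERFACE_VERSION; the exact form of that macro varies a
   little across Valgrind versions, so it is omitted here. */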
static IRSB* lk_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   HChar      fnname[100];
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;
   UInt       ilen = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_basic_counts) {
      /* Count this superblock. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_sbs) {
      /* Print this superblock's address. */
      di = unsafeIRDirty_0_N( 0, "trace_superblock",
                                 VG_(fnptr_to_fnentry)( &trace_superblock ),
                                 mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) ) );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (clo_basic_counts) {
         /* Count one VEX statement. */
         di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                    VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                    mkIRExprVec_0() );
         addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
      }

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            if (clo_basic_counts) {
               /* Needed to be able to check for inverted condition in
                  Ist_Exit */
               iaddr = st->Ist.IMark.addr;
               ilen  = st->Ist.IMark.len;

               /* Count guest instruction. */
               di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                          VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

               /* An unconditional branch to a known destination in the
                * guest's instructions can be represented, in the IRSB to
                * instrument, by the VEX statements that are the
                * translation of that known destination.  This feature is
                * called 'SB chasing' and can be influenced by the command
                * line option --vex-guest-chase-thresh.
                *
                * To get an accurate count of the calls to a specific
                * function, taking SB chasing into account, we need to
                * check for each guest instruction (Ist_IMark) if it is
                * the entry point of a function.
                */
               tl_assert(clo_fnname);
               tl_assert(clo_fnname[0]);
               if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                            fnname, sizeof(fnname))
                   && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                  di = unsafeIRDirty_0_N(
                          0, "add_one_func_call",
                             VG_(fnptr_to_fnentry)( &add_one_func_call ),
                             mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads.  See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            if (clo_detailed_counts) {
               IRExpr* expr = st->Ist.WrTmp.data;
               IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
               tl_assert(type != Ity_INVALID);
               switch (expr->tag) {
                  case Iex_Load:
                     instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                     break;
                  case Iex_Unop:
                  case Iex_Binop:
                  case Iex_Triop:
                  case Iex_Qop:
                  case Iex_ITE:
                     instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                     break;
                  default:
                     break;
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw_guarded( sbOut, sg->addr,
                                    sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dr_guarded( sbOut, lg->addr,
                                    sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem) {
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
                  /* flush events before LL, helps SC to succeed */
                  flushEvents(sbOut);
               }
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_basic_counts) {
               // The condition of a branch was inverted by VEX if a taken
               // branch is in fact a fall through according to client address
               tl_assert(iaddr != 0);
               dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32
                                         : st->Ist.Exit.dst->Ico.U64;
               condition_inverted = (dst == iaddr + ilen);

               /* Count Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                             VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                             mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_inverted_Jcc ),
                                             mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
               flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
               /* Count non-taken Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_Jcc_untaken ),
                                             mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_inverted_Jcc_untaken ),
                                             mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

         default:
            ppIRStmt(st);
            tl_assert(0);
      }
   }

   if (clo_basic_counts) {
      /* Count this basic block. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
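
/* lk_instrument (like the other instrumenters in this section) does not call
   the run-time tracing helpers directly: it queues Events via addEvent_*()
   and lets flushEvents() turn the queue into dirty-helper calls at safe
   points (before Ist_Exit and at the end of the superblock).  The sketch
   below is a minimal version of that queue-and-flush machinery.  The Event
   layout, N_EVENTS, and the trace_instr/trace_load/trace_store helpers are
   illustrative assumptions (the real tools keep more per-event state and
   also handle "modify" events), but the overall shape is the same. */

#define N_EVENTS 4

typedef enum { Event_Ir, Event_Dr, Event_Dw } EventKind;

typedef struct {
   EventKind ekind;
   IRExpr*   addr;  /* address expression, evaluated at run time */
   Int       size;  /* access size in bytes */
} Event;

static Event events[N_EVENTS];
static Int   events_used = 0;

/* Hypothetical run-time helpers called from the generated code. */
static VG_REGPARM(2) void trace_instr ( Addr addr, SizeT size ) { /* ... */ }
static VG_REGPARM(2) void trace_load  ( Addr addr, SizeT size ) { /* ... */ }
static VG_REGPARM(2) void trace_store ( Addr addr, SizeT size ) { /* ... */ }

/* Turn each queued event into a call to the matching helper, passing the
   address expression and size as arguments, then empty the queue. */
static void flushEvents ( IRSB* sb )
{
   Int i;
   for (i = 0; i < events_used; i++) {
      Event*       ev = &events[i];
      const HChar* helperName;
      void*        helperAddr;
      IRExpr**     argv;
      IRDirty*     di;

      switch (ev->ekind) {
         case Event_Ir: helperName = "trace_instr";
                        helperAddr = (void*)trace_instr; break;
         case Event_Dr: helperName = "trace_load";
                        helperAddr = (void*)trace_load;  break;
         case Event_Dw: helperName = "trace_store";
                        helperAddr = (void*)trace_store; break;
         default:       tl_assert(0);
      }

      argv = mkIRExprVec_2( ev->addr, mkIRExpr_HWord( ev->size ) );
      di   = unsafeIRDirty_0_N( /*regparms*/2, helperName,
                                VG_(fnptr_to_fnentry)( helperAddr ), argv );
      addStmtToIRSB( sb, IRStmt_Dirty(di) );
   }
   events_used = 0;
}

/* The addEvent_*() functions simply append to the queue, flushing first if
   it is full; addEvent_Ir and addEvent_Dw are analogous to this one. */
static void addEvent_Dr ( IRSB* sb, IRExpr* daddr, Int dsize )
{
   Event* evt;
   if (events_used == N_EVENTS)
      flushEvents(sb);
   evt        = &events[events_used++];
   evt->ekind = Event_Dr;
   evt->addr  = daddr;
   evt->size  = dsize;
}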
static IRSB* sh_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   IRType     type;
   IRTypeEnv* tyenv = sbIn->tyenv;
   IRStmt*    imarkst;
   char*      fnname = global_fnname;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_trace_mem) {
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      /* Pretty-print all IR statements that valgrind has generated,
         starting from main. */
      /*
      if (clo_trace_mem) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }
      */

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_PutI:
         case Ist_MBE:
            break;

         case Ist_Put:
            if (clo_trace_mem) {
               Int reg_no = st->Ist.Put.offset;
               if (reg_no == layout->offset_SP || reg_no == 20) {
                  IRExpr* data = st->Ist.Put.data;
                  if (data->tag == Iex_RdTmp) {
                     /* Add register-write instrumentation to the output
                        IRSB */
                     addEvent_RegW( sbOut, fnname, reg_no, data );
                  }
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            imarkst = st;
            if (VG_(get_fnname_if_entry)( st->Ist.IMark.addr, fnname, 100)) {
               //VG_(printf)("-- %s --\n", fnname);
               if (0 == VG_(strcmp)(fnname, "main")) {
                  di = unsafeIRDirty_0_N(
                          0, "trace_debug",
                             VG_(fnptr_to_fnentry)(trace_debug),
                             mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
                  //VG_(printf)("SP:%d\n", layout->offset_SP);
                  clo_trace_mem = True;
               }
               if (clo_trace_mem) {
                  addEvent_FnEntry(sbOut, fnname);
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads.  See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, fnname, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store:
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, fnname, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, fnname, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, fnname, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, fnname, cas->addr, dataSize );
               addEvent_Dw( sbOut, fnname, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem)
                  addEvent_Dr( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_trace_mem) {
               flushEvents(sbOut);
            }
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   if (clo_trace_mem) {
      if (sbIn->jumpkind == Ijk_Ret) {
         VG_(get_fnname)(imarkst->Ist.IMark.addr, fnname, 100);
         addEvent_FnExit(sbOut, fnname);
      }
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
static IRSB* el_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   Char       fnname[100];
   IRType     type;
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;
   UInt       ilen = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   events_used = 0;

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            // Store the last seen address
            lastAddress = st->Ist.IMark.addr;
            addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                         st->Ist.IMark.len );
            VG_(get_filename)(lastAddress, (char*)g_buff1, kBuffSize);
            if (VG_(strcmp)(g_buff1, clo_filename) == 0) {
               shouldInterpret = 1;
               ppIRStmt(st);
               VG_(printf)("\n");
            } else {
               shouldInterpret = 0;
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            {
               if (shouldInterpret) {
                  IRExpr* data = st->Ist.WrTmp.data;
                  if (data->tag == Iex_Load) {
                     addEvent_Dr( sbOut, data->Iex.Load.addr,
                                  sizeofIRType(data->Iex.Load.ty) );
                  }
               }
               addStmtToIRSB( sbOut, st );
               break;
            }

         case Ist_Store: {
            if (shouldInterpret) {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (shouldInterpret) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            if (shouldInterpret) {
               Int    dataSize;
               IRType dataTy;
               IRCAS* cas = st->Ist.CAS.details;
               tl_assert(cas->addr != NULL);
               tl_assert(cas->dataLo != NULL);
               dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
               dataSize = sizeofIRType(dataTy);
               if (cas->dataHi != NULL)
                  dataSize *= 2; /* since it's a doubleword-CAS */
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            if (shouldInterpret) {
               IRType dataTy;
               if (st->Ist.LLSC.storedata == NULL) {
                  /* LL */
                  dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               } else {
                  /* SC */
                  dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (shouldInterpret) {
            }
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   /* At the end of the sbIn.  Flush outstandings. */
   flushEvents(sbOut);

   return sbOut;
}
/* This is copied mostly verbatim from lackey */
static IRSB* mv_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i;
   IRSB*      sbOut;
   IRTypeEnv* tyenv = sbIn->tyenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   //ppIRSB(sbIn);

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   events_used = 0;

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            canCreateModify = False;
            if (clo_trace_instrs) {
               addEvent_Ir( sbOut,
                            mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               addEvent_Dr( sbOut, data->Iex.Load.addr,
                            sizeofIRType(data->Iex.Load.ty),
                            IRTypeToMVType(data->Iex.Load.ty) );
            }
         }
         addStmtToIRSB( sbOut, st );
         break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            addEvent_Dw( sbOut, st->Ist.Store.addr,
                         sizeofIRType(typeOfIRExpr(tyenv, data)),
                         IRTypeToMVType(typeOfIRExpr(tyenv, data)) );
         }
         addStmtToIRSB( sbOut, st );
         break;

         case Ist_Dirty: {
            Int      dsize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               // This dirty helper accesses memory.  Collect the details.
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dsize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( sbOut, d->mAddr, dsize, MV_DataInt32 );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( sbOut, d->mAddr, dsize, MV_DataInt32 );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            addEvent_Dr( sbOut, cas->addr, dataSize, IRTypeToMVType(dataTy) );
            addEvent_Dw( sbOut, cas->addr, dataSize, IRTypeToMVType(dataTy) );
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy), IRTypeToMVType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy), IRTypeToMVType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   /* At the end of the sbIn.  Flush outstandings. */
   flushEvents(sbOut);

   return sbOut;
}
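
/* mv_instrument differs from the lackey version mainly in that it tags each
   queued event with a data type via IRTypeToMVType(), which is not shown
   above.  The sketch below illustrates one plausible shape for that mapping:
   it collapses VEX IRTypes into tool-level tags.  Apart from MV_DataInt32,
   which appears in the code above, the MVDataType enumerators and the exact
   IRType coverage are assumptions made purely for illustration. */

typedef enum {
   MV_DataUnknown = 0,
   MV_DataInt32,      /* the only tag actually visible in mv_instrument */
   MV_DataInt64,
   MV_DataFlt32,
   MV_DataFlt64,
   MV_DataVec
} MVDataType;

static MVDataType IRTypeToMVType ( IRType ty )
{
   switch (ty) {
      /* Narrow integer loads/stores are lumped in with 32-bit integers. */
      case Ity_I1:
      case Ity_I8:
      case Ity_I16:
      case Ity_I32:  return MV_DataInt32;
      case Ity_I64:  return MV_DataInt64;
      case Ity_F32:  return MV_DataFlt32;
      case Ity_F64:  return MV_DataFlt64;
      case Ity_V128: return MV_DataVec;
      default:       return MV_DataUnknown;
   }
}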