/* Lackey's instrumentation pass.  Walks the statements of sbIn and builds a
   new superblock sbOut, inserting helper calls ahead of (or after) the
   original statements to implement --basic-counts, --detailed-counts,
   --trace-superblocks and --trace-mem.  Returns the instrumented SB. */
static IRSB* lk_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   HChar      fnname[100];
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;   /* address of current guest insn / exit target */
   UInt       ilen = 0;         /* length of current guest insn */
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_basic_counts) {
      /* Count this superblock. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                              VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                              mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_sbs) {
      /* Print this superblock's address. */
      di = unsafeIRDirty_0_N( 0, "trace_superblock",
                              VG_(fnptr_to_fnentry)( &trace_superblock ),
                              mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) ) );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* Reset the pending-event queue for this SB. */
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (clo_basic_counts) {
         /* Count one VEX statement. */
         di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                 VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                 mkIRExprVec_0() );
         addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
      }

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            /* No memory access and nothing to count: copy through. */
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            if (clo_basic_counts) {
               /* Needed to be able to check for inverted condition in
                  Ist_Exit */
               iaddr = st->Ist.IMark.addr;
               ilen = st->Ist.IMark.len;
               /* Count guest instruction. */
               di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                       VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                       mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

               /* An unconditional branch to a known destination in the
                * guest's instructions can be represented, in the IRSB to
                * instrument, by the VEX statements that are the
                * translation of that known destination. This feature is
                * called 'SB chasing' and can be influenced by command
                * line option --vex-guest-chase-thresh.
                *
                * To get an accurate count of the calls to a specific
                * function, taking SB chasing into account, we need to
                * check for each guest instruction (Ist_IMark) if it is
                * the entry point of a function. */
               tl_assert(clo_fnname);
               tl_assert(clo_fnname[0]);
               if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                            fnname, sizeof(fnname))
                   && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                  di = unsafeIRDirty_0_N( 0, "add_one_func_call",
                                          VG_(fnptr_to_fnentry)( &add_one_func_call ),
                                          mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads. See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            if (clo_detailed_counts) {
               IRExpr* expr = st->Ist.WrTmp.data;
               IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
               tl_assert(type != Ity_INVALID);
               switch (expr->tag) {
                  case Iex_Load:
                     instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                     break;
                  case Iex_Unop:
                  case Iex_Binop:
                  case Iex_Triop:
                  case Iex_Qop:
                  case Iex_ITE:
                     instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                     break;
                  default:
                     break;
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_StoreG: {
            /* Guarded store: the event carries the guard so the helper
               only records the access when the guard is true. */
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw_guarded( sbOut, sg->addr,
                                    sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dr_guarded( sbOut, lg->addr,
                                    sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex front
               ends would translate a lock-prefixed instruction into a
               (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem) {
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
                  /* flush events before LL, helps SC to succeed */
                  flushEvents(sbOut);
               }
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpStore, dataTy,
                                     NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_basic_counts) {
               // The condition of a branch was inverted by VEX if a taken
               // branch is in fact a fall through according to client address
               tl_assert(iaddr != 0);
               dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32
                                         : st->Ist.Exit.dst->Ico.U64;
               condition_inverted = (dst == iaddr + ilen);

               /* Count Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                          VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                          VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
               flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
               /* Count non-taken Jcc.  This runs only if the exit above
                  was not taken, hence "untaken". */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)( &add_one_Jcc_untaken ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc_untaken ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

         default:
            ppIRStmt(st);
            tl_assert(0);
      }
   }

   if (clo_basic_counts) {
      /* Count this basic block. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                              VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                              mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
static IRSB* el_instrument ( VgCallbackClosure* closure, IRSB* sbIn, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { IRDirty* di; Int i; IRSB* sbOut; Char fnname[100]; IRType type; IRTypeEnv* tyenv = sbIn->tyenv; Addr iaddr = 0, dst; UInt ilen = 0; Bool condition_inverted = False; if (gWordTy != hWordTy) { /* We don't currently support this case. */ VG_(tool_panic)("host/guest word size mismatch"); } /* Set up SB */ sbOut = deepCopyIRSBExceptStmts(sbIn); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) { addStmtToIRSB( sbOut, sbIn->stmts[i] ); i++; } events_used = 0; for (/*use current i*/; i < sbIn->stmts_used; i++) { IRStmt* st = sbIn->stmts[i]; if (!st || st->tag == Ist_NoOp) continue; switch (st->tag) { case Ist_NoOp: case Ist_AbiHint: case Ist_Put: case Ist_PutI: case Ist_MBE: addStmtToIRSB( sbOut, st ); break; case Ist_IMark: // Store the last seen address lastAddress = st->Ist.IMark.addr; addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ), st->Ist.IMark.len ); VG_(get_filename)(lastAddress, (char*) g_buff1, kBuffSize); if(VG_(strcmp)(g_buff1, clo_filename) == 0) { shouldInterpret = 1; ppIRStmt(st); VG_(printf)("\n"); } else { shouldInterpret = 0; } addStmtToIRSB( sbOut, st ); break; case Ist_WrTmp: // Add a call to trace_load() if --trace-mem=yes. { if(shouldInterpret) { IRExpr* data = st->Ist.WrTmp.data; if (data->tag == Iex_Load) { addEvent_Dr( sbOut, data->Iex.Load.addr, sizeofIRType(data->Iex.Load.ty) ); } } addStmtToIRSB( sbOut, st ); break; } case Ist_Store: { if(shouldInterpret) { IRExpr* data = st->Ist.Store.data; addEvent_Dw( sbOut, st->Ist.Store.addr, sizeofIRType(typeOfIRExpr(tyenv, data)) ); } addStmtToIRSB( sbOut, st ); break; } case Ist_Dirty: { if(shouldInterpret) { Int dsize; IRDirty* d = st->Ist.Dirty.details; if (d->mFx != Ifx_None) { // This dirty helper accesses memory. Collect the details. 
tl_assert(d->mAddr != NULL); tl_assert(d->mSize != 0); dsize = d->mSize; if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) addEvent_Dr( sbOut, d->mAddr, dsize ); if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) addEvent_Dw( sbOut, d->mAddr, dsize ); } else { tl_assert(d->mAddr == NULL); tl_assert(d->mSize == 0); } } addStmtToIRSB( sbOut, st ); break; } case Ist_CAS: { if(shouldInterpret) { Int dataSize; IRType dataTy; IRCAS* cas = st->Ist.CAS.details; tl_assert(cas->addr != NULL); tl_assert(cas->dataLo != NULL); dataTy = typeOfIRExpr(tyenv, cas->dataLo); dataSize = sizeofIRType(dataTy); if (cas->dataHi != NULL) dataSize *= 2; /* since it's a doubleword-CAS */ addEvent_Dr( sbOut, cas->addr, dataSize ); addEvent_Dw( sbOut, cas->addr, dataSize ); } addStmtToIRSB( sbOut, st ); break; } case Ist_LLSC: { if(shouldInterpret) { IRType dataTy; if (st->Ist.LLSC.storedata == NULL) { /* LL */ dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result); addEvent_Dr( sbOut, st->Ist.LLSC.addr, sizeofIRType(dataTy) ); } else { /* SC */ dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata); addEvent_Dw( sbOut, st->Ist.LLSC.addr, sizeofIRType(dataTy) ); } } addStmtToIRSB( sbOut, st ); break; } case Ist_Exit: if(shouldInterpret) { } flushEvents(sbOut); addStmtToIRSB( sbOut, st ); // Original statement break; default: tl_assert(0); } } /* At the end of the sbIn. Flush outstandings. */ flushEvents(sbOut); return sbOut; }
IRSB* VG_(instrument_for_gdbserver_if_needed) (IRSB* sb_in, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy) { IRSB* sb_out; Int i; const VgVgdb instr_needed = VG_(gdbserver_instrumentation_needed) (vge); if (instr_needed == Vg_VgdbNo) return sb_in; /* here, we need to instrument for gdbserver */ sb_out = deepCopyIRSBExceptStmts(sb_in); for (i = 0; i < sb_in->stmts_used; i++) { IRStmt* st = sb_in->stmts[i]; if (!st || st->tag == Ist_NoOp) continue; if (st->tag == Ist_Exit && instr_needed == Vg_VgdbYes) { VG_(invalidate_if_not_gdbserved) (hWordTy == Ity_I64 ? st->Ist.Exit.dst->Ico.U64 : st->Ist.Exit.dst->Ico.U32); } addStmtToIRSB( sb_out, st ); if (st->tag == Ist_IMark) { /* For an Ist_Mark, add a call to debugger. */ switch (instr_needed) { case Vg_VgdbNo: vg_assert (0); case Vg_VgdbYes: case Vg_VgdbFull: VG_(add_stmt_call_gdbserver) ( sb_in, layout, vge, gWordTy, hWordTy, st->Ist.IMark.addr, st->Ist.IMark.delta, sb_out); /* There is an optimisation possible here for Vg_VgdbFull: Put a guard ensuring we only call gdbserver if 'FullCallNeeded'. FullCallNeeded would be set to 1 we have just switched on Single Stepping or have just encountered a watchpoint or have just inserted a breakpoint. (as gdb by default removes and re-insert breakpoints), we would need to also implement the notion of 'breakpoint pending removal' to remove at the next 'continue/step' packet. */ break; default: vg_assert (0); } } } if (instr_needed == Vg_VgdbYes) { VG_(add_stmt_call_invalidate_exit_target_if_not_gdbserved) (sb_in, layout, vge, gWordTy, sb_out); } return sb_out; }
/* Instrumentation pass that starts tracing once execution reaches main():
   the IMark case flips clo_trace_mem to True on entry to "main", after
   which memory accesses, stack-pointer writes (addEvent_RegW) and
   function entries/exits (addEvent_FnEntry/FnExit) are recorded.
   fnname points at a global buffer shared across calls. */
static IRSB* sh_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   IRType     type;
   IRTypeEnv* tyenv = sbIn->tyenv;
   IRStmt*    imarkst;   /* last IMark seen; used for FnExit lookup below */
   char *fnname = global_fnname;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_trace_mem) {
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      /* Prettyprint All IR statements Starting from main that valgrind
         has generated */
      /*if(clo_trace_mem){
         ppIRStmt(st);
         VG_(printf)("\n");
      } */

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_PutI:
         case Ist_MBE:
            /* NOTE(review): these statements are dropped from sbOut, not
               copied through — unlike the other instrumenters in this
               file, which copy them.  Dropping PutI/MBE alters guest
               behaviour; verify this is intentional. */
            break;

         case Ist_Put:
            if (clo_trace_mem) {
               Int reg_no = st->Ist.Put.offset;
               /* Track writes to the stack pointer and to guest offset 20
                  (NOTE(review): 20 looks arch-specific — presumably a
                  frame/base register; confirm against the guest layout). */
               if (reg_no == layout->offset_SP || reg_no == 20) {
                  IRExpr* data = st->Ist.Put.data;
                  if (data->tag == Iex_RdTmp) {
                     /* Add registerwrite instrumentation to IRSBout */
                     addEvent_RegW( sbOut, fnname, reg_no, data);
                  }
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            imarkst = st;
            if (VG_(get_fnname_if_entry)( st->Ist.IMark.addr, fnname, 100)) {
               //VG_(printf)("-- %s --\n",fnname);
               if (0 == VG_(strcmp)(fnname, "main")) {
                  /* Reached main(): switch tracing on for the rest of
                     the run. */
                  di = unsafeIRDirty_0_N( 0, "trace_debug",
                                          VG_(fnptr_to_fnentry)(trace_debug),
                                          mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
                  //VG_(printf)("SP:%d\n",layout->offset_SP);
                  clo_trace_mem = True;
               }
               if (clo_trace_mem) {
                  addEvent_FnEntry(sbOut, fnname);
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads. See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, fnname, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store:
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, fnname, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, fnname, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, fnname, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex front
               ends would translate a lock-prefixed instruction into a
               (normal) read followed by a (normal) write.
               NOTE(review): unlike lk_instrument, dataSize is not doubled
               for a doubleword-CAS (dataHi != NULL) here — confirm
               whether that is deliberate. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, fnname, cas->addr, dataSize );
               addEvent_Dw( sbOut, fnname, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem)
                  addEvent_Dr( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_trace_mem) {
               /* Conditional exit: flush pending events first. */
               flushEvents(sbOut);
            }
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   if (clo_trace_mem) {
      if (sbIn->jumpkind == Ijk_Ret) {
         /* SB ends in a return: record exit from the function containing
            the last instruction of this SB. */
         VG_(get_fnname)(imarkst->Ist.IMark.addr, fnname, 100);
         addEvent_FnExit(sbOut, fnname);
      }
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
/* Callgrind's instrumentation pass.  Looks up (or creates) the BB record
   for this superblock, emits a setup_bbcc helper call at the top, then
   walks the original statements one guest instruction at a time, adding
   cost-collection instrumentation (endOfInstr) per instruction and
   jmps_passed updates before each conditional exit.  Returns bbOut. */
static IRSB* CLG_(instrument)( VgCallbackClosure* closure,
                               IRSB* bbIn,
                               VexGuestLayout* layout,
                               VexGuestExtents* vge,
                               IRType gWordTy, IRType hWordTy )
{
   Int      i;
   IRSB*    bbOut;
   IRStmt*  st, *stnext;
   Addr     instrAddr, origAddr;
   UInt     instrLen = 0, dataSize;
   UInt     instrCount, costOffset;
   IRExpr  *loadAddrExpr, *storeAddrExpr;
   BB*      bb;                           /* basic-block info record */
   IRDirty* di;
   IRExpr  *arg1, **argv;
   Bool     bb_seen_before = False;
   UInt     cJumps = 0, cJumpsCorrected;  /* conditional-jump counters */
   Bool     beforeIBoundary, instrIssued;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   // No instrumentation if it is switched off
   if (! CLG_(instrument_state)) {
      CLG_DEBUG(5, "instrument(BB %p) [Instrumentation OFF]\n",
                (Addr)closure->readdr);
      return bbIn;
   }

   CLG_DEBUG(3, "+ instrument(BB %p)\n", (Addr)closure->readdr);

   /* Set up SB for instrumented IR */
   bbOut = deepCopyIRSBExceptStmts(bbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and origAddr from it
   CLG_ASSERT(bbIn->stmts_used > 0);
   st = bbIn->stmts[i];
   CLG_ASSERT(Ist_IMark == st->tag);
   instrAddr = origAddr = (Addr)st->Ist.IMark.addr;
   CLG_ASSERT(origAddr == st->Ist.IMark.addr);  // XXX: check no overflow

   /* Get BB (creating if necessary).
    * JS: The hash table is keyed with orig_addr_noredir -- important!
    * JW: Why? If it is because of different chasing of the redirection,
    *     this is not needed, as chasing is switched off in callgrind
    */
   bb = CLG_(get_bb)(origAddr, bbIn, &bb_seen_before);
   //bb = CLG_(get_bb)(orig_addr_noredir, bbIn, &bb_seen_before);

   /*
    * Precondition:
    * - jmps_passed has number of cond.jumps passed in last executed BB
    * - current_bbcc has a pointer to the BBCC of the last executed BB
    *   Thus, if bbcc_jmpkind is != -1 (JmpNone),
    *     current_bbcc->bb->jmp_addr
    *   gives the address of the jump source.
    *
    * The BBCC setup does 2 things:
    * - trace call:
    *   * Unwind own call stack, i.e sync our ESP with real ESP
    *     This is for ESP manipulation (longjmps, C++ exec handling) and RET
    *   * For CALLs or JMPs crossing objects, record call arg +
    *     push are on own call stack
    *
    * - prepare for cache log functions:
    *   Set current_bbcc to BBCC that gets the costs for this BB execution
    *   attached
    */

   // helper call to setup_bbcc, with pointer to basic block info struct as
   // argument
   arg1 = mkIRExpr_HWord( (HWord)bb );
   argv = mkIRExprVec_1(arg1);
   di = unsafeIRDirty_0_N( 1, "setup_bbcc",
                           VG_(fnptr_to_fnentry)( & CLG_(setup_bbcc) ),
                           argv);
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );

   instrCount = 0;
   costOffset = 0;

   // loop for each host instruction (starting from 'i')
   do {
      // We should be at an IMark statement
      CLG_ASSERT(Ist_IMark == st->tag);

      // Reset stuff for this original instruction
      loadAddrExpr = storeAddrExpr = NULL;
      instrIssued = False;
      dataSize = 0;

      // Process all the statements for this original instruction (ie. until
      // the next IMark statement, or the end of the block)
      do {
         i++;
         stnext = ( i < bbIn->stmts_used ? bbIn->stmts[i] : NULL );
         beforeIBoundary = !stnext || (Ist_IMark == stnext->tag);
         collectStatementInfo(bbIn->tyenv, bbOut, st, &instrAddr, &instrLen,
                              &loadAddrExpr, &storeAddrExpr, &dataSize,
                              hWordTy);

         // instrument a simulator call before conditional jumps
         if (st->tag == Ist_Exit) {
            // Nb: instrLen will be zero if Vex failed to decode it.
            // Also Client requests can appear to be very large (eg. 18
            // bytes on x86) because they are really multiple instructions.
            CLG_ASSERT( 0 == instrLen ||
                        bbIn->jumpkind == Ijk_ClientReq ||
                        (instrLen >= VG_MIN_INSTR_SZB &&
                         instrLen <= VG_MAX_INSTR_SZB) );

            // Add instrumentation before this statement
            endOfInstr(bbOut, &(bb->instr[instrCount]), bb_seen_before,
                       instrAddr - origAddr, instrLen, dataSize, &costOffset,
                       instrIssued, loadAddrExpr, storeAddrExpr);

            // prepare for a possible further simcall in same host instr
            loadAddrExpr = storeAddrExpr = NULL;
            instrIssued = True;

            if (!bb_seen_before) {
               bb->jmp[cJumps].instr = instrCount;
               bb->jmp[cJumps].skip = False;
            }

            /* Update global variable jmps_passed (this is before the jump!)
             * A correction is needed if VEX inverted the last jump condition
             */
            cJumpsCorrected = cJumps;
            if ((cJumps+1 == bb->cjmp_count) && bb->cjmp_inverted)
               cJumpsCorrected++;
            addConstMemStoreStmt( bbOut,
                                  (UWord) &CLG_(current_state).jmps_passed,
                                  cJumpsCorrected, hWordTy);
            cJumps++;
         }

         addStmtToIRSB( bbOut, st );
         st = stnext;
      } while (!beforeIBoundary);

      // Add instrumentation for this original instruction.
      if (!instrIssued || (loadAddrExpr != 0) || (storeAddrExpr !=0))
         endOfInstr(bbOut, &(bb->instr[instrCount]), bb_seen_before,
                    instrAddr - origAddr, instrLen, dataSize, &costOffset,
                    instrIssued, loadAddrExpr, storeAddrExpr);

      instrCount++;
   } while (st);

   /* Always update global variable jmps_passed (at end of BB)
    * A correction is needed if VEX inverted the last jump condition
    */
   cJumpsCorrected = cJumps;
   if (bb->cjmp_inverted) cJumpsCorrected--;
   addConstMemStoreStmt( bbOut,
                         (UWord) &CLG_(current_state).jmps_passed,
                         cJumpsCorrected, hWordTy);

   /* This stores the instr of the call/ret at BB end */
   bb->jmp[cJumps].instr = instrCount-1;

   CLG_ASSERT(bb->cjmp_count == cJumps);
   CLG_ASSERT(bb->instr_count == instrCount);

   instrAddr += instrLen;
   if (bb_seen_before) {
      /* Re-translation of a known BB: its recorded geometry must match. */
      CLG_ASSERT(bb->instr_len == instrAddr - origAddr);
      CLG_ASSERT(bb->cost_count == costOffset);
      CLG_ASSERT(bb->jmpkind == bbIn->jumpkind);
   } else {
      bb->instr_len = instrAddr - origAddr;
      bb->cost_count = costOffset;
      bb->jmpkind = bbIn->jumpkind;
   }

   CLG_DEBUG(3, "- instrument(BB %p): byteLen %u, CJumps %u, CostLen %u\n",
             origAddr, bb->instr_len, bb->cjmp_count, bb->cost_count);
   if (cJumps>0) {
      CLG_DEBUG(3, "                     [ ");
      for (i=0;i<cJumps;i++)
         CLG_DEBUG(3, "%d ", bb->jmp[i].instr);
      CLG_DEBUG(3, "], last inverted: %s \n",
                bb->cjmp_inverted ? "yes":"no");
   }

   return bbOut;
}
//----------------------------------------------------------------- static IRSB* oa_instrument (VgCallbackClosure* closure, IRSB* sbIn, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { Int i; IRSB* sbOut; IRType type; Addr64 cia; /* address of current insn */ IRStmt* st; if (gWordTy != hWordTy) { VG_(tool_panic)("host/guest word size mismatch"); // currently unsupported } thisWordWidth=gWordTy; //if (gWordTy != Ity_I32) ppIRType(gWordTy); /* Set up SB */ sbOut = deepCopyIRSBExceptStmts(sbIn); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) { addStmtToIRSB( sbOut, sbIn->stmts[i] ); i++; } st = sbIn->stmts[i]; cia = st->Ist.IMark.addr; for (/*use current i*/; i < sbIn->stmts_used; i++) { st = sbIn->stmts[i]; if (!st || st->tag == Ist_NoOp) continue; IRExpr* expr; switch (st->tag) { case Ist_IMark: cia = st->Ist.IMark.addr; break; case Ist_WrTmp: // Add a call to trace_load() if --trace-mem=yes. expr = st->Ist.WrTmp.data; type = typeOfIRExpr(sbOut->tyenv, expr); tl_assert(type != Ity_INVALID); switch (expr->tag) { case Iex_Unop: instrument_Unop( sbOut, st, cia ); break; case Iex_Binop: instrument_Binop( sbOut, st, type, cia ); break; case Iex_Triop: instrument_Triop( sbOut, st, cia ); break; case Iex_Qop: //instrument_Qop( sbOut, expr, type ); break; case Iex_Mux0X: //instrument_Muxop( sbOut, expr, type ); break; default: break; } // switch break; default: break; } // switch addStmtToIRSB( sbOut, st ); } // for return sbOut; }
/* ptrcheck heap-checking instrumentation entry point.  Builds a PCEnv
   running environment around a copy of sbIn, initialises the tmp shadow
   map (one NonShad entry per original tmp), drives the sg_ (stack/global)
   instrumenter alongside the h_ ('C'-tagged) statement processing, and
   returns the instrumented SB (pce.sb). */
IRSB* h_instrument ( VgCallbackClosure* closure,
                     IRSB* sbIn,
                     const VexGuestLayout* layout,
                     const VexGuestExtents* vge,
                     const VexArchInfo* archinfo_host,
                     IRType gWordTy, IRType hWordTy )
{
   Bool  verboze = 0||False;
   Int   i /*, j*/;
   PCEnv pce;
   struct _SGEnv* sgenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Check we're not completely nuts */
   tl_assert(sizeof(UWord) == sizeof(void*));
   tl_assert(sizeof(Word)  == sizeof(void*));
   tl_assert(sizeof(Addr)  == sizeof(void*));
   tl_assert(sizeof(ULong) == 8);
   tl_assert(sizeof(Long)  == 8);
   tl_assert(sizeof(Addr)  == sizeof(void*));
   tl_assert(sizeof(UInt)  == 4);
   tl_assert(sizeof(Int)   == 4);

   /* Set up the running environment.  Both .sb and .tmpMap are
      modified as we go along.  Note that tmps are added to both
      .sb->tyenv and .tmpMap together, so the valid index-set for
      those two arrays should always be identical. */
   VG_(memset)(&pce, 0, sizeof(pce));
   pce.sb                = deepCopyIRSBExceptStmts(sbIn);
   pce.trace             = verboze;
   pce.hWordTy           = hWordTy;
   pce.gWordTy           = gWordTy;
   pce.guest_state_sizeB = layout->total_sizeB;

   pce.qmpMap = VG_(newXA)( VG_(malloc), "pc.h_instrument.1", VG_(free),
                            sizeof(TempMapEnt));
   /* Seed the shadow map: every original tmp starts out non-shadowed. */
   for (i = 0; i < sbIn->tyenv->types_used; i++) {
      TempMapEnt ent;
      ent.kind   = NonShad;
      ent.shadow = IRTemp_INVALID;
      VG_(addToXA)( pce.qmpMap, &ent );
   }
   tl_assert( VG_(sizeXA)( pce.qmpMap ) == sbIn->tyenv->types_used );

   /* Also set up for the sg_ instrumenter.  See comments at the top
      of this instrumentation section for details.  The two parameters
      constitute a closure, which sg_ can use to correctly generate new
      IRTemps as needed. */
   sgenv = sg_instrument_init( for_sg__newIRTemp_cb,
                               (void*)&pce );

   /* Copy verbatim any IR preamble preceding the first IMark */
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      IRStmt* st = sbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      stmt( 'C', &pce, sbIn->stmts[i] );
      i++;
   }

   /* Iterate over the remaining stmts to generate instrumentation. */
   tl_assert(sbIn->stmts_used > 0);
   tl_assert(i >= 0);
   tl_assert(i < sbIn->stmts_used);
   tl_assert(sbIn->stmts[i]->tag == Ist_IMark);

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      /* generate sg_ instrumentation for this stmt */
      sg_instrument_IRStmt( sgenv, pce.sb, sbIn->stmts[i],
                            layout, gWordTy, hWordTy );
      stmt( 'C', &pce, sbIn->stmts[i] );
   }

   /* generate sg_ instrumentation for the final jump */
   sg_instrument_final_jump( sgenv, pce.sb, sbIn->next, sbIn->jumpkind,
                             layout, gWordTy, hWordTy );

   /* and finalise .. */
   sg_instrument_fini( sgenv );

   /* If this fails, there's been some serious snafu with tmp management,
      that should be investigated. */
   tl_assert( VG_(sizeXA)( pce.qmpMap ) == pce.sb->tyenv->types_used );
   VG_(deleteXA)( pce.qmpMap );

   return pce.sb;
}
/* This is copied mostly verbatim from lackey */
/* Instrumentation pass recording typed memory events: loads/stores carry
   an MV data-type tag (via IRTypeToMVType) in addition to address and
   size.  Instruction events are only emitted under --trace-instrs. */
static IRSB* mv_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i;
   IRSB*      sbOut;
   IRTypeEnv* tyenv = sbIn->tyenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   //ppIRSB(sbIn);

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   /* Reset the pending-event queue for this SB. */
   events_used = 0;

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            /* New instruction: reset the read-then-write coalescing flag. */
            canCreateModify = False;
            if (clo_trace_instrs) {
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp: {
            /* A load shows up as a WrTmp whose data is an Iex_Load. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               addEvent_Dr( sbOut, data->Iex.Load.addr,
                            sizeofIRType(data->Iex.Load.ty),
                            IRTypeToMVType(data->Iex.Load.ty) );
            }
         }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            addEvent_Dw( sbOut, st->Ist.Store.addr,
                         sizeofIRType(typeOfIRExpr(tyenv, data)),
                         IRTypeToMVType(typeOfIRExpr(tyenv, data)) );
         }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Dirty: {
            Int      dsize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               // This dirty helper accesses memory.  Collect the details.
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dsize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( sbOut, d->mAddr, dsize, MV_DataInt32 );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( sbOut, d->mAddr, dsize, MV_DataInt32 );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex front
               ends would translate a lock-prefixed instruction into a
               (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            addEvent_Dr( sbOut, cas->addr, dataSize, IRTypeToMVType(dataTy) );
            addEvent_Dw( sbOut, cas->addr, dataSize, IRTypeToMVType(dataTy) );
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy), IRTypeToMVType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy), IRTypeToMVType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            /* Conditional exit: flush pending events first. */
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   /* At the end of the sbIn.  Flush outstandings. */
   flushEvents(sbOut);

   return sbOut;
}
/* Cachegrind-style instrumentation pass.  Walks sbIn, attaching an
   InstrInfo node to each guest instruction and queueing Ir/Dr/Dw events
   into the running CgState; events are flushed before every side exit
   and at the end of the SB.  Every original statement is copied into
   cgs.sbOut after the switch, so guest behaviour is unchanged. */
static IRSB* cg_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i, isize;
   IRStmt*    st;
   Addr64     cia; /* address of current insn */
   CgState    cgs;
   IRTypeEnv* tyenv = sbIn->tyenv;
   InstrInfo* curr_inode = NULL;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   // Set up new SB
   cgs.sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( cgs.sbOut, sbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(sbIn->stmts_used > 0);
   tl_assert(i < sbIn->stmts_used);
   st = sbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;

   // Set up running state and get block info
   tl_assert(closure->readdr == vge->base[0]);
   cgs.events_used = 0;
   cgs.sbInfo      = get_SB_info(sbIn, (Addr)closure->readdr);
   cgs.sbInfo_i    = 0;

   if (DEBUG_CG)
      VG_(printf)("\n\n---------- cg_instrument ----------\n");

   // Traverse the block, initialising inodes, adding events and flushing as
   // necessary.
   for (/*use current i*/; i < sbIn->stmts_used; i++) {

      st = sbIn->stmts[i];
      tl_assert(isFlatIRStmt(st));

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MFence:
            /* No memory access implied; copied through below. */
            break;

         case Ist_IMark:
            cia   = st->Ist.IMark.addr;
            isize = st->Ist.IMark.len;

            // If Vex fails to decode an instruction, the size will be zero.
            // Pretend otherwise.
            if (isize == 0) isize = VG_MIN_INSTR_SZB;

            // Sanity-check size.
            tl_assert( (VG_MIN_INSTR_SZB <= isize
                        && isize <= VG_MAX_INSTR_SZB)
                       || VG_CLREQ_SZB == isize );

            // Get space for and init the inode, record it as the current one.
            // Subsequent Dr/Dw/Dm events from the same instruction will
            // also use it.
            curr_inode = setup_InstrInfo(&cgs, cia, isize);

            addEvent_Ir( &cgs, curr_inode );
            break;

         case Ist_WrTmp: {
            /* A Load on the RHS is the only WrTmp form that touches
               memory. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* aexpr = data->Iex.Load.addr;
               // Note also, endianness info is ignored.  I guess
               // that's not interesting.
               addEvent_Dr( &cgs, curr_inode,
                            sizeofIRType(data->Iex.Load.ty), aexpr );
            }
            break;
         }

         case Ist_Store: {
            IRExpr* data  = st->Ist.Store.data;
            IRExpr* aexpr = st->Ist.Store.addr;
            addEvent_Dw( &cgs, curr_inode,
                         sizeofIRType(typeOfIRExpr(tyenv, data)), aexpr );
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               // Large (eg. 28B, 108B, 512B on x86) data-sized
               // instructions will be done inaccurately, but they're
               // very rare and this avoids errors from hitting more
               // than two cache lines in the simulation.
               if (dataSize > MIN_LINE_SIZE)
                  dataSize = MIN_LINE_SIZE;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( &cgs, curr_inode, dataSize, d->mAddr );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( &cgs, curr_inode, dataSize, d->mAddr );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         case Ist_Exit:
            /* We may never reach the next statement, so need to flush
               all outstanding transactions now. */
            flushEvents( &cgs );
            break;

         default:
            tl_assert(0);
            break;
      }

      /* Copy the original statement */
      addStmtToIRSB( cgs.sbOut, st );

      if (DEBUG_CG) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }
   }

   /* At the end of the bb.  Flush outstandings. */
   flushEvents( &cgs );

   /* done.  stay sane ... */
   tl_assert(cgs.sbInfo_i == cgs.sbInfo->n_instrs);

   if (DEBUG_CG) {
      VG_(printf)( "goto {");
      ppIRJumpKind(sbIn->jumpkind);
      VG_(printf)( "} ");
      ppIRExpr( sbIn->next );
      VG_(printf)( "}\n");
   }

   return cgs.sbOut;
}
static IRSB* tt_instrument ( VgCallbackClosure* closure, IRSB* bb, const VexGuestLayout* layout, const VexGuestExtents* vge, const VexArchInfo* archinfo_host, IRType gWordTy, IRType hWordTy ) { Int i; IRSB* sbOut; IRTypeEnv* tyenv = bb->tyenv; if (gWordTy != hWordTy) { /* We don't currently support this case. */ VG_(tool_panic)("host/guest word size mismatch"); } /* Set up SB */ sbOut = deepCopyIRSBExceptStmts(bb); //Move to a init data structures helgrind/hg_main tainted = VG_(newXA)(VG_(malloc), "tt_tainted", VG_(free), sizeof(IRExpr)); VG_(setCmpFnXA)(tainted, cmp_tainted_by_reg); kvStore = VG_(newFM)(VG_(malloc), "ss_kvstore", VG_(free), cmp_key_by_reg); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < bb->stmts_used && bb->stmts[i]->tag != Ist_IMark) { addStmtToIRSB( sbOut, bb->stmts[i] ); i++; } // Make sure no temp writes in the un instrumented preamble for (int j=0; j<i; j++) { if (bb->stmts[j]->tag == Ist_WrTmp) { VG_(tool_panic)("Wrote to a temporary"); } } tl_assert(bb->stmts[i]->tag == Ist_IMark); // Iterate over remaining stmts for (/*use current i*/; i < bb->stmts_used; i++) { IRStmt* st = bb->stmts[i]; IRStmt* clone = deepMallocIRStmt(st); if (!st || st->tag == Ist_NoOp) continue; if (0) { ppIRStmt(st); VG_(printf)("\n"); } switch (st->tag) { case Ist_WrTmp: { IRExpr* data = st->Ist.WrTmp.data; VG_(addToFM)(kvStore, st->Ist.WrTmp.tmp, (UWord) data); switch (data->tag) { case Iex_Load: mkCall(sbOut, Event_Load, clone, data->Iex.Load.addr); break; case Iex_Binop: case Iex_Unop: mkCall(sbOut, Event_Op, clone, NULL); break; default: break; VG_(tool_panic)("Unfinished"); } addStmtToIRSB( sbOut, st ); break; } case Ist_Store: { IRType type = typeOfIRExpr(tyenv, st->Ist.Store.data); tl_assert(type != Ity_INVALID); mkCall(sbOut, Event_Store, clone, st->Ist.Store.addr); addStmtToIRSB( sbOut, st ); break; } default: addStmtToIRSB( sbOut, st ); } } return sbOut; }
static PyObject *pyIRSB_deepCopyExceptStmts(pyIRSB* self) { return (PyObject *)wrap_IRSB(deepCopyIRSBExceptStmts(self->wrapped)); }
IRSB* h_instrument ( VgCallbackClosure* closure, IRSB* sbIn, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { Bool verboze = 0||False; Int i ; PCEnv pce; struct _SGEnv* sgenv; if (gWordTy != hWordTy) { VG_(tool_panic)("host/guest word size mismatch"); } tl_assert(sizeof(UWord) == sizeof(void*)); tl_assert(sizeof(Word) == sizeof(void*)); tl_assert(sizeof(Addr) == sizeof(void*)); tl_assert(sizeof(ULong) == 8); tl_assert(sizeof(Long) == 8); tl_assert(sizeof(Addr64) == 8); tl_assert(sizeof(UInt) == 4); tl_assert(sizeof(Int) == 4); VG_(memset)(&pce, 0, sizeof(pce)); pce.sb = deepCopyIRSBExceptStmts(sbIn); pce.trace = verboze; pce.hWordTy = hWordTy; pce.gWordTy = gWordTy; pce.guest_state_sizeB = layout->total_sizeB; pce.qmpMap = VG_(newXA)( VG_(malloc), "pc.h_instrument.1", VG_(free), sizeof(TempMapEnt)); for (i = 0; i < sbIn->tyenv->types_used; i++) { TempMapEnt ent; ent.kind = NonShad; ent.shadow = IRTemp_INVALID; VG_(addToXA)( pce.qmpMap, &ent ); } tl_assert( VG_(sizeXA)( pce.qmpMap ) == sbIn->tyenv->types_used ); sgenv = sg_instrument_init( for_sg__newIRTemp_cb, (void*)&pce ); i = 0; while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) { IRStmt* st = sbIn->stmts[i]; tl_assert(st); tl_assert(isFlatIRStmt(st)); stmt( 'C', &pce, sbIn->stmts[i] ); i++; } tl_assert(sbIn->stmts_used > 0); tl_assert(i >= 0); tl_assert(i < sbIn->stmts_used); tl_assert(sbIn->stmts[i]->tag == Ist_IMark); for (; i < sbIn->stmts_used; i++) { sg_instrument_IRStmt( sgenv, pce.sb, sbIn->stmts[i], layout, gWordTy, hWordTy ); stmt( 'C', &pce, sbIn->stmts[i] ); } sg_instrument_final_jump( sgenv, pce.sb, sbIn->next, sbIn->jumpkind, layout, gWordTy, hWordTy ); sg_instrument_fini( sgenv ); tl_assert( VG_(sizeXA)( pce.qmpMap ) == pce.sb->tyenv->types_used ); VG_(deleteXA)( pce.qmpMap ); return pce.sb; }
static IRSB* fz_instrument ( VgCallbackClosure* closure, IRSB* sb_in, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy ) { Int i; IRSB* sb_out; IRDirty* di; if (gWordTy != hWordTy) { /* We don't currently support this case. */ VG_(tool_panic)("host/guest word size mismatch"); } /* Set up SB */ sb_out = deepCopyIRSBExceptStmts(sb_in); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sb_in->stmts_used && sb_in->stmts[i]->tag != Ist_IMark) { addStmtToIRSB(sb_out, sb_in->stmts[i]); i++; } di = unsafeIRDirty_0_N(0, "helper_instrument_superblock", VG_(fnptr_to_fnentry)(helper_instrument_superblock), mkIRExprVec_0() ); addStmtToIRSB(sb_out, IRStmt_Dirty(di)); for (/*use current i*/; i < sb_in->stmts_used; i++) { IRStmt* st = sb_in->stmts[i]; if (!st) continue; switch (st->tag) { case Ist_NoOp: case Ist_IMark: case Ist_AbiHint: case Ist_Dirty: case Ist_MBE: break; case Ist_Put: instrument_Put(st, sb_out); break; case Ist_PutI: instrument_PutI(st, sb_out); break; case Ist_WrTmp: instrument_WrTmp(st, sb_out); break; case Ist_Store: instrument_Store(st, sb_out); break; case Ist_CAS: addStmtToIRSB(sb_out, st); // dirty helpers use temporaries (oldHi, oldLo) defined in the instruction instrument_CAS(st, sb_out); break; case Ist_LLSC: instrument_LLSC(st, sb_out); break; case Ist_Exit: instrument_Exit(st, sb_out); break; } if (st->tag != Ist_CAS) { addStmtToIRSB(sb_out, st); } } // ppIRSB(sb_out); return sb_out; }
static IRSB* fr_instrument(VgCallbackClosure* closure, IRSB* sbIn, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy) { Int i; IRSB* sbOut; IRTypeEnv* tyenv = sbIn->tyenv; IRDirty* di; IRType dataTy; IRExpr** argv; IRCAS* cas; // We don't care about mmaps if (!clo_mmap) return sbIn; // From lackey tool tl_assert(gWordTy == hWordTy); sbOut = deepCopyIRSBExceptStmts(sbIn); // Copy verbatim any IR preamble preceding the first IMark i = 0; while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) { addStmtToIRSB( sbOut, sbIn->stmts[i] ); i++; } for (/*use current i*/; i < sbIn->stmts_used; i++) { IRStmt* st = sbIn->stmts[i]; if (!st || st->tag == Ist_NoOp) continue; switch (st->tag) { case Ist_NoOp: // Make compiler happy case Ist_AbiHint: case Ist_Put: case Ist_PutI: case Ist_MBE: case Ist_IMark: case Ist_WrTmp: case Ist_Exit: addStmtToIRSB( sbOut, st ); break; case Ist_Store: dataTy = typeOfIRExpr( tyenv, st->Ist.Store.data ); argv = mkIRExprVec_2( st->Ist.Store.addr, mkIRExpr_HWord( sizeofIRType( dataTy ) ) ); di = unsafeIRDirty_0_N(/*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv); addStmtToIRSB( sbOut, IRStmt_Dirty(di) ); addStmtToIRSB( sbOut, st ); break; case Ist_LLSC: if (st->Ist.LLSC.storedata != NULL) { dataTy = typeOfIRExpr( tyenv, st->Ist.LLSC.storedata ); argv = mkIRExprVec_2( st->Ist.LLSC.addr, mkIRExpr_HWord( sizeofIRType( dataTy ) ) ); di = unsafeIRDirty_0_N(/*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv); addStmtToIRSB( sbOut, IRStmt_Dirty(di) ); addStmtToIRSB( sbOut, st ); } break; case Ist_Dirty: di = st->Ist.Dirty.details; if (di->mFx != Ifx_None) { // This dirty helper accesses memory. Collect the details. 
tl_assert(di->mAddr != NULL); tl_assert(di->mSize != 0); if (di->mFx == Ifx_Write || di->mFx == Ifx_Modify) { argv = mkIRExprVec_2( di->mAddr, mkIRExpr_HWord( di->mSize ) ); di = unsafeIRDirty_0_N( /*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv ); addStmtToIRSB( sbOut, IRStmt_Dirty(di) ); } } else { tl_assert(di->mAddr == NULL); tl_assert(di->mSize == 0); } addStmtToIRSB( sbOut, st ); break; case Ist_CAS: cas = st->Ist.CAS.details; tl_assert(cas->addr != NULL); tl_assert(cas->dataLo != NULL); argv = mkIRExprVec_2( cas->addr, mkIRExpr_HWord( sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo)) * (cas->dataHi != NULL ? 2 : 1) ) ); di = unsafeIRDirty_0_N( /*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv ); addStmtToIRSB( sbOut, IRStmt_Dirty(di) ); addStmtToIRSB( sbOut, st ); break; } } return sbOut; }