void instrument_WrTmp_Mux0X(IRStmt* st, IRSB* sb_out)
{
    IRTemp tmp = st->Ist.WrTmp.tmp;
    IRExpr* data = st->Ist.WrTmp.data;
    IRExpr* cond = data->Iex.Mux0X.cond;
    IRExpr* expr0 = data->Iex.Mux0X.expr0;
    IRExpr* exprX = data->Iex.Mux0X.exprX;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, expr0));
    IRDirty* di;

    tl_assert(cond->tag == Iex_RdTmp);
    tl_assert(isIRAtom(expr0));
    tl_assert(isIRAtom(exprX));
    tl_assert(typeOfIRTemp(sb_out->tyenv, tmp) == typeOfIRExpr(sb_out->tyenv, expr0));
    tl_assert(typeOfIRTemp(sb_out->tyenv, tmp) == typeOfIRExpr(sb_out->tyenv, exprX));

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_WrTmp_Mux0X",
             VG_(fnptr_to_fnentry)(helper_instrument_WrTmp_Mux0X),
             mkIRExprVec_5(mkIRExpr_HWord(tmp),
                           assignNew_HWord(sb_out, cond),
                           mkIRExpr_HWord((expr0->tag == Iex_RdTmp) ? expr0->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord((exprX->tag == Iex_RdTmp) ? exprX->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
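Several of the instrumenters here rely on a sizeofIRType_bits helper that is not shown. Judging from how its result is passed as a bit count where VEX's sizeofIRType would report bytes, it is presumably just a bit-width wrapper; a minimal sketch under that assumption:

/* Assumed helper: width of an IRType in bits, scaling VEX's
   sizeofIRType, which reports bytes. */
static Int sizeofIRType_bits(IRType ty)
{
    return 8 * sizeofIRType(ty);
}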
void instrument_WrTmp_Binop(IRStmt* st, IRSB* sb_out)
{
    IRTemp tmp = st->Ist.WrTmp.tmp;
    IRExpr* data = st->Ist.WrTmp.data;
    IROp op = data->Iex.Binop.op;
    IRExpr* arg1 = data->Iex.Binop.arg1;
    IRExpr* arg2 = data->Iex.Binop.arg2;
    UInt arg1_value = 0, arg2_value = 0;
    IRExpr* expr = IRExpr_Binop(op, arg1, arg2);
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, expr));
    IRDirty* di;

    // we don't care about floating point and SIMD operations
    if (op > Iop_AddF64)
        return;

    tl_assert(isIRAtom(arg1));
    tl_assert(isIRAtom(arg2));
    tl_assert(typeOfIRTemp(sb_out->tyenv, tmp) == typeOfIRExpr(sb_out->tyenv, expr));

    if (arg1->tag == Iex_Const) {
        switch (arg1->Iex.Const.con->tag) {
            case Ico_U1:  arg1_value = arg1->Iex.Const.con->Ico.U1;  break;
            case Ico_U8:  arg1_value = arg1->Iex.Const.con->Ico.U8;  break;
            case Ico_U16: arg1_value = arg1->Iex.Const.con->Ico.U16; break;
            case Ico_U32: arg1_value = arg1->Iex.Const.con->Ico.U32; break;
            case Ico_U64: arg1_value = arg1->Iex.Const.con->Ico.U64; break;
            default: VG_(tool_panic)("instrument_WrTmp_Binop");
        }
    }
    if (arg2->tag == Iex_Const) {
        switch (arg2->Iex.Const.con->tag) {
            case Ico_U1:  arg2_value = arg2->Iex.Const.con->Ico.U1;  break;
            case Ico_U8:  arg2_value = arg2->Iex.Const.con->Ico.U8;  break;
            case Ico_U16: arg2_value = arg2->Iex.Const.con->Ico.U16; break;
            case Ico_U32: arg2_value = arg2->Iex.Const.con->Ico.U32; break;
            case Ico_U64: arg2_value = arg2->Iex.Const.con->Ico.U64; break;
            default: VG_(tool_panic)("instrument_WrTmp_Binop");
        }
    }

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_WrTmp_Binop",
             VG_(fnptr_to_fnentry)(helper_instrument_WrTmp_Binop),
             mkIRExprVec_7(mkIRExpr_HWord(tmp),
                           mkIRExpr_HWord((arg1->tag == Iex_RdTmp) ? arg1->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord((arg2->tag == Iex_RdTmp) ? arg2->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(op),
                           mkIRExpr_HWord(size),
                           (arg1->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, arg1) : mkIRExpr_HWord(arg1_value),
                           (arg2->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, arg2) : mkIRExpr_HWord(arg2_value)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
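The dirty call above passes seven host-word arguments with regparms 0. The runtime helper itself is not shown; a hypothetical prototype consistent with the mkIRExprVec_7 call (names and parameter types are guesses):

/* Hypothetical prototype matching the dirty call above: the written tmp,
   the operand tmps (or IRTemp_INVALID), the IROp, the result width in
   bits, and each operand's runtime (or constant) value. */
static void helper_instrument_WrTmp_Binop(HWord tmp,
                                          HWord arg1_tmp, HWord arg2_tmp,
                                          HWord op, HWord size,
                                          HWord arg1_value, HWord arg2_value);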
static void store_aux(IRSB *irsb, IREndness endian, IRExpr *addr, IRExpr *data)
{
    if (typeOfIRExpr(irsb->tyenv, data) == Ity_D64) {
        /* The insn selectors do not support writing a DFP value to memory.
           So we need to fix it here by reinterpreting the DFP value as an
           integer and storing that. */
        data = unop(Iop_ReinterpD64asI64, data);
    }
    if (typeOfIRExpr(irsb->tyenv, data) == Ity_I1) {
        /* We cannot store a single bit. So we store it in a 32-bit
           container. See also load_aux. */
        data = unop(Iop_1Uto32, data);
    }
    stmt(irsb, IRStmt_Store(endian, addr, data));
}
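The comment refers to a load_aux counterpart that is not included here. A minimal sketch of what it presumably does, mirroring the two fix-ups in store_aux (the name and shape are assumptions):

/* Assumed counterpart to store_aux: undo the reinterpret/widen fix-ups
   that store_aux applied when the value went to memory. */
static IRExpr* load_aux(IRSB *irsb, IREndness endian, IRType type, IRExpr *addr)
{
    if (type == Ity_D64) {
        /* The DFP value was stored as an integer; reinterpret it back. */
        return unop(Iop_ReinterpI64asD64, IRExpr_Load(endian, Ity_I64, addr));
    }
    if (type == Ity_I1) {
        /* The bit was stored in a 32-bit container; narrow it back down. */
        return unop(Iop_32to1, IRExpr_Load(endian, Ity_I32, addr));
    }
    return IRExpr_Load(endian, type, addr);
}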
static IRExpr* assignNew_HWord(IRSB* sb_out, IRExpr* expr)
{
    IRTemp tmp = newIRTemp(sb_out->tyenv, Ity_I32);

    switch (typeOfIRExpr(sb_out->tyenv, expr)) {
        case Ity_I1:
            addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_1Uto32, expr)));
            break;
        case Ity_I8:
            addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_8Uto32, expr)));
            break;
        case Ity_I16:
            addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_16Uto32, expr)));
            break;
        case Ity_I32:
            addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, expr));
            break;
        case Ity_I64:
            addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_64to32, expr)));
            break;
        default:
            VG_(tool_panic)("assignNew_HWord");
    }

    return IRExpr_RdTmp(tmp);
}
/* Converts an IRExpr into 1 or 2 "flat" expressions of type Long */
static void packToI64 ( IRSB* sb, IRExpr* e, IRExpr* res[2], IROp irop )
{
    res[1] = NULL; // won't be used at all
    IRTemp tmp64;

    switch (typeOfIRExpr(sb->tyenv, e)) {
    case Ity_I1:  res[0] = IRExpr_Unop(Iop_1Uto64, e);  break;
    case Ity_I8:  res[0] = IRExpr_Unop(Iop_8Uto64, e);  break;
    case Ity_I16: res[0] = IRExpr_Unop(Iop_16Uto64, e); break;
    case Ity_I32: res[0] = IRExpr_Unop(Iop_32Uto64, e); break;
    case Ity_F32:
        e = IRExpr_Unop(Iop_ReinterpF32asI32, e);
        res[0] = IRExpr_Unop(Iop_32Uto64, e);
        break;
    case Ity_I64: res[0] = e; return;
    case Ity_F64: res[0] = IRExpr_Unop(Iop_ReinterpF64asI64, e); break;
    case Ity_V128: /* 128-bit SIMD */
        //tmp64 = newIRTemp(sb->tyenv, Ity_I64);
        //addStmtToIRSB(sb, IRStmt_WrTmp(tmp64, IRExpr_Unop(Iop_V128to64, e)));
        //res[0] = IRExpr_Unop(Iop_64to32,   IRExpr_RdTmp(tmp64) );
        //res[1] = IRExpr_Unop(Iop_64HIto32, IRExpr_RdTmp(tmp64) );
        res[0] = IRExpr_Unop(Iop_V128to64, e);
        if (dropV128HiPart(irop))
            break;
        res[1] = IRExpr_Unop(Iop_V128HIto64, e);
        break;
    case Ity_I128: /* 128-bit scalar */
    case Ity_INVALID:
    default:
        VG_(tool_panic)("COJAC cannot packToI64...");
        break;
    }

    IRTemp myArg = newIRTemp(sb->tyenv, Ity_I64);
    addStmtToIRSB(sb, IRStmt_WrTmp(myArg, res[0]));
    res[0] = IRExpr_RdTmp(myArg); // now that's a "flat" expression
    if (res[1] == NULL)
        return;                   // ... else it was 128-bit SIMD
    IRTemp myArg2 = newIRTemp(sb->tyenv, Ity_I64);
    addStmtToIRSB(sb, IRStmt_WrTmp(myArg2, res[1]));
    res[1] = IRExpr_RdTmp(myArg2); // now that's a "flat" expression
}
/* Converts an IRExpr into 1 or 2 "flat" expressions of type Int */
static void packToI32 ( IRSB* sb, IRExpr* e, IRExpr* res[2] )
{
    //IRType eType = Ity_I32;
    IRTemp tmp64;
    res[1] = NULL;

    switch (typeOfIRExpr(sb->tyenv, e)) {
    case Ity_I1:  res[0] = IRExpr_Unop(Iop_1Uto32, e);  break;
    case Ity_I8:  res[0] = IRExpr_Unop(Iop_8Uto32, e);  break;
    case Ity_I16: res[0] = IRExpr_Unop(Iop_16Uto32, e); break;
    case Ity_I32: res[0] = e; return;
    case Ity_F32: res[0] = IRExpr_Unop(Iop_ReinterpF32asI32, e); break;
    case Ity_I64:
        res[0] = IRExpr_Unop(Iop_64to32, e);
        res[1] = IRExpr_Unop(Iop_64HIto32, e);
        break;
    case Ity_F64:
        tmp64 = newIRTemp(sb->tyenv, Ity_I64);
        addStmtToIRSB(sb, IRStmt_WrTmp(tmp64, IRExpr_Unop(Iop_ReinterpF64asI64, e)));
        res[0] = IRExpr_Unop(Iop_64to32,   IRExpr_RdTmp(tmp64) );
        res[1] = IRExpr_Unop(Iop_64HIto32, IRExpr_RdTmp(tmp64) );
        break;
    case Ity_V128: /* 128-bit SIMD */
    case Ity_I128: /* 128-bit scalar */
    case Ity_INVALID:
    default:
        VG_(tool_panic)("COJAC cannot packToI32...");
        break;
    }

    IRTemp myArg = newIRTemp(sb->tyenv, Ity_I32);
    addStmtToIRSB(sb, IRStmt_WrTmp(myArg, res[0]));
    res[0] = IRExpr_RdTmp(myArg); // now that's a "flat" expression
    if (res[1] == NULL)
        return;
    IRTemp myArg2 = newIRTemp(sb->tyenv, Ity_I32);
    addStmtToIRSB(sb, IRStmt_WrTmp(myArg2, res[1]));
    res[1] = IRExpr_RdTmp(myArg2); // now that's a "flat" expression
}
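A sketch of how packToI32 might be consumed, assuming a 32-bit host (where dirty-call arguments must be word-sized) and hypothetical runtime handlers cojac_handler_i32/cojac_handler_i64:

static void cojac_handler_i32(UWord op, UWord value);        /* hypothetical */
static void cojac_handler_i64(UWord op, UWord lo, UWord hi); /* hypothetical */

static void emitCojacCall32(IRSB* sb, IRExpr* operand, IROp op)
{
    IRExpr* packed[2];
    IRDirty* di;

    packToI32(sb, operand, packed);
    if (packed[1] == NULL) {
        /* operand fitted in one 32-bit word */
        di = unsafeIRDirty_0_N(2, "cojac_handler_i32",
                               VG_(fnptr_to_fnentry)(cojac_handler_i32),
                               mkIRExprVec_2(mkIRExpr_HWord(op), packed[0]));
    } else {
        /* 64-bit operand split into low/high halves */
        di = unsafeIRDirty_0_N(3, "cojac_handler_i64",
                               VG_(fnptr_to_fnentry)(cojac_handler_i64),
                               mkIRExprVec_3(mkIRExpr_HWord(op), packed[0], packed[1]));
    }
    addStmtToIRSB(sb, IRStmt_Dirty(di));
}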
/* Bind the given expression to a new temporary, and return the temporary.
   This effectively converts an arbitrary expression into an IRAtom. */
static IRExpr* assignNew(IRSB* sb_out, IRExpr* expr)
{
    IRTemp tmp = newIRTemp(sb_out->tyenv, typeOfIRExpr(sb_out->tyenv, expr));

    addStmtToIRSB(sb_out, IRStmt_WrTmp(tmp, expr));

    return IRExpr_RdTmp(tmp);
}
Exp *arm_translate_get( IRExpr *expr, IRSB *irbb, vector<Stmt *> *irout )
{
    int offset = expr->Iex.Get.offset;

    assert(typeOfIRExpr(irbb->tyenv, expr) == Ity_I32);

    return translate_get_reg_32(offset);
}
static IRAtom* assignNew ( IRSB* sb, IRExpr* e )
{
    IRTemp t;
    IRType ty = typeOfIRExpr(sb->tyenv, e);

    t = newIRTemp(sb->tyenv, ty);
    assign(sb, t, e);
    return mkexpr(t);
}
/* DO NOT CALL THIS DIRECTLY ! */
static ARMAMode3* iselIntExpr_AMode3_wrk ( ISelEnv* env, IRExpr* e )
{
    IRType ty = typeOfIRExpr(env->type_env, e);
    vassert(ty == Ity_I32);

    // ARMam3_RI,     /* Reg +/- Imm8 */
    // ARMam3_RR,     /* Reg +/- Reg */

    return NULL;
}
Stmt *arm_translate_put( IRStmt *stmt, IRSB *irbb, vector<Stmt *> *irout )
{
    int offset = stmt->Ist.Put.offset;

    Exp *data = translate_expr(stmt->Ist.Put.data, irbb, irout);

    assert(typeOfIRExpr(irbb->tyenv, stmt->Ist.Put.data) == Ity_I32);

    return translate_put_reg_32(offset, data, irbb);
}
void instrument_CAS_double_element(IRStmt* st, IRSB* sb_out)
{
    IRCAS* cas = st->Ist.CAS.details;
    IRTemp oldHi = cas->oldHi, oldLo = cas->oldLo;
    IREndness end = cas->end;
    IRExpr* addr = cas->addr;
    IRExpr *expdHi = cas->expdHi, *expdLo = cas->expdLo;
    IRExpr *dataHi = cas->dataHi, *dataLo = cas->dataLo;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, dataLo));
    IROp op;
    IRExpr *expr, *expr2;
    IRDirty* di;

    tl_assert(isIRAtom(addr));
    tl_assert(end == Iend_LE); // we assume endianness is little endian
    tl_assert(isIRAtom(dataLo));
    tl_assert(isIRAtom(dataHi));
    if (addr->tag == Iex_Const)
        tl_assert(addr->Iex.Const.con->tag == Ico_U32);
    tl_assert(typeOfIRExpr(sb_out->tyenv, addr) == typeOfIRExpr(sb_out->tyenv, dataLo));

    switch (size) {
        case 8:  op = Iop_CasCmpEQ8;  break;
        case 16: op = Iop_CasCmpEQ16; break;
        case 32: op = Iop_CasCmpEQ32; break;
        default: VG_(tool_panic)("instrument_CAS_double_element");
    }

    expr  = assignNew(sb_out, IRExpr_Binop(op, IRExpr_RdTmp(oldLo), expdLo)); // statement has to be flat
    expr2 = assignNew(sb_out, IRExpr_Binop(op, IRExpr_RdTmp(oldHi), expdHi)); // statement has to be flat

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_CAS_double_element",
             VG_(fnptr_to_fnentry)(helper_instrument_CAS_double_element),
             mkIRExprVec_6((addr->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, addr)
                                                    : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
                           mkIRExpr_HWord((dataLo->tag == Iex_RdTmp) ? dataLo->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord((dataHi->tag == Iex_RdTmp) ? dataHi->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size),
                           assignNew_HWord(sb_out, expr),
                           assignNew_HWord(sb_out, expr2)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
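The single-element CAS case is not shown. It presumably mirrors the double-element version with the Hi half dropped; a sketch under that assumption (the helper name is hypothetical):

void instrument_CAS_single_element(IRStmt* st, IRSB* sb_out)
{
    IRCAS* cas = st->Ist.CAS.details;
    IRTemp oldLo = cas->oldLo;
    IRExpr* addr = cas->addr;
    IRExpr* expdLo = cas->expdLo;
    IRExpr* dataLo = cas->dataLo;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, dataLo));
    IROp op;
    IRExpr* expr;
    IRDirty* di;

    tl_assert(isIRAtom(addr));
    tl_assert(isIRAtom(dataLo));

    switch (size) {
        case 8:  op = Iop_CasCmpEQ8;  break;
        case 16: op = Iop_CasCmpEQ16; break;
        case 32: op = Iop_CasCmpEQ32; break;
        default: VG_(tool_panic)("instrument_CAS_single_element");
    }

    /* success flag: oldLo == expdLo, flattened as above */
    expr = assignNew(sb_out, IRExpr_Binop(op, IRExpr_RdTmp(oldLo), expdLo));

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_CAS_single_element",
             VG_(fnptr_to_fnentry)(helper_instrument_CAS_single_element),
             mkIRExprVec_4((addr->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, addr)
                                                    : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
                           mkIRExpr_HWord((dataLo->tag == Iex_RdTmp) ? dataLo->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size),
                           assignNew_HWord(sb_out, expr)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}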
/* DO NOT CALL THIS DIRECTLY ! */
static ARMAMode2* iselIntExpr_AMode2 ( ISelEnv* env, IRExpr* e )
{
    IRType ty = typeOfIRExpr(env->type_env, e);
    vassert(ty == Ity_I32);

    // ARMam2_RI,     /* Reg +/- Imm12 */
    // ARMam2_RR,     /* Reg +/- Reg */
    // ARMam2_RRS,    /* Reg +/- (Reg << Imm5) */

    return NULL;
}
void instrument_WrTmp_Unop(IRStmt* st, IRSB* sb_out)
{
    IRTemp tmp = st->Ist.WrTmp.tmp;
    IRExpr* data = st->Ist.WrTmp.data;
    IROp op = data->Iex.Unop.op;
    IRExpr* arg = data->Iex.Unop.arg;
    IRExpr* expr = IRExpr_Unop(op, arg);
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, expr));
    IRDirty* di;

    tl_assert(isIRAtom(arg));
    tl_assert(typeOfIRTemp(sb_out->tyenv, tmp) == typeOfIRExpr(sb_out->tyenv, expr));

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_WrTmp_Unop",
             VG_(fnptr_to_fnentry)(helper_instrument_WrTmp_Unop),
             mkIRExprVec_4(mkIRExpr_HWord(tmp),
                           mkIRExpr_HWord((arg->tag == Iex_RdTmp) ? arg->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(op),
                           mkIRExpr_HWord(size)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
void instrument_Put(IRStmt* st, IRSB* sb_out)
{
    Int offset = st->Ist.Put.offset;
    IRExpr* data = st->Ist.Put.data;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, data)); // the data transfer type is the type of data
    IRDirty* di;

    tl_assert(isIRAtom(data));

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_Put",
             VG_(fnptr_to_fnentry)(helper_instrument_Put),
             mkIRExprVec_3(mkIRExpr_HWord(offset),
                           mkIRExpr_HWord((data->tag == Iex_RdTmp) ? data->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
/* Store a value to memory. If a value requires more than 8 bytes a series
   of 8-byte stores will be generated. */
static __inline__ void store(IRSB *irsb, IREndness endian, HWord haddr, IRExpr *data)
{
    IROp high, low;
    IRExpr *addr, *next_addr;

    if (VEX_HOST_WORDSIZE == 8) {
        addr = mkU64(haddr);
        next_addr = binop(Iop_Add64, addr, mkU64(8));
    } else if (VEX_HOST_WORDSIZE == 4) {
        addr = mkU32(haddr);
        next_addr = binop(Iop_Add32, addr, mkU32(8));
    } else {
        vpanic("invalid #bytes for address");
    }

    IRType type = typeOfIRExpr(irsb->tyenv, data);

    vassert(type == Ity_I1 || sizeofIRType(type) <= 16);
    switch (type) {
    case Ity_I128: high = Iop_128HIto64;   low = Iop_128to64;     goto store128;
    case Ity_F128: high = Iop_F128HItoF64; low = Iop_F128LOtoF64; goto store128;
    case Ity_D128: high = Iop_D128HItoD64; low = Iop_D128LOtoD64; goto store128;

    store128:
        /* Two stores of 64 bit each. */
        if (endian == Iend_BE) {
            /* The more significant bits are at the lower address. */
            store_aux(irsb, endian, addr, unop(high, data));
            store_aux(irsb, endian, next_addr, unop(low, data));
        } else {
            /* The more significant bits are at the higher address. */
            store_aux(irsb, endian, addr, unop(low, data));
            store_aux(irsb, endian, next_addr, unop(high, data));
        }
        return;

    default:
        store_aux(irsb, endian, addr, data);
        return;
    }
}
void instrument_Store(IRStmt* st, IRSB* sb_out)
{
    IRExpr* addr = st->Ist.Store.addr;
    IRExpr* data = st->Ist.Store.data;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, st->Ist.Store.data)); // the data transfer type is the type of data
    IRDirty* di;

    tl_assert(isIRAtom(addr));
    tl_assert(isIRAtom(data));
    if (addr->tag == Iex_Const)
        tl_assert(addr->Iex.Const.con->tag == Ico_U32);

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_Store",
             VG_(fnptr_to_fnentry)(helper_instrument_Store),
             mkIRExprVec_3((addr->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, addr)
                                                    : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
                           mkIRExpr_HWord((data->tag == Iex_RdTmp) ? data->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
static Int pushArg ( ISelEnv* env, IRExpr* arg )
{
    IRType arg_ty = typeOfIRExpr(env->type_env, arg);

    if (arg_ty == Ity_I32) {
        // CAB: This right?
        addInstr(env, ARMInstr_StoreW( GET_SP_REG(), iselIntExpr_AMode2(env, arg) ));
        return 1;
    }
#if 0
    else if (arg_ty == Ity_I64) {
        HReg rHi, rLo;
        iselInt64Expr(&rHi, &rLo, env, arg);
        addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
        addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
        return 2;
    }
#endif
    ppIRExpr(arg);
    vpanic("pushArg(arm): can't handle arg of this type");
}
void instrument_LLSC_Store_Conditional(IRStmt* st, IRSB* sb_out)
{
    IRTemp result = st->Ist.LLSC.result;
    IRExpr* addr = st->Ist.LLSC.addr;
    IRExpr* storedata = st->Ist.LLSC.storedata;
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, storedata)); // the data transfer type is the type of storedata
    IRExpr* result_expr = IRExpr_RdTmp(result);
    IRDirty* di;

    tl_assert(isIRAtom(addr));
    tl_assert(isIRAtom(storedata));
    if (addr->tag == Iex_Const)
        tl_assert(addr->Iex.Const.con->tag == Ico_U32);

    di = unsafeIRDirty_0_N(0,
             "helper_instrument_LLSC_Store_Conditional",
             VG_(fnptr_to_fnentry)(helper_instrument_LLSC_Store_Conditional),
             mkIRExprVec_4((addr->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, addr)
                                                    : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
                           mkIRExpr_HWord((storedata->tag == Iex_RdTmp) ? storedata->Iex.RdTmp.tmp : IRTemp_INVALID),
                           mkIRExpr_HWord(size),
                           assignNew_HWord(sb_out, result_expr)));
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
/* DO NOT CALL THIS DIRECTLY ! */
static ARMAMode1* iselIntExpr_AMode1_wrk ( ISelEnv* env, IRExpr* e )
{
    IRType ty = typeOfIRExpr(env->type_env, e);
    vassert(ty == Ity_I32);

    // ARMam1_I12A,   /* Imm12A: extended (rotated) immediate */
    // ARMam1_ShlI,   /* ShlI  reg  Imm5 */
    // ARMam1_ShrI,   /* ShrI  reg  Imm5 */
    // ARMam1_SarI,   /* SarI  reg  Imm5 */
    // ARMam1_ShlR,   /* ShlR  reg  reg */
    // ARMam1_ShrR,   /* ShrR  reg  reg */
    // ARMam1_SarR,   /* SarR  reg  reg */

    // ALU ops:
    /* ARMalu_And, ARMalu_Orr, ARMalu_Eor, ARMalu_Bic,                           // Logic
       ARMalu_Sub, ARMalu_Rsb, ARMalu_Add, ARMalu_Adc, ARMalu_Sbc, ARMalu_Rsc,  // Arith
       ARMalu_Tst, ARMalu_Teq, ARMalu_Cmp, ARMalu_Cmn,                          // test
       ARMalu_Mov, ARMalu_Mvn                                                   // Move
    */

    return NULL;
}
static IRSB* drd_instrument(VgCallbackClosure* const closure,
                            IRSB* const bb_in,
                            VexGuestLayout* const layout,
                            VexGuestExtents* const vge,
                            IRType const gWordTy,
                            IRType const hWordTy)
{
    IRDirty* di;
    Int      i;
    IRSB*    bb;
    IRExpr** argv;
    Bool     instrument = True;
    Bool     bus_locked = False;

    /* Set up BB */
    bb = emptyIRSB();
    bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
    bb->next = deepCopyIRExpr(bb_in->next);
    bb->jumpkind = bb_in->jumpkind;

    for (i = 0; i < bb_in->stmts_used; i++) {
        IRStmt* const st = bb_in->stmts[i];
        tl_assert(st);
        if (st->tag == Ist_NoOp)
            continue;

        switch (st->tag) {
        /* Note: the code for not instrumenting the code in .plt
           sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21
           + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).
           This is because on this platform dynamic library symbols are
           relocated in another way than by later binutils versions. The
           linker e.g. does not generate .got.plt sections on CentOS 3.0. */
        case Ist_IMark:
            instrument = VG_(seginfo_sect_kind)(NULL, 0, st->Ist.IMark.addr) != Vg_SectPLT;
            addStmtToIRSB(bb, st);
            break;

        case Ist_MBE:
            switch (st->Ist.MBE.event) {
            case Imbe_Fence:
                break; /* not interesting */
            case Imbe_BusLock:
            case Imbe_SnoopedStoreBegin:
                tl_assert(! bus_locked);
                bus_locked = True;
                break;
            case Imbe_BusUnlock:
            case Imbe_SnoopedStoreEnd:
                tl_assert(bus_locked);
                bus_locked = False;
                break;
            default:
                tl_assert(0);
            }
            addStmtToIRSB(bb, st);
            break;

        case Ist_Store:
            if (instrument && ! bus_locked) {
                instrument_store(bb, st->Ist.Store.addr,
                                 sizeofIRType(typeOfIRExpr(bb->tyenv, st->Ist.Store.data)));
            }
            addStmtToIRSB(bb, st);
            break;

        case Ist_WrTmp:
            if (instrument) {
                const IRExpr* const data = st->Ist.WrTmp.data;
                if (data->tag == Iex_Load) {
                    instrument_load(bb, data->Iex.Load.addr,
                                    sizeofIRType(data->Iex.Load.ty));
                }
            }
            addStmtToIRSB(bb, st);
            break;

        case Ist_Dirty:
            if (instrument) {
                IRDirty* d = st->Ist.Dirty.details;
                IREffect const mFx = d->mFx;
                switch (mFx) {
                case Ifx_None:
                    break;
                case Ifx_Read:
                case Ifx_Write:
                case Ifx_Modify:
                    tl_assert(d->mAddr);
                    tl_assert(d->mSize > 0);
                    argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
                    if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                        di = unsafeIRDirty_0_N(
                                 /*regparms*/2,
                                 "drd_trace_load",
                                 VG_(fnptr_to_fnentry)(drd_trace_load),
                                 argv);
                        addStmtToIRSB(bb, IRStmt_Dirty(di));
                    }
                    if ((mFx == Ifx_Write || mFx == Ifx_Modify) && ! bus_locked) {
                        di = unsafeIRDirty_0_N(
                                 /*regparms*/2,
                                 "drd_trace_store",
                                 VG_(fnptr_to_fnentry)(drd_trace_store),
                                 argv);
                        addStmtToIRSB(bb, IRStmt_Dirty(di));
                    }
                    break;
                default:
                    tl_assert(0);
                }
            }
            addStmtToIRSB(bb, st);
            break;

        default:
            addStmtToIRSB(bb, st);
            break;
        }
    }

    tl_assert(! bus_locked);

    return bb;
}
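The drd_trace_load/drd_trace_store helpers are emitted with regparms 2 and an (address, size) argument vector, so their prototypes are presumably of this shape (a guess consistent with the calls above):

/* Assumed prototypes for the regparms(2) dirty calls above. */
static VG_REGPARM(2) void drd_trace_load (Addr addr, SizeT size);
static VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size);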
static void collectStatementInfo(IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st,
                                 Addr* instrAddr, UInt* instrLen,
                                 IRExpr** loadAddrExpr, IRExpr** storeAddrExpr,
                                 UInt* dataSize, IRType hWordTy)
{
    CLG_ASSERT(isFlatIRStmt(st));

    switch (st->tag) {
    case Ist_NoOp:
        break;

    case Ist_AbiHint:
        /* ABI hints aren't interesting. Ignore. */
        break;

    case Ist_IMark:
        /* st->Ist.IMark.addr is a 64-bit int. ULong_to_Ptr casts this
           to the host's native pointer type; if that is 32 bits then it
           discards the upper 32 bits. If we are cachegrinding on a
           32-bit host then we are also ensured that the guest word size
           is 32 bits, due to the assertion in cg_instrument that the
           host and guest word sizes must be the same. Hence
           st->Ist.IMark.addr will have been derived from a 32-bit guest
           code address and truncation of it is safe. I believe this
           assignment should be correct for both 32- and 64-bit
           machines. */
        *instrAddr = (Addr)ULong_to_Ptr(st->Ist.IMark.addr);
        *instrLen = st->Ist.IMark.len;
        break;

    case Ist_Tmp: {
        IRExpr* data = st->Ist.Tmp.data;
        if (data->tag == Iex_Load) {
            IRExpr* aexpr = data->Iex.Load.addr;
            CLG_ASSERT( isIRAtom(aexpr) );
            // Note also, endianness info is ignored. I guess that's not
            // interesting.
            // XXX: repe cmpsb does two loads... the first one is ignored here!
            //tl_assert( NULL == *loadAddrExpr );          // XXX: ???
            *loadAddrExpr = aexpr;
            *dataSize = sizeofIRType(data->Iex.Load.ty);
        }
        break;
    }

    case Ist_Store: {
        IRExpr* data  = st->Ist.Store.data;
        IRExpr* aexpr = st->Ist.Store.addr;
        CLG_ASSERT( isIRAtom(aexpr) );
        if ( NULL == *storeAddrExpr ) {
            /* this is a kludge: ignore all except the first store from
               an instruction. */
            *storeAddrExpr = aexpr;
            *dataSize = sizeofIRType(typeOfIRExpr(tyenv, data));
        }
        break;
    }

    case Ist_Dirty: {
        IRDirty* d = st->Ist.Dirty.details;
        if (d->mFx != Ifx_None) {
            /* This dirty helper accesses memory. Collect the details. */
            CLG_ASSERT(d->mAddr != NULL);
            CLG_ASSERT(d->mSize != 0);
            *dataSize = d->mSize;
            if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                *loadAddrExpr = d->mAddr;
            if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                *storeAddrExpr = d->mAddr;
        } else {
            CLG_ASSERT(d->mAddr == NULL);
            CLG_ASSERT(d->mSize == 0);
        }
        break;
    }

    case Ist_Put:
    case Ist_PutI:
    case Ist_MFence:
    case Ist_Exit:
        break;

    default:
        VG_(printf)("\n");
        ppIRStmt(st);
        VG_(printf)("\n");
        VG_(tool_panic)("Callgrind: unhandled IRStmt");
    }
}
static IRBB* cg_instrument ( IRBB* bbIn, VexGuestLayout* layout,
                             Addr64 orig_addr_noredir, VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
    Int        i, isize;
    IRStmt*    st;
    Addr64     cia; /* address of current insn */
    CgState    cgs;
    IRTypeEnv* tyenv = bbIn->tyenv;
    InstrInfo* curr_inode = NULL;

    if (gWordTy != hWordTy) {
        /* We don't currently support this case. */
        VG_(tool_panic)("host/guest word size mismatch");
    }

    /* Set up BB, including copying of the where-next stuff. */
    cgs.bbOut = emptyIRBB();
    cgs.bbOut->tyenv = dopyIRTypeEnv(tyenv);
    tl_assert( isIRAtom(bbIn->next) );
    cgs.bbOut->next = dopyIRExpr(bbIn->next);
    cgs.bbOut->jumpkind = bbIn->jumpkind;

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRBB( cgs.bbOut, bbIn->stmts[i] );
        i++;
    }

    // Get the first statement, and initial cia from it
    tl_assert(bbIn->stmts_used > 0);
    tl_assert(i < bbIn->stmts_used);
    st = bbIn->stmts[i];
    tl_assert(Ist_IMark == st->tag);
    cia = st->Ist.IMark.addr;

    // Set up running state and get block info
    cgs.events_used = 0;
    cgs.bbInfo = get_BB_info(bbIn, (Addr)orig_addr_noredir);
    cgs.bbInfo_i = 0;

    if (DEBUG_CG)
        VG_(printf)("\n\n---------- cg_instrument ----------\n");

    // Traverse the block, initialising inodes, adding events and flushing as
    // necessary.
    for (/*use current i*/; i < bbIn->stmts_used; i++) {
        st = bbIn->stmts[i];
        tl_assert(isFlatIRStmt(st));

        switch (st->tag) {
        case Ist_NoOp:
        case Ist_AbiHint:
        case Ist_Put:
        case Ist_PutI:
        case Ist_MFence:
            break;

        case Ist_IMark:
            cia   = st->Ist.IMark.addr;
            isize = st->Ist.IMark.len;

            // If Vex fails to decode an instruction, the size will be zero.
            // Pretend otherwise.
            if (isize == 0)
                isize = VG_MIN_INSTR_SZB;

            // Sanity-check size.
            tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB)
                       || VG_CLREQ_SZB == isize );

            // Get space for and init the inode, record it as the current one.
            // Subsequent Dr/Dw/Dm events from the same instruction will
            // also use it.
            curr_inode = setup_InstrInfo(&cgs, cia, isize);

            addEvent_Ir( &cgs, curr_inode );
            break;

        case Ist_Tmp: {
            IRExpr* data = st->Ist.Tmp.data;
            if (data->tag == Iex_Load) {
                IRExpr* aexpr = data->Iex.Load.addr;
                // Note also, endianness info is ignored. I guess
                // that's not interesting.
                addEvent_Dr( &cgs, curr_inode,
                             sizeofIRType(data->Iex.Load.ty), aexpr );
            }
            break;
        }

        case Ist_Store: {
            IRExpr* data  = st->Ist.Store.data;
            IRExpr* aexpr = st->Ist.Store.addr;
            addEvent_Dw( &cgs, curr_inode,
                         sizeofIRType(typeOfIRExpr(tyenv, data)), aexpr );
            break;
        }

        case Ist_Dirty: {
            Int dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
                /* This dirty helper accesses memory. Collect the details. */
                tl_assert(d->mAddr != NULL);
                tl_assert(d->mSize != 0);
                dataSize = d->mSize;
                // Large (eg. 28B, 108B, 512B on x86) data-sized
                // instructions will be done inaccurately, but they're
                // very rare and this avoids errors from hitting more
                // than two cache lines in the simulation.
                if (dataSize > MIN_LINE_SIZE)
                    dataSize = MIN_LINE_SIZE;
                if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                    addEvent_Dr( &cgs, curr_inode, dataSize, d->mAddr );
                if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                    addEvent_Dw( &cgs, curr_inode, dataSize, d->mAddr );
            } else {
                tl_assert(d->mAddr == NULL);
                tl_assert(d->mSize == 0);
            }
            break;
        }

        case Ist_Exit:
            /* We may never reach the next statement, so need to flush
               all outstanding transactions now. */
            flushEvents( &cgs );
            break;

        default:
            tl_assert(0);
            break;
        }

        /* Copy the original statement */
        addStmtToIRBB( cgs.bbOut, st );

        if (DEBUG_CG) {
            ppIRStmt(st);
            VG_(printf)("\n");
        }
    }

    /* At the end of the bb. Flush outstandings. */
    flushEvents( &cgs );

    /* done. stay sane ... */
    tl_assert(cgs.bbInfo_i == cgs.bbInfo->n_instrs);

    if (DEBUG_CG) {
        VG_(printf)( "goto {");
        ppIRJumpKind(bbIn->jumpkind);
        VG_(printf)( "} ");
        ppIRExpr( bbIn->next );
        VG_(printf)( "}\n");
    }

    return cgs.bbOut;
}
static IRSB* lk_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
    IRDirty*   di;
    Int        i;
    IRSB*      sbOut;
    HChar      fnname[100];
    IRTypeEnv* tyenv = sbIn->tyenv;
    Addr       iaddr = 0, dst;
    UInt       ilen = 0;
    Bool       condition_inverted = False;

    if (gWordTy != hWordTy) {
        /* We don't currently support this case. */
        VG_(tool_panic)("host/guest word size mismatch");
    }

    /* Set up SB */
    sbOut = deepCopyIRSBExceptStmts(sbIn);

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRSB( sbOut, sbIn->stmts[i] );
        i++;
    }

    if (clo_basic_counts) {
        /* Count this superblock. */
        di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                                VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                                mkIRExprVec_0() );
        addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
    }

    if (clo_trace_sbs) {
        /* Print this superblock's address. */
        di = unsafeIRDirty_0_N( 0, "trace_superblock",
                                VG_(fnptr_to_fnentry)( &trace_superblock ),
                                mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) ) );
        addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
    }

    if (clo_trace_mem) {
        events_used = 0;
    }

    for (/*use current i*/; i < sbIn->stmts_used; i++) {
        IRStmt* st = sbIn->stmts[i];
        if (!st || st->tag == Ist_NoOp)
            continue;

        if (clo_basic_counts) {
            /* Count one VEX statement. */
            di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                    VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                    mkIRExprVec_0() );
            addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
        }

        switch (st->tag) {
        case Ist_NoOp:
        case Ist_AbiHint:
        case Ist_Put:
        case Ist_PutI:
        case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_IMark:
            if (clo_basic_counts) {
                /* Needed to be able to check for inverted condition in
                   Ist_Exit */
                iaddr = st->Ist.IMark.addr;
                ilen  = st->Ist.IMark.len;

                /* Count guest instruction. */
                di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                        VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                        mkIRExprVec_0() );
                addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

                /* An unconditional branch to a known destination in the
                 * guest's instructions can be represented, in the IRSB to
                 * instrument, by the VEX statements that are the
                 * translation of that known destination. This feature is
                 * called 'SB chasing' and can be influenced by command
                 * line option --vex-guest-chase-thresh.
                 *
                 * To get an accurate count of the calls to a specific
                 * function, taking SB chasing into account, we need to
                 * check for each guest instruction (Ist_IMark) if it is
                 * the entry point of a function.
                 */
                tl_assert(clo_fnname);
                tl_assert(clo_fnname[0]);
                if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                             fnname, sizeof(fnname))
                    && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                    di = unsafeIRDirty_0_N( 0, "add_one_func_call",
                                            VG_(fnptr_to_fnentry)( &add_one_func_call ),
                                            mkIRExprVec_0() );
                    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
                }
            }
            if (clo_trace_mem) {
                // WARNING: do not remove this function call, even if you
                // aren't interested in instruction reads. See the comment
                // above the function itself for more detail.
                addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                             st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
                IRExpr* data = st->Ist.WrTmp.data;
                if (data->tag == Iex_Load) {
                    addEvent_Dr( sbOut, data->Iex.Load.addr,
                                 sizeofIRType(data->Iex.Load.ty) );
                }
            }
            if (clo_detailed_counts) {
                IRExpr* expr = st->Ist.WrTmp.data;
                IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
                tl_assert(type != Ity_INVALID);
                switch (expr->tag) {
                case Iex_Load:
                    instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                    break;
                case Iex_Unop:
                case Iex_Binop:
                case Iex_Triop:
                case Iex_Qop:
                case Iex_ITE:
                    instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                    break;
                default:
                    break;
                }
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
                addEvent_Dw( sbOut, st->Ist.Store.addr, sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
                instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
                addEvent_Dw_guarded( sbOut, sg->addr,
                                     sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
                instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
                addEvent_Dr_guarded( sbOut, lg->addr,
                                     sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
                instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Dirty: {
            if (clo_trace_mem) {
                Int      dsize;
                IRDirty* d = st->Ist.Dirty.details;
                if (d->mFx != Ifx_None) {
                    // This dirty helper accesses memory. Collect the details.
                    tl_assert(d->mAddr != NULL);
                    tl_assert(d->mSize != 0);
                    dsize = d->mSize;
                    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                        addEvent_Dr( sbOut, d->mAddr, dsize );
                    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                        addEvent_Dw( sbOut, d->mAddr, dsize );
                } else {
                    tl_assert(d->mAddr == NULL);
                    tl_assert(d->mSize == 0);
                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_CAS: {
            /* We treat it as a read and a write of the location. I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex front
               ends would translate a lock-prefixed instruction into a
               (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
                dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
                addEvent_Dr( sbOut, cas->addr, dataSize );
                addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
                instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
                if (cas->dataHi != NULL) /* dcas */
                    instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
                instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
                if (cas->dataHi != NULL) /* dcas */
                    instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
                /* LL */
                dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
                if (clo_trace_mem) {
                    addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );
                    /* flush events before LL, helps SC to succeed */
                    flushEvents(sbOut);
                }
                if (clo_detailed_counts)
                    instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
                /* SC */
                dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
                if (clo_trace_mem)
                    addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );
                if (clo_detailed_counts)
                    instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Exit:
            if (clo_basic_counts) {
                // The condition of a branch was inverted by VEX if a taken
                // branch is in fact a fall through according to client address
                tl_assert(iaddr != 0);
                dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32
                                          : st->Ist.Exit.dst->Ico.U64;
                condition_inverted = (dst == iaddr + ilen);

                /* Count Jcc */
                if (!condition_inverted)
                    di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                            VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                            mkIRExprVec_0() );
                else
                    di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                            VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc ),
                                            mkIRExprVec_0() );
                addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
                flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
                /* Count non-taken Jcc */
                if (!condition_inverted)
                    di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                            VG_(fnptr_to_fnentry)( &add_one_Jcc_untaken ),
                                            mkIRExprVec_0() );
                else
                    di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                            VG_(fnptr_to_fnentry)( &add_one_inverted_Jcc_untaken ),
                                            mkIRExprVec_0() );
                addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

        default:
            ppIRStmt(st);
            tl_assert(0);
        }
    }

    if (clo_basic_counts) {
        /* Count this basic block. */
        di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                                VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                                mkIRExprVec_0() );
        addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
    }

    if (clo_trace_mem) {
        /* At the end of the sbIn. Flush outstandings. */
        flushEvents(sbOut);
    }

    return sbOut;
}
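The events_used counter and the flushEvents calls above suggest a small batching queue: memory accesses are collected per superblock and emitted as dirty calls in one go. A minimal sketch of that idea, with hypothetical trace_load/trace_store runtime helpers:

#define N_EVENTS 4

typedef enum { Event_Dr, Event_Dw } EventKind;

typedef struct {
    EventKind ekind;
    IRExpr*   addr;  /* guest address (an IRAtom) */
    Int       size;  /* transfer size in bytes */
} Event;

static Event events[N_EVENTS];
static Int   events_used = 0;

static VG_REGPARM(2) void trace_load (Addr addr, SizeT size); /* hypothetical */
static VG_REGPARM(2) void trace_store(Addr addr, SizeT size); /* hypothetical */

/* Emit one dirty call per queued event, then reset the queue. */
static void flushEvents(IRSB* sb)
{
    Int i;
    for (i = 0; i < events_used; i++) {
        Event* ev = &events[i];
        const HChar* name = (ev->ekind == Event_Dr) ? "trace_load" : "trace_store";
        void* fn = (ev->ekind == Event_Dr) ? (void*)trace_load : (void*)trace_store;
        IRDirty* di = unsafeIRDirty_0_N(
                          /*regparms*/2, name, VG_(fnptr_to_fnentry)(fn),
                          mkIRExprVec_2(ev->addr, mkIRExpr_HWord(ev->size)) );
        addStmtToIRSB(sb, IRStmt_Dirty(di));
    }
    events_used = 0;
}

/* Queue a data read; flush first if the queue is full. */
static void addEvent_Dr(IRSB* sb, IRExpr* daddr, Int dsize)
{
    if (events_used == N_EVENTS)
        flushEvents(sb);
    events[events_used].ekind = Event_Dr;
    events[events_used].addr  = daddr;
    events[events_used].size  = dsize;
    events_used++;
}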
/* Instrumentation before a conditional jump or at the end of each
   original instruction. Fills the InstrInfo struct if not seen before. */
static void endOfInstr(IRBB* bbOut, InstrInfo* ii, Bool bb_seen_before,
                       UInt instr_offset, UInt instrLen, UInt dataSize,
                       UInt* cost_offset, Bool instrIssued,
                       IRExpr* loadAddrExpr, IRExpr* storeAddrExpr)
{
    IRType    wordTy;
    EventSet* es;

    // Stay sane ...
    CLG_ASSERT(sizeof(HWord) == sizeof(void*));
    if (sizeof(HWord) == 4) {
        wordTy = Ity_I32;
    } else if (sizeof(HWord) == 8) {
        wordTy = Ity_I64;
    } else {
        VG_(tool_panic)("endOfInstr: strange word size");
    }

    if (loadAddrExpr)
        CLG_ASSERT(wordTy == typeOfIRExpr(bbOut->tyenv, loadAddrExpr));
    if (storeAddrExpr)
        CLG_ASSERT(wordTy == typeOfIRExpr(bbOut->tyenv, storeAddrExpr));

    // Large (eg. 28B, 108B, 512B on x86) data-sized instructions will be
    // done inaccurately, but they're very rare and this avoids errors from
    // hitting more than two cache lines in the simulation.
    if (dataSize > MIN_LINE_SIZE)
        dataSize = MIN_LINE_SIZE;

    /* returns 0 if simulator needs no instrumentation */
    es = insert_simcall(bbOut, ii, dataSize, instrIssued,
                        loadAddrExpr, storeAddrExpr);

    CLG_DEBUG(5, "   Instr +%2d (Size %d, DSize %d): ESet %s (Size %d)\n",
              instr_offset, instrLen, dataSize,
              es ? es->name : (Char*)"(no instrumentation)",
              es ? es->size : 0);

    if (bb_seen_before) {
        CLG_DEBUG(5, "   before: Instr +%2d (Size %d, DSize %d)\n",
                  ii->instr_offset, ii->instr_size, ii->data_size);
        CLG_ASSERT(ii->instr_offset == instr_offset);
        CLG_ASSERT(ii->instr_size == instrLen);
        CLG_ASSERT(ii->cost_offset == *cost_offset);
        CLG_ASSERT(ii->eventset == es);

        /* Only check size if data size > 0.
         * This is needed: e.g. for rep or cmov x86 instructions, the same
         * InstrInfo is used for 2 simulator calls: one for the pure
         * instruction fetch and, separately, one for a memory access
         * (which may not happen depending on flags).
         * If checked always, this triggers an assertion failure on
         * retranslation. */
        if (dataSize > 0)
            CLG_ASSERT(ii->data_size == dataSize);
    } else {
        ii->instr_offset = instr_offset;
        ii->instr_size = instrLen;
        ii->cost_offset = *cost_offset;
        ii->eventset = es;

        /* data size only relevant if > 0 */
        if (dataSize > 0)
            ii->data_size = dataSize;

        CLG_(stat).distinct_instrs++;
    }

    *cost_offset += es ? es->size : 0;
}
static IRSB* el_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
    IRDirty*   di;
    Int        i;
    IRSB*      sbOut;
    Char       fnname[100];
    IRType     type;
    IRTypeEnv* tyenv = sbIn->tyenv;
    Addr       iaddr = 0, dst;
    UInt       ilen = 0;
    Bool       condition_inverted = False;

    if (gWordTy != hWordTy) {
        /* We don't currently support this case. */
        VG_(tool_panic)("host/guest word size mismatch");
    }

    /* Set up SB */
    sbOut = deepCopyIRSBExceptStmts(sbIn);

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRSB( sbOut, sbIn->stmts[i] );
        i++;
    }

    events_used = 0;

    for (/*use current i*/; i < sbIn->stmts_used; i++) {
        IRStmt* st = sbIn->stmts[i];
        if (!st || st->tag == Ist_NoOp)
            continue;

        switch (st->tag) {
        case Ist_NoOp:
        case Ist_AbiHint:
        case Ist_Put:
        case Ist_PutI:
        case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_IMark:
            // Store the last seen address
            lastAddress = st->Ist.IMark.addr;
            addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                         st->Ist.IMark.len );
            VG_(get_filename)(lastAddress, (char*) g_buff1, kBuffSize);
            if (VG_(strcmp)(g_buff1, clo_filename) == 0) {
                shouldInterpret = 1;
                ppIRStmt(st);
                VG_(printf)("\n");
            } else {
                shouldInterpret = 0;
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_WrTmp: {
            if (shouldInterpret) {
                IRExpr* data = st->Ist.WrTmp.data;
                if (data->tag == Iex_Load) {
                    addEvent_Dr( sbOut, data->Iex.Load.addr,
                                 sizeofIRType(data->Iex.Load.ty) );
                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Store: {
            if (shouldInterpret) {
                IRExpr* data = st->Ist.Store.data;
                addEvent_Dw( sbOut, st->Ist.Store.addr,
                             sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Dirty: {
            if (shouldInterpret) {
                Int      dsize;
                IRDirty* d = st->Ist.Dirty.details;
                if (d->mFx != Ifx_None) {
                    // This dirty helper accesses memory. Collect the details.
                    tl_assert(d->mAddr != NULL);
                    tl_assert(d->mSize != 0);
                    dsize = d->mSize;
                    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                        addEvent_Dr( sbOut, d->mAddr, dsize );
                    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                        addEvent_Dw( sbOut, d->mAddr, dsize );
                } else {
                    tl_assert(d->mAddr == NULL);
                    tl_assert(d->mSize == 0);
                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_CAS: {
            if (shouldInterpret) {
                Int    dataSize;
                IRType dataTy;
                IRCAS* cas = st->Ist.CAS.details;
                tl_assert(cas->addr != NULL);
                tl_assert(cas->dataLo != NULL);
                dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
                dataSize = sizeofIRType(dataTy);
                if (cas->dataHi != NULL)
                    dataSize *= 2; /* since it's a doubleword-CAS */
                addEvent_Dr( sbOut, cas->addr, dataSize );
                addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_LLSC: {
            if (shouldInterpret) {
                IRType dataTy;
                if (st->Ist.LLSC.storedata == NULL) {
                    /* LL */
                    dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
                    addEvent_Dr( sbOut, st->Ist.LLSC.addr, sizeofIRType(dataTy) );
                } else {
                    /* SC */
                    dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
                    addEvent_Dw( sbOut, st->Ist.LLSC.addr, sizeofIRType(dataTy) );
                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Exit:
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

        default:
            tl_assert(0);
        }
    }

    /* At the end of the sbIn. Flush outstandings. */
    flushEvents(sbOut);

    return sbOut;
}
// Instrumentation for the end of each original instruction.
static void instrumentInstr(IRBB* bbOut, instr_info* i_node, Bool bbSeenBefore,
                            UInt instrAddr, UInt instrLen, UInt dataSize,
                            IRExpr* loadAddrExpr, IRExpr* storeAddrExpr)
{
    IRDirty* di;
    IRExpr  *arg1, *arg2, *arg3, **argv;
    Int      argc;
    Char*    helperName;
    void*    helperAddr;
    IRType   wordTy;

    // Stay sane ...
    tl_assert(sizeof(HWord) == sizeof(void*));
    if (sizeof(HWord) == 4) {
        wordTy = Ity_I32;
    } else if (sizeof(HWord) == 8) {
        wordTy = Ity_I64;
    } else {
        VG_(tool_panic)("instrumentInstr: strange word size");
    }

    if (loadAddrExpr)
        tl_assert(wordTy == typeOfIRExpr(bbOut->tyenv, loadAddrExpr));
    if (storeAddrExpr)
        tl_assert(wordTy == typeOfIRExpr(bbOut->tyenv, storeAddrExpr));

    // Nb: instrLen will be zero if Vex failed to decode it.
    tl_assert( 0 == instrLen ||
               (instrLen >= VG_MIN_INSTR_SZB && instrLen <= VG_MAX_INSTR_SZB) );

    // Large (eg. 28B, 108B, 512B on x86) data-sized instructions will be
    // done inaccurately, but they're very rare and this avoids errors from
    // hitting more than two cache lines in the simulation.
    if (dataSize > MIN_LINE_SIZE)
        dataSize = MIN_LINE_SIZE;

    // Setup 1st arg: instr_info node's address
    // Believed to be 64-bit clean
    do_details(i_node, bbSeenBefore, instrAddr, instrLen, dataSize );
    arg1 = mkIRExpr_HWord( (HWord)i_node );

    if (!loadAddrExpr && !storeAddrExpr) {
        // no load/store
        tl_assert(0 == dataSize);
        helperName = "log_1I_0D_cache_access";
        helperAddr = &log_1I_0D_cache_access;
        argc = 1;
        argv = mkIRExprVec_1(arg1);
    } else if (loadAddrExpr && !storeAddrExpr) {
        // load
        tl_assert( isIRAtom(loadAddrExpr) );
        helperName = "log_1I_1Dr_cache_access";
        helperAddr = &log_1I_1Dr_cache_access;
        argc = 2;
        arg2 = loadAddrExpr;
        argv = mkIRExprVec_2(arg1, arg2);
    } else if (!loadAddrExpr && storeAddrExpr) {
        // store
        tl_assert( isIRAtom(storeAddrExpr) );
        helperName = "log_1I_1Dw_cache_access";
        helperAddr = &log_1I_1Dw_cache_access;
        argc = 2;
        arg2 = storeAddrExpr;
        argv = mkIRExprVec_2(arg1, arg2);
    } else {
        tl_assert( loadAddrExpr && storeAddrExpr );
        tl_assert( isIRAtom(loadAddrExpr) );
        tl_assert( isIRAtom(storeAddrExpr) );

        if ( loadStoreAddrsMatch(loadAddrExpr, storeAddrExpr) ) {
            // modify
            helperName = "log_1I_1Dr_cache_access";
            helperAddr = &log_1I_1Dr_cache_access;
            argc = 2;
            arg2 = loadAddrExpr;
            argv = mkIRExprVec_2(arg1, arg2);
        } else {
            // load/store
            helperName = "log_1I_2D_cache_access";
            helperAddr = &log_1I_2D_cache_access;
            argc = 3;
            arg2 = loadAddrExpr;
            arg3 = storeAddrExpr;
            argv = mkIRExprVec_3(arg1, arg2, arg3);
        }
    }

    // Add call to the instrumentation function
    di = unsafeIRDirty_0_N( argc, helperName, helperAddr, argv);
    addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
}
static Bool handleOneStatement(IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st,
                               IRStmt* st2, Addr* instrAddr, UInt* instrLen,
                               IRExpr** loadAddrExpr, IRExpr** storeAddrExpr,
                               UInt* dataSize)
{
    tl_assert(isFlatIRStmt(st));

    switch (st->tag) {
    case Ist_NoOp:
    case Ist_AbiHint:
    case Ist_Put:
    case Ist_PutI:
    case Ist_MFence:
        break;

    case Ist_Exit: {
        // This is a conditional jump. Most of the time, we want to add the
        // instrumentation before it, to ensure it gets executed. Eg, (1) if
        // this conditional jump is just before an IMark:
        //
        //   t108 = Not1(t107)
        //   [add instrumentation here]
        //   if (t108) goto {Boring} 0x3A96637D:I32
        //   ------ IMark(0x3A966370, 7) ------
        //
        // or (2) if this conditional jump is the last thing before the
        // block-ending unconditional jump:
        //
        //   t111 = Not1(t110)
        //   [add instrumentation here]
        //   if (t111) goto {Boring} 0x3A96637D:I32
        //   goto {Boring} 0x3A966370:I32
        //
        // One case (3) where we want the instrumentation after the
        // conditional jump is when the conditional jump is for an x86 REP
        // instruction:
        //
        //   ------ IMark(0x3A967F13, 2) ------
        //   t1 = GET:I32(4)
        //   t6 = CmpEQ32(t1,0x0:I32)
        //   if (t6) goto {Boring} 0x3A967F15:I32    # ignore this cond jmp
        //   t7 = Sub32(t1,0x1:I32)
        //   PUT(4) = t7
        //   ...
        //   t56 = Not1(t55)
        //   [add instrumentation here]
        //   if (t56) goto {Boring} 0x3A967F15:I32
        //
        // Therefore, we return true if the next statement is an IMark, or if
        // there is no next statement (which matches case (2), as the final
        // unconditional jump is not represented in the IRStmt list).
        //
        // Note that this approach won't do in the long run for supporting
        // PPC, but it's good enough for x86/AMD64 for the 3.0.X series.
        if (NULL == st2 || Ist_IMark == st2->tag)
            return True;
        else
            return False;
    }

    case Ist_IMark:
        /* st->Ist.IMark.addr is a 64-bit int. ULong_to_Ptr casts this
           to the host's native pointer type; if that is 32 bits then it
           discards the upper 32 bits. If we are cachegrinding on a
           32-bit host then we are also ensured that the guest word size
           is 32 bits, due to the assertion in cg_instrument that the
           host and guest word sizes must be the same. Hence
           st->Ist.IMark.addr will have been derived from a 32-bit guest
           code address and truncation of it is safe. I believe this
           assignment should be correct for both 32- and 64-bit
           machines. */
        *instrAddr = (Addr)ULong_to_Ptr(st->Ist.IMark.addr);
        *instrLen = st->Ist.IMark.len;
        break;

    case Ist_Tmp: {
        IRExpr* data = st->Ist.Tmp.data;
        if (data->tag == Iex_Load) {
            IRExpr* aexpr = data->Iex.Load.addr;
            tl_assert( isIRAtom(aexpr) );
            // Note also, endianness info is ignored. I guess that's not
            // interesting.
            // XXX: repe cmpsb does two loads... the first one is ignored here!
            //tl_assert( NULL == *loadAddrExpr );          // XXX: ???
            *loadAddrExpr = aexpr;
            *dataSize = sizeofIRType(data->Iex.Load.ty);
        }
        break;
    }

    case Ist_Store: {
        IRExpr* data  = st->Ist.Store.data;
        IRExpr* aexpr = st->Ist.Store.addr;
        tl_assert( isIRAtom(aexpr) );
        tl_assert( NULL == *storeAddrExpr );          // XXX: ???
        *storeAddrExpr = aexpr;
        *dataSize = sizeofIRType(typeOfIRExpr(tyenv, data));
        break;
    }

    case Ist_Dirty: {
        IRDirty* d = st->Ist.Dirty.details;
        if (d->mFx != Ifx_None) {
            /* This dirty helper accesses memory. Collect the details. */
            tl_assert(d->mAddr != NULL);
            tl_assert(d->mSize != 0);
            *dataSize = d->mSize;
            if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                *loadAddrExpr = d->mAddr;
            if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                *storeAddrExpr = d->mAddr;
        } else {
            tl_assert(d->mAddr == NULL);
            tl_assert(d->mSize == 0);
        }
        break;
    }

    default:
        VG_(printf)("\n");
        ppIRStmt(st);
        VG_(printf)("\n");
        VG_(tool_panic)("Cachegrind: unhandled IRStmt");
    }

    return False;
}
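handleOneStatement's st2 parameter is the lookahead used by the Ist_Exit logic above. A plausible driver loop feeding it (all names other than handleOneStatement are hypothetical):

static void walkStatements(IRTypeEnv* tyenv, IRBB* bbOut, IRBB* bbIn)
{
    Addr instrAddr = 0;
    UInt instrLen = 0, dataSize = 0;
    IRExpr *loadAddrExpr = NULL, *storeAddrExpr = NULL;
    Int i;

    for (i = 0; i < bbIn->stmts_used; i++) {
        IRStmt* st  = bbIn->stmts[i];
        /* next statement, or NULL at the end of the block */
        IRStmt* st2 = (i + 1 < bbIn->stmts_used) ? bbIn->stmts[i + 1] : NULL;
        Bool instrBefore = handleOneStatement(tyenv, bbOut, st, st2,
                                              &instrAddr, &instrLen,
                                              &loadAddrExpr, &storeAddrExpr,
                                              &dataSize);
        (void)instrBefore; /* a real caller would emit instrumentation here */
        addStmtToIRBB(bbOut, st);
    }
}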
static IRSB* sh_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
    IRDirty*   di;
    Int        i;
    IRSB*      sbOut;
    IRType     type;
    IRTypeEnv* tyenv = sbIn->tyenv;
    IRStmt*    imarkst;
    char*      fnname = global_fnname;

    if (gWordTy != hWordTy) {
        /* We don't currently support this case. */
        VG_(tool_panic)("host/guest word size mismatch");
    }

    /* Set up SB */
    sbOut = deepCopyIRSBExceptStmts(sbIn);

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRSB( sbOut, sbIn->stmts[i] );
        i++;
    }

    if (clo_trace_mem) {
        events_used = 0;
    }

    for (/*use current i*/; i < sbIn->stmts_used; i++) {
        IRStmt* st = sbIn->stmts[i];
        if (!st || st->tag == Ist_NoOp)
            continue;

        /* Pretty-print all IR statements, starting from main, that
           Valgrind has generated:
        if (clo_trace_mem) {
            ppIRStmt(st);
            VG_(printf)("\n");
        }
        */

        switch (st->tag) {
        case Ist_NoOp:
        case Ist_AbiHint:
        case Ist_PutI:
        case Ist_MBE:
            break;

        case Ist_Put:
            if (clo_trace_mem) {
                Int reg_no = st->Ist.Put.offset;
                if (reg_no == layout->offset_SP || reg_no == 20) {
                    IRExpr* data = st->Ist.Put.data;
                    if (data->tag == Iex_RdTmp) {
                        /* Add register-write instrumentation to IRSBout */
                        addEvent_RegW( sbOut, fnname, reg_no, data);
                    }
                }
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_IMark:
            imarkst = st;
            if (VG_(get_fnname_if_entry)( st->Ist.IMark.addr, fnname, 100)) {
                //VG_(printf)("-- %s --\n", fnname);
                if (0 == VG_(strcmp)(fnname, "main")) {
                    di = unsafeIRDirty_0_N( 0, "trace_debug",
                                            VG_(fnptr_to_fnentry)(trace_debug),
                                            mkIRExprVec_0() );
                    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
                    //VG_(printf)("SP:%d\n", layout->offset_SP);
                    clo_trace_mem = True;
                }
                if (clo_trace_mem) {
                    addEvent_FnEntry(sbOut, fnname);
                }
            }
            if (clo_trace_mem) {
                // WARNING: do not remove this function call, even if you
                // aren't interested in instruction reads. See the comment
                // above the function itself for more detail.
                addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                             st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
                IRExpr* data = st->Ist.WrTmp.data;
                if (data->tag == Iex_Load) {
                    addEvent_Dr( sbOut, fnname, data->Iex.Load.addr,
                                 sizeofIRType(data->Iex.Load.ty) );
                }
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_Store:
            if (clo_trace_mem) {
                IRExpr* data = st->Ist.Store.data;
                addEvent_Dw( sbOut, fnname, st->Ist.Store.addr,
                             sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_Dirty: {
            if (clo_trace_mem) {
                Int      dsize;
                IRDirty* d = st->Ist.Dirty.details;
                if (d->mFx != Ifx_None) {
                    // This dirty helper accesses memory. Collect the details.
                    tl_assert(d->mAddr != NULL);
                    tl_assert(d->mSize != 0);
                    dsize = d->mSize;
                    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                        addEvent_Dr( sbOut, fnname, d->mAddr, dsize );
                    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                        addEvent_Dw( sbOut, fnname, d->mAddr, dsize );
                } else {
                    tl_assert(d->mAddr == NULL);
                    tl_assert(d->mSize == 0);
                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_CAS: {
            /* We treat it as a read and a write of the location. I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex front
               ends would translate a lock-prefixed instruction into a
               (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (clo_trace_mem) {
                addEvent_Dr( sbOut, fnname, cas->addr, dataSize );
                addEvent_Dw( sbOut, fnname, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
                /* LL */
                dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
                if (clo_trace_mem)
                    addEvent_Dr( sbOut, fnname, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );
            } else {
                /* SC */
                dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
                if (clo_trace_mem)
                    addEvent_Dw( sbOut, fnname, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Exit:
            if (clo_trace_mem) {
                flushEvents(sbOut);
            }
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

        default:
            tl_assert(0);
        }
    }

    if (clo_trace_mem) {
        if (sbIn->jumpkind == Ijk_Ret) {
            VG_(get_fnname)(imarkst->Ist.IMark.addr, fnname, 100);
            addEvent_FnExit(sbOut, fnname);
        }

        /* At the end of the sbIn. Flush outstandings. */
        flushEvents(sbOut);
    }

    return sbOut;
}
//-----------------------------------------------------------------
static IRSB* oa_instrument (VgCallbackClosure* closure,
                            IRSB* sbIn,
                            VexGuestLayout* layout,
                            VexGuestExtents* vge,
                            IRType gWordTy, IRType hWordTy )
{
    Int     i;
    IRSB*   sbOut;
    IRType  type;
    Addr64  cia; /* address of current insn */
    IRStmt* st;

    if (gWordTy != hWordTy) {
        VG_(tool_panic)("host/guest word size mismatch"); // currently unsupported
    }

    thisWordWidth = gWordTy;
    //if (gWordTy != Ity_I32) ppIRType(gWordTy);

    /* Set up SB */
    sbOut = deepCopyIRSBExceptStmts(sbIn);

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRSB( sbOut, sbIn->stmts[i] );
        i++;
    }
    st = sbIn->stmts[i];
    cia = st->Ist.IMark.addr;

    for (/*use current i*/; i < sbIn->stmts_used; i++) {
        st = sbIn->stmts[i];
        if (!st || st->tag == Ist_NoOp)
            continue;
        IRExpr* expr;

        switch (st->tag) {
        case Ist_IMark:
            cia = st->Ist.IMark.addr;
            break;

        case Ist_WrTmp:
            expr = st->Ist.WrTmp.data;
            type = typeOfIRExpr(sbOut->tyenv, expr);
            tl_assert(type != Ity_INVALID);
            switch (expr->tag) {
            case Iex_Unop:
                instrument_Unop( sbOut, st, cia );
                break;
            case Iex_Binop:
                instrument_Binop( sbOut, st, type, cia );
                break;
            case Iex_Triop:
                instrument_Triop( sbOut, st, cia );
                break;
            case Iex_Qop:
                //instrument_Qop( sbOut, expr, type );
                break;
            case Iex_Mux0X:
                //instrument_Muxop( sbOut, expr, type );
                break;
            default:
                break;
            } // switch
            break;

        default:
            break;
        } // switch

        addStmtToIRSB( sbOut, st );
    } // for

    return sbOut;
}