/*
 * Dirty helper invoked for instrumented load statements.  The destination
 * temporary of the WrTmp becomes tainted when either the loaded address is
 * the tainted address itself or taint already flows into the statement's
 * RHS expression; tainted statements are printed as they are found.
 * NOTE(review): `size` is currently unused here — presumably kept so the
 * helper signature matches its dirty-call registration; confirm at call site.
 */
static VG_REGPARM(2) void trace_load(Addr addr, SizeT size, IRStmt *expr)
{
    int is_tainted = (addr == taint) || spreadTaint(expr->Ist.WrTmp.data);

    if (!is_tainted)
        return;

    /* Record the WrTmp destination so later readers of it are tainted too. */
    IRExpr *dst = IRExpr_RdTmp(expr->Ist.WrTmp.tmp);
    VG_(addToXA)(tainted, dst);

    VG_(printf)("TAINTED: ");
    ppIRStmt(expr);
    VG_(printf)("\n");
}
/*
 * Dirty helper invoked for instrumented ALU ops.  If taint flows into the
 * statement's RHS, its WrTmp destination is recorded as tainted and the
 * statement is printed.  `one`/`two` are not consumed here — presumably
 * required by the dirty-call signature; confirm at the registration site.
 */
static VG_REGPARM(3) void trace_op(IRStmt *expr, UInt one, UInt two)
{
    /* Nothing to do unless the RHS is already tainted. */
    if (!spreadTaint(expr->Ist.WrTmp.data))
        return;

    VG_(addToXA)(tainted, IRExpr_RdTmp(expr->Ist.WrTmp.tmp));
    VG_(printf)("TAINTED: ");
    ppIRStmt(expr);
    VG_(printf)("\n");
}
/*
 * Instrument a double-element compare-and-swap (cmpxchg8b/16b-style) IR
 * statement by appending a dirty call to
 * helper_instrument_CAS_double_element on sb_out.
 *
 * The helper receives: the guest address (flattened to a HWord), the
 * temps feeding dataLo/dataHi (IRTemp_INVALID for constants), the
 * per-element size in bits, and two flat I1 results comparing
 * oldLo==expdLo and oldHi==expdHi (i.e. whether the CAS succeeded).
 *
 * NOTE(review): `size` is the width of ONE element (from dataLo); the
 * full CAS covers 2*size bits — confirm the helper interprets it that way.
 * Only U32 constant addresses are accepted, so this presumes a 32-bit
 * guest — TODO confirm.
 */
void instrument_CAS_double_element(IRStmt* st, IRSB* sb_out)
{
    IRCAS* cas = st->Ist.CAS.details;
    IRTemp oldHi = cas->oldHi, oldLo = cas->oldLo;
    IREndness end = cas->end;
    IRExpr* addr = cas->addr;
    IRExpr *expdHi = cas->expdHi, *expdLo = cas->expdLo;
    IRExpr *dataHi = cas->dataHi, *dataLo = cas->dataLo;
    /* Per-element width in bits, taken from the low element's type. */
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, dataLo));
    IROp op;
    IRExpr *expr, *expr2;
    IRDirty* di;

    tl_assert(isIRAtom(addr));
    tl_assert(end == Iend_LE); // we assume endianness is little endian
    tl_assert(isIRAtom(dataLo));
    tl_assert(isIRAtom(dataHi));
    if (addr->tag == Iex_Const) tl_assert(addr->Iex.Const.con->tag == Ico_U32);
    /* addr and dataLo must be the same width (guest word sized elements). */
    tl_assert(typeOfIRExpr(sb_out->tyenv, addr) == typeOfIRExpr(sb_out->tyenv, dataLo));

    /* Pick the CAS equality op matching the element width. */
    switch (size)
    {
        case 8:  op = Iop_CasCmpEQ8;  break;
        case 16: op = Iop_CasCmpEQ16; break;
        case 32: op = Iop_CasCmpEQ32; break;
        default: VG_(tool_panic)("instrument_CAS_double_element");
    }

    /* old == expd for each half; each is bound to a temp because IR fed to
       a dirty call must be flat. */
    expr = assignNew(sb_out, IRExpr_Binop(op, IRExpr_RdTmp(oldLo), expdLo)); // statement has to be flat
    expr2 = assignNew(sb_out, IRExpr_Binop(op, IRExpr_RdTmp(oldHi), expdHi)); // statement has to be flat

    di = unsafeIRDirty_0_N(0,
            "helper_instrument_CAS_double_element",
            VG_(fnptr_to_fnentry)(helper_instrument_CAS_double_element),
            mkIRExprVec_6(
                /* address, flattened to a host word */
                (addr->tag == Iex_RdTmp) ? assignNew_HWord(sb_out, addr)
                                         : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
                /* source temps of the two data halves (or INVALID) */
                mkIRExpr_HWord((dataLo->tag == Iex_RdTmp) ? dataLo->Iex.RdTmp.tmp : IRTemp_INVALID),
                mkIRExpr_HWord((dataHi->tag == Iex_RdTmp) ? dataHi->Iex.RdTmp.tmp : IRTemp_INVALID),
                mkIRExpr_HWord(size),
                /* the two success flags, widened to host words */
                assignNew_HWord(sb_out, expr),
                assignNew_HWord(sb_out, expr2))
    );
    addStmtToIRSB(sb_out, IRStmt_Dirty(di));
}
/*
 * Produce a structurally independent deep copy of the IR expression @e.
 * Leaf payloads (offsets, types, ops, temp numbers) are copied by value;
 * every child expression, reg-array descriptor, constant, callee and
 * argument vector is duplicated via the matching pyvex_deepCopy* routine.
 * Unknown tags are fatal.
 */
IRExpr* pyvex_deepCopyIRExpr ( IRExpr* e )
{
    switch (e->tag) {

        case Iex_Get:
            return IRExpr_Get(e->Iex.Get.offset, e->Iex.Get.ty);

        case Iex_GetI:
            return IRExpr_GetI(pyvex_deepCopyIRRegArray(e->Iex.GetI.descr),
                               pyvex_deepCopyIRExpr(e->Iex.GetI.ix),
                               e->Iex.GetI.bias);

        case Iex_RdTmp:
            return IRExpr_RdTmp(e->Iex.RdTmp.tmp);

        case Iex_Qop: {
            const IRQop* q = e->Iex.Qop.details;
            return IRExpr_Qop(q->op,
                              pyvex_deepCopyIRExpr(q->arg1),
                              pyvex_deepCopyIRExpr(q->arg2),
                              pyvex_deepCopyIRExpr(q->arg3),
                              pyvex_deepCopyIRExpr(q->arg4));
        }

        case Iex_Triop: {
            const IRTriop* t = e->Iex.Triop.details;
            return IRExpr_Triop(t->op,
                                pyvex_deepCopyIRExpr(t->arg1),
                                pyvex_deepCopyIRExpr(t->arg2),
                                pyvex_deepCopyIRExpr(t->arg3));
        }

        case Iex_Binop:
            return IRExpr_Binop(e->Iex.Binop.op,
                                pyvex_deepCopyIRExpr(e->Iex.Binop.arg1),
                                pyvex_deepCopyIRExpr(e->Iex.Binop.arg2));

        case Iex_Unop:
            return IRExpr_Unop(e->Iex.Unop.op,
                               pyvex_deepCopyIRExpr(e->Iex.Unop.arg));

        case Iex_Load:
            return IRExpr_Load(e->Iex.Load.end,
                               e->Iex.Load.ty,
                               pyvex_deepCopyIRExpr(e->Iex.Load.addr));

        case Iex_Const:
            return IRExpr_Const(pyvex_deepCopyIRConst(e->Iex.Const.con));

        case Iex_CCall:
            return IRExpr_CCall(pyvex_deepCopyIRCallee(e->Iex.CCall.cee),
                                e->Iex.CCall.retty,
                                pyvex_deepCopyIRExprVec(e->Iex.CCall.args));

        case Iex_Mux0X:
            return IRExpr_Mux0X(pyvex_deepCopyIRExpr(e->Iex.Mux0X.cond),
                                pyvex_deepCopyIRExpr(e->Iex.Mux0X.expr0),
                                pyvex_deepCopyIRExpr(e->Iex.Mux0X.exprX));

        default:
            vpanic("pyvex_deepCopyIRExpr");
    }
}
/*
 * Valgrind instrumentation callback: for every block whose final jump is
 * indirect (next-address not a constant) and is not a return, append a
 * dirty call to log_call(rip, is_call, target).  rip is the address of the
 * last IMark in the block, i.e. the guest address of the jump instruction.
 */
static IRSB *
bcg_instrument(VgCallbackClosure* closure, IRSB* bb, VexGuestLayout* layout,
               VexGuestExtents* vge, IRType gWordTy, IRType hWordTy)
{
    unsigned long rip = 0;
    int i;

    /* Returns and direct (constant-target) jumps are not logged. */
    if (bb->jumpkind == Ijk_Ret || bb->next->tag == Iex_Const)
        return bb;

    /* Find the guest address of the final instruction: the last IMark. */
    for (i = bb->stmts_used - 1; i >= 0; i--) {
        if (bb->stmts[i]->tag == Ist_IMark) {
            rip = bb->stmts[i]->Ist.IMark.addr;
            break;
        }
    }
    tl_assert(i >= 0);  /* every block must contain at least one IMark */

    /* The dirty-call argument must be flat; materialise bb->next into a
       temporary unless it is one already. */
    if (bb->next->tag != Iex_RdTmp) {
        IRTemp flat = newIRTemp(bb->tyenv, Ity_I64);
        addStmtToIRSB(bb, IRStmt_WrTmp(flat, bb->next));
        bb->next = IRExpr_RdTmp(flat);
    }

    addStmtToIRSB(
        bb,
        IRStmt_Dirty(
            unsafeIRDirty_0_N(
                0, "log_call", log_call,
                mkIRExprVec_3(IRExpr_Const(IRConst_U64(rip)),
                              IRExpr_Const(IRConst_U64(bb->jumpkind == Ijk_Call)),
                              bb->next))));
    return bb;
}
/*
 * Instrument a store-conditional (the SC half of LL/SC) by appending a
 * dirty call to helper_instrument_LLSC_Store_Conditional on sb_out.
 * The helper gets: the flattened guest address, the temp feeding
 * storedata (IRTemp_INVALID for a constant), the transfer size in bits,
 * and the statement's success/failure result temp, widened to a HWord.
 * Only U32 constant addresses are accepted — presumes a 32-bit guest.
 */
void instrument_LLSC_Store_Conditional(IRStmt* st, IRSB* sb_out)
{
    IRTemp result = st->Ist.LLSC.result;
    IRExpr* addr = st->Ist.LLSC.addr;
    IRExpr* storedata = st->Ist.LLSC.storedata;
    /* the data transfer type is the type of storedata */
    Int size = sizeofIRType_bits(typeOfIRExpr(sb_out->tyenv, storedata));
    IRDirty* dirty_call;

    tl_assert(isIRAtom(addr));
    tl_assert(isIRAtom(storedata));
    if (addr->tag == Iex_Const)
        tl_assert(addr->Iex.Const.con->tag == Ico_U32);

    dirty_call = unsafeIRDirty_0_N(
        0,
        "helper_instrument_LLSC_Store_Conditional",
        VG_(fnptr_to_fnentry)(helper_instrument_LLSC_Store_Conditional),
        mkIRExprVec_4(
            (addr->tag == Iex_RdTmp)
                ? assignNew_HWord(sb_out, addr)
                : mkIRExpr_HWord(addr->Iex.Const.con->Ico.U32),
            mkIRExpr_HWord((storedata->tag == Iex_RdTmp)
                               ? storedata->Iex.RdTmp.tmp
                               : IRTemp_INVALID),
            mkIRExpr_HWord(size),
            assignNew_HWord(sb_out, IRExpr_RdTmp(result))));

    addStmtToIRSB(sb_out, IRStmt_Dirty(dirty_call));
}
/*
 * Rewrite expression @exp so that every memory load inside it goes through
 * a helper_load_<N> dirty call, which also receives the guest RSP and RIP.
 * All other expression forms are rebuilt with their children rewritten
 * recursively; leaf forms that cannot touch memory are returned unchanged.
 * Returns NULL iff @exp is NULL.
 */
static IRExpr *
log_reads_expr(unsigned tid, IRSB *sb, IRExpr *exp)
{
    if (!exp)
        return NULL;
    switch (exp->tag) {
    case Iex_Get:
    case Iex_FreeVariable:
    case Iex_EntryPoint:
    case Iex_ControlFlow:
        /* No memory access possible here: pass through untouched. */
        return exp;
    case Iex_GetI: {
        IRExprGetI *e = (IRExprGetI *)exp;
        /* Only the index expression can contain loads. */
        return IRExpr_GetI(e->descr, log_reads_expr(tid, sb, e->ix), e->bias, e->tid);
    }
    case Iex_Qop: {
        IRExprQop *e = (IRExprQop *)exp;
        return IRExpr_Qop(e->op,
                          log_reads_expr(tid, sb, e->arg1),
                          log_reads_expr(tid, sb, e->arg2),
                          log_reads_expr(tid, sb, e->arg3),
                          log_reads_expr(tid, sb, e->arg4));
    }
    case Iex_Triop: {
        IRExprTriop *e = (IRExprTriop *)exp;
        return IRExpr_Triop(e->op,
                            log_reads_expr(tid, sb, e->arg1),
                            log_reads_expr(tid, sb, e->arg2),
                            log_reads_expr(tid, sb, e->arg3));
    }
    case Iex_Binop: {
        IRExprBinop *e = (IRExprBinop *)exp;
        return IRExpr_Binop(e->op,
                            log_reads_expr(tid, sb, e->arg1),
                            log_reads_expr(tid, sb, e->arg2));
    }
    case Iex_Associative: {
        IRExprAssociative *e = (IRExprAssociative *)exp;
        /* Rebuild the n-ary node with every operand rewritten. */
        IRExpr **newArgs = alloc_irexpr_array(e->nr_arguments);
        for (int x = 0; x < e->nr_arguments; x++)
            newArgs[x] = log_reads_expr(tid, sb, e->contents[x]);
        return IRExpr_Associative_Claim(e->op, e->nr_arguments, newArgs);
    }
    case Iex_Unop: {
        IRExprUnop *e = (IRExprUnop *)exp;
        return IRExpr_Unop(e->op, log_reads_expr(tid, sb, e->arg));
    }
    case Iex_Load: {
        /* The interesting case: replace the load with a dirty call to the
           width-appropriate helper_load_<N>, writing into a fresh temp. */
        IRExprLoad *e = (IRExprLoad *)exp;
        IRExpr **args;
        void *helper;
        const char *helper_name;
        IRTemp dest;
        IRDirty *f;
        assert(e->addr->type() == Ity_I64);
        /* Shut compiler up — these placeholder values must always be
           overwritten by the switch below. */
        helper = (void *)0xf001;
        helper_name = (const char *)0xdead;
#define HLP(x) helper_name = "helper_load_" #x ; helper = (void *)helper_load_ ## x ;
        /* NOTE(review): this switch has no default; a float/vector load
           type would fall through and leave the 0xf001/0xdead placeholders
           live — presumably such types never reach here; confirm. */
        switch (e->ty) {
        case Ity_INVALID: abort();
        case Ity_I1: abort();
        case Ity_I8: HLP(8); break;
        case Ity_I16: HLP(16); break;
        case Ity_I32: HLP(32); break;
        case Ity_I64: HLP(64); break;
        case Ity_I128: HLP(128); break;
        }
#undef HLP
        /* The helper also sees guest RSP and RIP at the point of the load. */
        args = mkIRExprVec_3(log_reads_expr(tid, sb, e->addr),
                             IRExpr_Get(OFFSET_amd64_RSP, Ity_I64, tid, 0),
                             IRExpr_Get(OFFSET_amd64_RIP, Ity_I64, tid, 0));
        dest = newIRTemp(sb->tyenv);
        f = unsafeIRDirty_1_N(threadAndRegister::temp(tid, dest, 0),
                              0, helper_name, helper, args);
        addStmtToIRSB(sb, IRStmt_Dirty(f));
        /* The rewritten expression is simply a read of the helper's result. */
        return IRExpr_RdTmp(dest, e->ty, tid, 0);
    }
    case Iex_Const:
        return exp;
    case Iex_CCall: {
        IRExprCCall *e = (IRExprCCall *)exp;
        IRExpr **args;
        int x;
        int nr_args;
        /* args is a NULL-terminated vector; count, then rebuild. */
        for (nr_args = 0; e->args[nr_args]; nr_args++)
            ;
        args = alloc_irexpr_array(nr_args + 1);
        args[nr_args] = NULL;
        for (x = 0; x < nr_args; x++)
            args[x] = log_reads_expr(tid, sb, e->args[x]);
        return IRExpr_CCall(e->cee, e->retty, args);
    }
    case Iex_Mux0X: {
        IRExprMux0X *e = (IRExprMux0X *)exp;
        return IRExpr_Mux0X(log_reads_expr(tid, sb, e->cond),
                            log_reads_expr(tid, sb, e->expr0),
                            log_reads_expr(tid, sb, e->exprX));
    }
    case Iex_HappensBefore:
        /* Not an expression that should appear in this context. */
        abort();
    }
    abort();
}
/*
 * Disassemble guest instructions starting at guest_IP_bbstart into a fresh
 * IR superblock, optionally chasing unconditional transfers ("resteering")
 * into up to 3 extents, and optionally prepending a self-checking preamble
 * that verifies (via an adler32 checksum) that the guest code has not been
 * modified since translation.  Returns the superblock; fills in *vge with
 * the guest address extents actually covered.
 */
IRSB* bb_to_IR ( /*OUT*/VexGuestExtents* vge,
                 /*IN*/ void*            callback_opaque,
                 /*IN*/ DisOneInstrFn    dis_instr_fn,
                 /*IN*/ UChar*           guest_code,
                 /*IN*/ Addr64           guest_IP_bbstart,
                 /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
                 /*IN*/ Bool             host_bigendian,
                 /*IN*/ VexArch          arch_guest,
                 /*IN*/ VexArchInfo*     archinfo_guest,
                 /*IN*/ VexAbiInfo*      abiinfo_both,
                 /*IN*/ IRType           guest_word_type,
                 /*IN*/ Bool             do_self_check,
                 /*IN*/ Bool             (*preamble_function)(void*,IRSB*),
                 /*IN*/ Int              offB_TISTART,
                 /*IN*/ Int              offB_TILEN )
{
   Long       delta;
   Int        i, n_instrs, first_stmt_idx;
   Bool       resteerOK, need_to_put_IP, debug_print;
   DisResult  dres;
   IRStmt*    imark;
   static Int n_resteers = 0;
   Int        d_resteers = 0;
   Int        selfcheck_idx = 0;
   IRSB*      irsb;
   Addr64     guest_IP_curr_instr;
   IRConst*   guest_IP_bbstart_IRConst = NULL;

   Bool (*resteerOKfn)(void*,Addr64) = NULL;

   debug_print = toBool(vex_traceflags & VEX_TRACE_FE);

   /* Note: for adler32 to work without % operation for the self
      check, need to limit length of stuff it scans to 5552 bytes.
      Therefore limiting the max bb len to 100 insns seems generously
      conservative. */

   /* check sanity .. */
   vassert(sizeof(HWord) == sizeof(void*));
   vassert(vex_control.guest_max_insns >= 1);
   vassert(vex_control.guest_max_insns < 100);
   vassert(vex_control.guest_chase_thresh >= 0);
   vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
   vassert(guest_word_type == Ity_I32 || guest_word_type == Ity_I64);

   /* Start a new, empty extent. */
   vge->n_used = 1;
   vge->base[0] = guest_IP_bbstart;
   vge->len[0] = 0;

   /* And a new IR superblock to dump the result into. */
   irsb = emptyIRSB();

   /* Delta keeps track of how far along the guest_code array we have
      so far gone. */
   delta = 0;
   n_instrs = 0;

   /* Guest addresses as IRConsts.  Used in the two self-checks
      generated. */
   if (do_self_check) {
      guest_IP_bbstart_IRConst
         = guest_word_type==Ity_I32
              ? IRConst_U32(toUInt(guest_IP_bbstart))
              : IRConst_U64(guest_IP_bbstart);
   }

   /* If asked to make a self-checking translation, leave 5 spaces in
      which to put the check statements.  We'll fill them in later
      when we know the length and adler32 of the area to check. */
   if (do_self_check) {
      selfcheck_idx = irsb->stmts_used;
      addStmtToIRSB( irsb, IRStmt_NoOp() );
      addStmtToIRSB( irsb, IRStmt_NoOp() );
      addStmtToIRSB( irsb, IRStmt_NoOp() );
      addStmtToIRSB( irsb, IRStmt_NoOp() );
      addStmtToIRSB( irsb, IRStmt_NoOp() );
   }

   /* If the caller supplied a function to add its own preamble, use
      it now. */
   if (preamble_function) {
      Bool stopNow = preamble_function( callback_opaque, irsb );
      if (stopNow) {
         /* The callback has completed the IR block without any guest
            insns being disassembled into it, so just return it at
            this point, even if a self-check was requested - as there
            is nothing to self-check.  The five self-check no-ops will
            still be in place, but they are harmless. */
         return irsb;
      }
   }

   /* Process instructions. */
   while (True) {
      vassert(n_instrs < vex_control.guest_max_insns);

      /* Regardless of what chase_into_ok says, is chasing permissible
         at all right now?  Set resteerOKfn accordingly. */
      resteerOK
         = toBool(
              n_instrs < vex_control.guest_chase_thresh
              /* If making self-checking translations, don't chase
                 .. it makes the checks too complicated.  We only want
                 to scan just one sequence of bytes in the check, not
                 a whole bunch. */
              && !do_self_check
              /* we can't afford to have a resteer once we're on the
                 last extent slot. */
              && vge->n_used < 3
           );

      resteerOKfn
         = resteerOK ? chase_into_ok : const_False;

      /* This is the IP of the instruction we're just about to deal
         with. */
      guest_IP_curr_instr = guest_IP_bbstart + delta;

      /* This is the irsb statement array index of the first stmt in
         this insn.  That will always be the instruction-mark
         descriptor. */
      first_stmt_idx = irsb->stmts_used;

      /* Add an instruction-mark statement.  We won't know until after
         disassembling the instruction how long it instruction is, so
         just put in a zero length and we'll fix it up later. */
      addStmtToIRSB( irsb, IRStmt_IMark( guest_IP_curr_instr, 0 ));

      /* for the first insn, the dispatch loop will have set %IP, but
         for all the others we have to do it ourselves. */
      need_to_put_IP = toBool(n_instrs > 0);

      /* Finally, actually disassemble an instruction. */
      dres = dis_instr_fn ( irsb,
                            need_to_put_IP,
                            resteerOKfn,
                            callback_opaque,
                            guest_code,
                            delta,
                            guest_IP_curr_instr,
                            arch_guest,
                            archinfo_guest,
                            abiinfo_both,
                            host_bigendian );

      /* stay sane ... */
      vassert(dres.whatNext == Dis_StopHere
              || dres.whatNext == Dis_Continue
              || dres.whatNext == Dis_Resteer);
      vassert(dres.len >= 0 && dres.len <= 20);
      if (dres.whatNext != Dis_Resteer)
         vassert(dres.continueAt == 0);

      /* Fill in the insn-mark length field. */
      vassert(first_stmt_idx >= 0 && first_stmt_idx < irsb->stmts_used);
      imark = irsb->stmts[first_stmt_idx];
      vassert(imark);
      vassert(imark->tag == Ist_IMark);
      vassert(imark->Ist.IMark.len == 0);
      imark->Ist.IMark.len = toUInt(dres.len);

      /* Print the resulting IR, if needed. */
      if (vex_traceflags & VEX_TRACE_FE) {
         for (i = first_stmt_idx; i < irsb->stmts_used; i++) {
            vex_printf("              ");
            ppIRStmt(irsb->stmts[i]);
            vex_printf("\n");
         }
      }

      /* If dis_instr_fn terminated the BB at this point, check it
         also filled in the irsb->next field. */
      if (dres.whatNext == Dis_StopHere) {
         vassert(irsb->next != NULL);
         if (debug_print) {
            vex_printf("              ");
            vex_printf( "goto {");
            ppIRJumpKind(irsb->jumpkind);
            vex_printf( "} ");
            ppIRExpr( irsb->next );
            vex_printf( "\n");
         }
      }

      /* Update the VexGuestExtents we are constructing. */
      /* If vex_control.guest_max_insns is required to be < 100 and
         each insn is at max 20 bytes long, this limit of 5000 then
         seems reasonable since the max possible extent length will be
         100 * 20 == 2000. */
      vassert(vge->len[vge->n_used-1] < 5000);
      vge->len[vge->n_used-1]
         = toUShort(toUInt( vge->len[vge->n_used-1] + dres.len ));
      n_instrs++;
      if (debug_print)
         vex_printf("\n");

      /* Advance delta (inconspicuous but very important :-) */
      delta += (Long)dres.len;

      switch (dres.whatNext) {
         case Dis_Continue:
            vassert(irsb->next == NULL);
            if (n_instrs < vex_control.guest_max_insns) {
               /* keep going */
            } else {
               /* We have to stop.  Synthesise a fall-through jump to
                  the next guest address. */
               irsb->next = IRExpr_Const(
                               guest_word_type == Ity_I32
                                  ? IRConst_U32(toUInt(guest_IP_bbstart+delta))
                                  : IRConst_U64(guest_IP_bbstart+delta)
                            );
               goto done;
            }
            break;
         case Dis_StopHere:
            vassert(irsb->next != NULL);
            goto done;
         case Dis_Resteer:
            /* Check that we actually allowed a resteer .. */
            vassert(resteerOK);
            vassert(irsb->next == NULL);
            /* figure out a new delta to continue at. */
            vassert(resteerOKfn(callback_opaque,dres.continueAt));
            delta = dres.continueAt - guest_IP_bbstart;
            /* we now have to start a new extent slot. */
            vge->n_used++;
            vassert(vge->n_used <= 3);
            vge->base[vge->n_used-1] = dres.continueAt;
            vge->len[vge->n_used-1] = 0;
            n_resteers++;
            d_resteers++;
            if (0 && (n_resteers & 0xFF) == 0)
            vex_printf("resteer[%d,%d] to 0x%llx (delta = %lld)\n",
                       n_resteers, d_resteers,
                       dres.continueAt, delta);
            break;
         default:
            vpanic("bb_to_IR");
      }
   }
   /*NOTREACHED*/
   vassert(0);

  done:
   /* We're done.  The only thing that might need attending to is that
      a self-checking preamble may need to be created.  It is filled
      into the 5 no-op slots reserved earlier. */
   if (do_self_check) {

      UInt     len2check, adler32;
      IRTemp   tistart_tmp, tilen_tmp;
      HWord    p_adler_helper;

      vassert(vge->n_used == 1);
      len2check = vge->len[0];
      if (len2check == 0)
         len2check = 1;

      adler32 = genericg_compute_adler32( (HWord)guest_code, len2check );

      /* Set TISTART and TILEN.  These will describe to the despatcher
         the area of guest code to invalidate should we exit with a
         self-check failure. */
      tistart_tmp = newIRTemp(irsb->tyenv, guest_word_type);
      tilen_tmp   = newIRTemp(irsb->tyenv, guest_word_type);

      irsb->stmts[selfcheck_idx+0]
         = IRStmt_WrTmp(tistart_tmp, IRExpr_Const(guest_IP_bbstart_IRConst) );

      irsb->stmts[selfcheck_idx+1]
         = IRStmt_WrTmp(tilen_tmp,
                        guest_word_type==Ity_I32
                           ? IRExpr_Const(IRConst_U32(len2check))
                           : IRExpr_Const(IRConst_U64(len2check))
           );

      irsb->stmts[selfcheck_idx+2]
         = IRStmt_Put( offB_TISTART, IRExpr_RdTmp(tistart_tmp) );

      irsb->stmts[selfcheck_idx+3]
         = IRStmt_Put( offB_TILEN, IRExpr_RdTmp(tilen_tmp) );

      /* On PPC ABIs that use function descriptors, the first word of
         the descriptor is the actual entry address of the helper. */
      p_adler_helper
         = abiinfo_both->host_ppc_calls_use_fndescrs
              ? ((HWord*)(&genericg_compute_adler32))[0]
              : (HWord)&genericg_compute_adler32;

      /* Exit to Ijk_TInval (invalidate-translation) if the recomputed
         checksum of the guest bytes differs from the one captured now. */
      irsb->stmts[selfcheck_idx+4]
         = IRStmt_Exit(
              IRExpr_Binop(
                 Iop_CmpNE32,
                 mkIRExprCCall(
                    Ity_I32,
                    2/*regparms*/,
                    "genericg_compute_adler32",
                    (void*)p_adler_helper,
                    mkIRExprVec_2(
                       mkIRExpr_HWord( (HWord)guest_code ),
                       mkIRExpr_HWord( (HWord)len2check )
                    )
                 ),
                 IRExpr_Const(IRConst_U32(adler32))
              ),
              Ijk_TInval,
              guest_IP_bbstart_IRConst
           );
   }

   return irsb;
}