/* Append to bbOut a store of the 32-bit constant 'val' to the fixed
   guest address 'addr'.  The address constant is sized to match the
   host word type (hWordTy), while the stored data is always a U32. */
static void addConstMemStoreStmt( IRBB* bbOut, UWord addr, UInt val, IRType hWordTy)
{
   IRConst* addrC = (hWordTy == Ity_I32) ? IRConst_U32( addr )
                                         : IRConst_U64( addr );
   IRStmt* store  = IRStmt_Store(CLGEndness,
                                 IRExpr_Const(addrC),
                                 IRExpr_Const(IRConst_U32(val)));
   addStmtToIRBB( bbOut, store );
}
/* Make a deep (structural) copy of the IRExpr tree rooted at 'e'.
   Each node is rebuilt via its IRExpr_* constructor and every
   sub-expression is copied recursively through the pyvex_deepCopy*
   helpers, so the result shares no storage with the input.
   Panics on an unrecognised expression tag. */
IRExpr* pyvex_deepCopyIRExpr ( IRExpr* e )
{
   switch (e->tag) {
      case Iex_Get:
         /* Leaf: offset and type are plain values, no copying needed. */
         return IRExpr_Get(e->Iex.Get.offset, e->Iex.Get.ty);
      case Iex_GetI:
         return IRExpr_GetI(pyvex_deepCopyIRRegArray(e->Iex.GetI.descr),
                            pyvex_deepCopyIRExpr(e->Iex.GetI.ix),
                            e->Iex.GetI.bias);
      case Iex_RdTmp:
         return IRExpr_RdTmp(e->Iex.RdTmp.tmp);
      case Iex_Qop: {
         /* Qop/Triop keep their operands in a separate 'details' struct. */
         IRQop* qop = e->Iex.Qop.details;
         return IRExpr_Qop(qop->op,
                           pyvex_deepCopyIRExpr(qop->arg1),
                           pyvex_deepCopyIRExpr(qop->arg2),
                           pyvex_deepCopyIRExpr(qop->arg3),
                           pyvex_deepCopyIRExpr(qop->arg4));
      }
      case Iex_Triop: {
         IRTriop *triop = e->Iex.Triop.details;
         return IRExpr_Triop(triop->op,
                             pyvex_deepCopyIRExpr(triop->arg1),
                             pyvex_deepCopyIRExpr(triop->arg2),
                             pyvex_deepCopyIRExpr(triop->arg3));
      }
      case Iex_Binop:
         return IRExpr_Binop(e->Iex.Binop.op,
                             pyvex_deepCopyIRExpr(e->Iex.Binop.arg1),
                             pyvex_deepCopyIRExpr(e->Iex.Binop.arg2));
      case Iex_Unop:
         return IRExpr_Unop(e->Iex.Unop.op,
                            pyvex_deepCopyIRExpr(e->Iex.Unop.arg));
      case Iex_Load:
         return IRExpr_Load(e->Iex.Load.end,
                            e->Iex.Load.ty,
                            pyvex_deepCopyIRExpr(e->Iex.Load.addr));
      case Iex_Const:
         return IRExpr_Const(pyvex_deepCopyIRConst(e->Iex.Const.con));
      case Iex_CCall:
         return IRExpr_CCall(pyvex_deepCopyIRCallee(e->Iex.CCall.cee),
                             e->Iex.CCall.retty,
                             pyvex_deepCopyIRExprVec(e->Iex.CCall.args));
      case Iex_ITE:
         return IRExpr_ITE(pyvex_deepCopyIRExpr(e->Iex.ITE.cond),
                           pyvex_deepCopyIRExpr(e->Iex.ITE.iftrue),
                           pyvex_deepCopyIRExpr(e->Iex.ITE.iffalse));
      case Iex_VECRET:
         /* Marker nodes carry no payload. */
         return IRExpr_VECRET();
      case Iex_BBPTR:
         return IRExpr_BBPTR();
      default:
         vpanic("pyvex_deepCopyIRExpr");
   }
}
/* Instrumentation pass: for every block whose final transfer is
   neither a return nor a constant target, append a dirty call to
   log_call(rip, is_call, target).  'rip' is the guest address of the
   last instruction in the block, recovered from the newest IMark. */
static IRSB * bcg_instrument(VgCallbackClosure* closure, IRSB* bb, VexGuestLayout* layout, VexGuestExtents* vge, IRType gWordTy, IRType hWordTy)
{
   unsigned long rip = 0;
   int idx;

   /* Nothing to do for returns or direct (constant-target) jumps. */
   if (bb->jumpkind == Ijk_Ret || bb->next->tag == Iex_Const)
      return bb;

   /* Scan backwards for the last IMark to find the guest address of
      the instruction performing the transfer. */
   for (idx = bb->stmts_used - 1; idx >= 0; idx--) {
      if (bb->stmts[idx]->tag == Ist_IMark) {
         rip = bb->stmts[idx]->Ist.IMark.addr;
         break;
      }
   }
   tl_assert(idx >= 0);   /* every block must contain at least one IMark */

   /* Make sure the jump target lives in a temporary so it can be
      passed to the helper as well as used as bb->next. */
   if (bb->next->tag != Iex_RdTmp) {
      IRTemp t = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(t, bb->next));
      bb->next = IRExpr_RdTmp(t);
   }

   addStmtToIRSB(
      bb,
      IRStmt_Dirty(
         unsafeIRDirty_0_N(
            0, "log_call", log_call,
            mkIRExprVec_3(
               IRExpr_Const(IRConst_U64(rip)),
               IRExpr_Const(IRConst_U64(bb->jumpkind == Ijk_Call)),
               bb->next))));
   return bb;
}
/* Select ARM instructions for a single IR statement, appending the
   result to 'env'.  Unhandled statement forms fall out of the switch
   to 'stmt_fail', which prints the statement and panics. */
static void iselStmt ( ISelEnv* env, IRStmt* stmt )
{
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf("\n-- ");
      ppIRStmt(stmt);
      vex_printf("\n");
   }

   switch (stmt->tag) {

   /* --------- STORE --------- */
   /* little-endian write to memory */
   case Ist_Store: {
      HReg   reg;
      IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
      IREndness end = stmt->Ist.Store.end;

      /* Only 32-bit little-endian addresses are supported. */
      if (tya != Ity_I32 || end != Iend_LE)
         goto stmt_fail;

      reg = iselIntExpr_R(env, stmt->Ist.Store.data);

      if (tyd == Ity_I8) {
         ARMAMode2* am2 = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
         addInstr(env, ARMInstr_StoreB(reg,am2));
         return;
      }
      if (tyd == Ity_I16) {
         ARMAMode3* am3 = iselIntExpr_AMode3(env, stmt->Ist.Store.addr);
         addInstr(env, ARMInstr_StoreH(reg,am3));
         return;
      }
      if (tyd == Ity_I32) {
         ARMAMode2* am2 = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
         addInstr(env, ARMInstr_StoreW(reg,am2));
         return;
      }
      /* BUGFIX: the original code had no 'break' here, so a store of
         any other data type fell through into the Ist_Put case and
         read stmt->Ist.Put.* members of a Store statement (wrong
         union member).  Unhandled store types now fail cleanly. */
      break;
   }

   /* --------- PUT --------- */
   /* write guest state, fixed offset */
   case Ist_Put: {
      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
      HReg   reg = iselIntExpr_R(env, stmt->Ist.Put.data);

      // CAB: This anywhere near right?!
      if (tyd == Ity_I32) {
         ARMAMode2* am2 = ARMAMode2_RI(GET_BP_REG(), stmt->Ist.Put.offset);
         addInstr(env, ARMInstr_StoreW(reg, am2));
         return;
      }
      if (tyd == Ity_I16) {
         ARMAMode3* am3 = ARMAMode3_RI(GET_BP_REG(), stmt->Ist.Put.offset);
         addInstr(env, ARMInstr_StoreH(reg, am3));
         return;
      }
      if (tyd == Ity_I8) {
         ARMAMode2* am2 = ARMAMode2_RI(GET_BP_REG(), stmt->Ist.Put.offset);
         addInstr(env, ARMInstr_StoreB(reg, am2));
         return;
      }
      // CAB: Ity_I32, Ity_I16 ?
      break;
   }

   /* --------- Indexed PUT --------- */
   /* write guest state, run-time offset */
   case Ist_PutI: {
      ARMAMode2* am2 = genGuestArrayOffset(
                          env, stmt->Ist.PutI.descr,
                          stmt->Ist.PutI.ix, stmt->Ist.PutI.bias );
      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.PutI.data);
      if (tyd == Ity_I8) {
         HReg reg = iselIntExpr_R(env, stmt->Ist.PutI.data);
         addInstr(env, ARMInstr_StoreB(reg, am2));
         return;
      }
      // CAB: Ity_I32, Ity_I16 ?
      break;
   }

   /* --------- TMP --------- */
   /* assign value to temporary */
   case Ist_WrTmp: {
      IRTemp tmp = stmt->Ist.WrTmp.tmp;
      IRType ty  = typeOfIRTemp(env->type_env, tmp);
      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
         ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.WrTmp.data);
         HReg dst = lookupIRTemp(env, tmp);
         addInstr(env, ARMInstr_DPInstr1(ARMalu_MOV,dst,am));
         return;
      }
      // CAB: Ity_I1 ?
      break;
   }

   /* --------- Call to DIRTY helper --------- */
   /* call complex ("dirty") helper function */
   case Ist_Dirty: {
      //IRType retty;
      IRDirty* d = stmt->Ist.Dirty.details;
      Bool passBBP = False;

      if (d->nFxState == 0)
         vassert(!d->needsBBP);
      passBBP = toBool(d->nFxState > 0 && d->needsBBP);

      /* Marshal args, do the call, clear stack. */
      doHelperCall( env, passBBP, d->guard, d->cee, d->args );

      /* Now figure out what to do with the returned value, if any. */
      if (d->tmp == IRTemp_INVALID)
         /* No return value.  Nothing to do. */
         return;

      //retty = typeOfIRTemp(env->type_env, d->tmp);
      // CAB: ? if (retty == Ity_I64) {

#if 0
      if (retty == Ity_I32 || retty == Ity_I16 || retty == Ity_I8) {
         /* The returned value is in %eax.  Park it in the register
            associated with tmp. */
         HReg dst = lookupIRTemp(env, d->tmp);
         addInstr(env, mk_iMOVsd_RR(hregX86_EAX(),dst) );
         return;
      }
#endif
      break;
   }

   /* --------- EXIT --------- */
   /* conditional exit from BB */
   case Ist_Exit: {
      ARMBranchDest* dst;
      ARMCondCode cc;
      if (stmt->Ist.Exit.dst->tag != Ico_U32)
         vpanic("isel_arm: Ist_Exit: dst is not a 32-bit value");

      // CAB: Where does jumpkind fit in ?
      // stmt->Ist.Exit.jk

      dst = iselIntExpr_BD(env, IRExpr_Const(stmt->Ist.Exit.dst));
      cc  = iselCondCode(env,stmt->Ist.Exit.guard);
      addInstr(env, ARMInstr_Branch(cc, dst));
      return;
   }

   default: break;
   }
  stmt_fail:
   ppIRStmt(stmt);
   vpanic("iselStmt");
}
// Handle a function exit statement, which contains a jump kind of // 'Ret'. It seems pretty accurate to cue off of currentAddr, a value // that is updated every time an Ist_IMark statement is translated, // which is quite often void handle_possible_exit(MCEnv* mce, IRJumpKind jk) { if (Ijk_Ret == jk) { IRDirty *di; FunctionEntry* curFuncPtr = getFunctionEntryFromAddr(currentAddr); if (curFuncPtr && // Also, if fjalar_trace_prog_pts_filename is on (we are // reading in a ppt list file), then DO NOT generate IR code // to call helper functions for functions whose names are NOT // located in prog_pts_tree. This will greatly speed up // processing because these functions are filtered out at // translation-time, not at run-time (!fjalar_trace_prog_pts_filename || prog_pts_tree_entry_found(curFuncPtr))) { FJALAR_DPRINTF("[handle_possible_exit] %s - %x\n", curFuncPtr->fjalar_name, (UInt)currentAddr); // The only argument to exit_function() is a pointer to the // FunctionEntry for the function that we are exiting di = unsafeIRDirty_0_N(1/*regparms*/, "exit_function", &exit_function, mkIRExprVec_1(IRExpr_Const(IRConst_UWord((Addr)curFuncPtr)))); // For function exit, we are interested in observing all general purpose // integer registers, FTOP, and FPREG[], so make sure that they are // updated by setting the proper annotations. 
di->nFxState = 11; vex_bzero(&di->fxState, sizeof(di->fxState)); di->fxState[0].fx = Ifx_Read; di->fxState[0].offset = mce->layout->offset_SP; di->fxState[0].size = mce->layout->sizeof_SP; di->fxState[1].fx = Ifx_Read; di->fxState[1].offset = mce->layout->offset_FP; di->fxState[1].size = mce->layout->sizeof_FP; di->fxState[2].fx = Ifx_Read; di->fxState[2].offset = mce->layout->offset_IP; di->fxState[2].size = mce->layout->sizeof_IP; di->fxState[3].fx = Ifx_Read; di->fxState[3].offset = mce->layout->offset_xAX; di->fxState[3].size = mce->layout->sizeof_xAX; di->fxState[4].fx = Ifx_Read; di->fxState[4].offset = mce->layout->offset_xBX; di->fxState[4].size = mce->layout->sizeof_xBX; di->fxState[5].fx = Ifx_Read; di->fxState[5].offset = mce->layout->offset_xCX; di->fxState[5].size = mce->layout->sizeof_xCX; di->fxState[6].fx = Ifx_Read; di->fxState[6].offset = mce->layout->offset_xDX; di->fxState[6].size = mce->layout->sizeof_xDX; di->fxState[7].fx = Ifx_Read; di->fxState[7].offset = mce->layout->offset_xSI; di->fxState[7].size = mce->layout->sizeof_xSI; di->fxState[8].fx = Ifx_Read; di->fxState[8].offset = mce->layout->offset_xDI; di->fxState[8].size = mce->layout->sizeof_xDI; di->fxState[9].fx = Ifx_Read; di->fxState[9].offset = offsetof(VexGuestArchState, guest_FTOP); di->fxState[9].size = sizeof(UInt); /* FTOP is 4 bytes even on x64 */ di->fxState[10].fx = Ifx_Read; di->fxState[10].offset = offsetof(VexGuestArchState, guest_FPREG); di->fxState[10].size = 8 * sizeof(ULong); stmt('V', mce, IRStmt_Dirty(di) ); } } }
// This inserts an IR Statement responsible for calling func // code before the instruction at addr is executed. This is primarily // used for inserting the call to enter_function on function entry. // It is also used for handling of 'function priming' for GCC 3 (see // comment above prime_function). The result of looking up addr in // table will be passed to func as it's only argument. This function // does nothing if it is unable to successfully look up addr in the // provided table. static void handle_possible_entry_func(MCEnv *mce, Addr64 addr, struct genhashtable *table, const char *func_name, entry_func func) { IRDirty *di; FunctionEntry *entry = gengettable(table, (void *)(Addr)addr); if(!entry) { return; } // If fjalar_trace_prog_pts_filename is on (we are using a ppt list // file), then DO NOT generate IR code to call helper functions for // functions whose name is NOT located in prog_pts_tree. It's faster // to filter them out at translation-time instead of run-time if (entry && (!fjalar_trace_prog_pts_filename || prog_pts_tree_entry_found(entry))) { UWord entry_w = (UWord)entry; di = unsafeIRDirty_0_N(1/*regparms*/, func_name, func, mkIRExprVec_1(IRExpr_Const(IRConst_UWord(entry_w)))); // For function entry, we are interested in observing the stack // and frame pointers so make sure that they're updated by setting // the proper annotations: entry->entryPC = addr; FJALAR_DPRINTF("Found a valid entry point at %x for\n", (UInt)addr); // We need all general purpose registers. 
di->nFxState = 9; vex_bzero(&di->fxState, sizeof(di->fxState)); di->fxState[0].fx = Ifx_Read; di->fxState[0].offset = mce->layout->offset_SP; di->fxState[0].size = mce->layout->sizeof_SP; di->fxState[1].fx = Ifx_Read; di->fxState[1].offset = mce->layout->offset_FP; di->fxState[1].size = mce->layout->sizeof_FP; di->fxState[2].fx = Ifx_Read; di->fxState[2].offset = mce->layout->offset_IP; di->fxState[2].size = mce->layout->sizeof_IP; di->fxState[3].fx = Ifx_Read; di->fxState[3].offset = mce->layout->offset_xAX; di->fxState[3].size = mce->layout->sizeof_xAX; di->fxState[4].fx = Ifx_Read; di->fxState[4].offset = mce->layout->offset_xBX; di->fxState[4].size = mce->layout->sizeof_xBX; di->fxState[5].fx = Ifx_Read; di->fxState[5].offset = mce->layout->offset_xCX; di->fxState[5].size = mce->layout->sizeof_xCX; di->fxState[6].fx = Ifx_Read; di->fxState[6].offset = mce->layout->offset_xDX; di->fxState[6].size = mce->layout->sizeof_xDX; di->fxState[7].fx = Ifx_Read; di->fxState[7].offset = mce->layout->offset_xSI; di->fxState[7].size = mce->layout->sizeof_xSI; di->fxState[8].fx = Ifx_Read; di->fxState[8].offset = mce->layout->offset_xDI; di->fxState[8].size = mce->layout->sizeof_xDI; stmt('V', mce, IRStmt_Dirty(di) ); } }
/* Disassemble a complete basic block, starting at guest_IP_bbstart,
   returning a new IRBB and filling in 'vge' with the extents of guest
   code covered.  Instructions are disassembled one at a time via
   dis_instr_fn; the loop may "resteer" (chase) unconditional transfers
   into up to 3 extents if chase_into_ok allows it.  If do_self_check
   is set, a 5-statement preamble is reserved and later filled with an
   adler32 check over the translated bytes, exiting with Ijk_TInval on
   mismatch. */
IRBB* bb_to_IR ( /*OUT*/VexGuestExtents* vge,
                 /*IN*/ void*            callback_opaque,
                 /*IN*/ DisOneInstrFn    dis_instr_fn,
                 /*IN*/ UChar*           guest_code,
                 /*IN*/ Addr64           guest_IP_bbstart,
                 /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
                 /*IN*/ Bool             host_bigendian,
                 /*IN*/ VexArch          arch_guest,
                 /*IN*/ VexArchInfo*     archinfo_guest,
                 /*IN*/ IRType           guest_word_type,
                 /*IN*/ Bool             do_self_check,
                 /*IN*/ Bool             (*preamble_function)(void*,IRBB*),
                 /*IN*/ Int              offB_TISTART,
                 /*IN*/ Int              offB_TILEN )
{
   Long       delta;
   Int        i, n_instrs, first_stmt_idx;
   Bool       resteerOK, need_to_put_IP, debug_print;
   DisResult  dres;
   IRStmt*    imark;
   static Int n_resteers = 0;   /* cumulative across calls, stats only */
   Int        d_resteers = 0;   /* resteers in this call */
   Int        selfcheck_idx = 0;
   IRBB*      irbb;
   Addr64     guest_IP_curr_instr;
   IRConst*   guest_IP_bbstart_IRConst = NULL;
   Bool       (*resteerOKfn)(void*,Addr64) = NULL;

   debug_print = toBool(vex_traceflags & VEX_TRACE_FE);

   /* Note: for adler32 to work without % operation for the self
      check, need to limit length of stuff it scans to 5552 bytes.
      Therefore limiting the max bb len to 100 insns seems generously
      conservative. */

   /* check sanity .. */
   vassert(sizeof(HWord) == sizeof(void*));
   vassert(vex_control.guest_max_insns >= 1);
   vassert(vex_control.guest_max_insns < 100);
   vassert(vex_control.guest_chase_thresh >= 0);
   vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
   vassert(guest_word_type == Ity_I32 || guest_word_type == Ity_I64);

   /* Start a new, empty extent. */
   vge->n_used  = 1;
   vge->base[0] = guest_IP_bbstart;
   vge->len[0]  = 0;

   /* And a new IR BB to dump the result into. */
   irbb = emptyIRBB();

   /* Delta keeps track of how far along the guest_code array we have
      so far gone. */
   delta    = 0;
   n_instrs = 0;

   /* Guest addresses as IRConsts.  Used in the two self-checks
      generated. */
   if (do_self_check) {
      guest_IP_bbstart_IRConst
         = guest_word_type==Ity_I32
              ? IRConst_U32(toUInt(guest_IP_bbstart))
              : IRConst_U64(guest_IP_bbstart);
   }

   /* If asked to make a self-checking translation, leave 5 spaces
      in which to put the check statements.  We'll fill them in later
      when we know the length and adler32 of the area to check. */
   if (do_self_check) {
      selfcheck_idx = irbb->stmts_used;
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
   }

   /* If the caller supplied a function to add its own preamble, use
      it now. */
   if (preamble_function) {
      Bool stopNow = preamble_function( callback_opaque, irbb );
      if (stopNow) {
         /* The callback has completed the IR block without any guest
            insns being disassembled into it, so just return it at
            this point, even if a self-check was requested - as there
            is nothing to self-check.  The five self-check no-ops will
            still be in place, but they are harmless. */
         return irbb;
      }
   }

   /* Process instructions. */
   while (True) {
      vassert(n_instrs < vex_control.guest_max_insns);

      /* Regardless of what chase_into_ok says, is chasing permissible
         at all right now?  Set resteerOKfn accordingly. */
      resteerOK
         = toBool(
              n_instrs < vex_control.guest_chase_thresh
              /* If making self-checking translations, don't chase
                 .. it makes the checks too complicated.  We only want
                 to scan just one sequence of bytes in the check, not
                 a whole bunch. */
              && !do_self_check
              /* we can't afford to have a resteer once we're on the
                 last extent slot. */
              && vge->n_used < 3
           );

      resteerOKfn
         = resteerOK ? chase_into_ok : const_False;

      /* This is the IP of the instruction we're just about to deal
         with. */
      guest_IP_curr_instr = guest_IP_bbstart + delta;

      /* This is the irbb statement array index of the first stmt in
         this insn.  That will always be the instruction-mark
         descriptor. */
      first_stmt_idx = irbb->stmts_used;

      /* Add an instruction-mark statement.  We won't know until after
         disassembling the instruction how long it instruction is, so
         just put in a zero length and we'll fix it up later. */
      addStmtToIRBB( irbb, IRStmt_IMark( guest_IP_curr_instr, 0 ));

      /* for the first insn, the dispatch loop will have set %IP, but
         for all the others we have to do it ourselves. */
      need_to_put_IP = toBool(n_instrs > 0);

      /* Finally, actually disassemble an instruction. */
      dres = dis_instr_fn ( irbb,
                            need_to_put_IP,
                            resteerOKfn,
                            callback_opaque,
                            guest_code,
                            delta,
                            guest_IP_curr_instr,
                            arch_guest,
                            archinfo_guest,
                            host_bigendian );

      /* stay sane ... */
      vassert(dres.whatNext == Dis_StopHere
              || dres.whatNext == Dis_Continue
              || dres.whatNext == Dis_Resteer);
      vassert(dres.len >= 0 && dres.len <= 20);
      if (dres.whatNext != Dis_Resteer)
         vassert(dres.continueAt == 0);

      /* Fill in the insn-mark length field. */
      vassert(first_stmt_idx >= 0 && first_stmt_idx < irbb->stmts_used);
      imark = irbb->stmts[first_stmt_idx];
      vassert(imark);
      vassert(imark->tag == Ist_IMark);
      vassert(imark->Ist.IMark.len == 0);
      imark->Ist.IMark.len = toUInt(dres.len);

      /* Print the resulting IR, if needed. */
      if (vex_traceflags & VEX_TRACE_FE) {
         for (i = first_stmt_idx; i < irbb->stmts_used; i++) {
            vex_printf("              ");
            ppIRStmt(irbb->stmts[i]);
            vex_printf("\n");
         }
      }

      /* If dis_instr_fn terminated the BB at this point, check it
         also filled in the irbb->next field. */
      if (dres.whatNext == Dis_StopHere) {
         vassert(irbb->next != NULL);
         if (debug_print) {
            vex_printf("              ");
            vex_printf( "goto {");
            ppIRJumpKind(irbb->jumpkind);
            vex_printf( "} ");
            ppIRExpr( irbb->next );
            vex_printf( "\n");
         }
      }

      /* Update the VexGuestExtents we are constructing. */
      /* If vex_control.guest_max_insns is required to be < 100 and
         each insn is at max 20 bytes long, this limit of 5000 then
         seems reasonable since the max possible extent length will be
         100 * 20 == 2000. */
      vassert(vge->len[vge->n_used-1] < 5000);
      vge->len[vge->n_used-1]
         = toUShort(toUInt( vge->len[vge->n_used-1] + dres.len ));
      n_instrs++;
      if (debug_print)
         vex_printf("\n");

      /* Advance delta (inconspicuous but very important :-) */
      delta += (Long)dres.len;

      switch (dres.whatNext) {
         case Dis_Continue:
            vassert(irbb->next == NULL);
            if (n_instrs < vex_control.guest_max_insns) {
               /* keep going */
            } else {
               /* We have to stop. */
               irbb->next
                  = IRExpr_Const(
                       guest_word_type == Ity_I32
                          ? IRConst_U32(toUInt(guest_IP_bbstart+delta))
                          : IRConst_U64(guest_IP_bbstart+delta)
                    );
               goto done;
            }
            break;
         case Dis_StopHere:
            vassert(irbb->next != NULL);
            goto done;
         case Dis_Resteer:
            /* Check that we actually allowed a resteer .. */
            vassert(resteerOK);
            vassert(irbb->next == NULL);
            /* figure out a new delta to continue at. */
            vassert(resteerOKfn(callback_opaque,dres.continueAt));
            delta = dres.continueAt - guest_IP_bbstart;
            /* we now have to start a new extent slot. */
            vge->n_used++;
            vassert(vge->n_used <= 3);
            vge->base[vge->n_used-1] = dres.continueAt;
            vge->len[vge->n_used-1] = 0;
            n_resteers++;
            d_resteers++;
            if (0 && (n_resteers & 0xFF) == 0)
               vex_printf("resteer[%d,%d] to 0x%llx (delta = %lld)\n",
                          n_resteers, d_resteers,
                          dres.continueAt, delta);
            break;
         default:
            vpanic("bb_to_IR");
      }
   }
   /*NOTREACHED*/
   vassert(0);

  done:
   /* We're done.  The only thing that might need attending to is that
      a self-checking preamble may need to be created. */
   if (do_self_check) {
      UInt     len2check, adler32;
      IRTemp   tistart_tmp, tilen_tmp;

      vassert(vge->n_used == 1);
      len2check = vge->len[0];
      if (len2check == 0)
         len2check = 1;

      adler32 = genericg_compute_adler32( (HWord)guest_code, len2check );

      /* Set TISTART and TILEN.  These will describe to the despatcher
         the area of guest code to invalidate should we exit with a
         self-check failure. */
      tistart_tmp = newIRTemp(irbb->tyenv, guest_word_type);
      tilen_tmp   = newIRTemp(irbb->tyenv, guest_word_type);

      irbb->stmts[selfcheck_idx+0]
         = IRStmt_Tmp(tistart_tmp, IRExpr_Const(guest_IP_bbstart_IRConst) );

      irbb->stmts[selfcheck_idx+1]
         = IRStmt_Tmp(tilen_tmp,
                      guest_word_type==Ity_I32
                         ? IRExpr_Const(IRConst_U32(len2check))
                         : IRExpr_Const(IRConst_U64(len2check))
           );

      irbb->stmts[selfcheck_idx+2]
         = IRStmt_Put( offB_TISTART, IRExpr_Tmp(tistart_tmp) );

      irbb->stmts[selfcheck_idx+3]
         = IRStmt_Put( offB_TILEN, IRExpr_Tmp(tilen_tmp) );

      irbb->stmts[selfcheck_idx+4]
         = IRStmt_Exit(
              IRExpr_Binop(
                 Iop_CmpNE32,
                 mkIRExprCCall(
                    Ity_I32,
                    2/*regparms*/,
                    "genericg_compute_adler32",
#if defined(__powerpc__) && defined(__powerpc64__)
                    (void*)((ULong*)(&genericg_compute_adler32))[0],
#else
                    &genericg_compute_adler32,
#endif
                    mkIRExprVec_2(
                       mkIRExpr_HWord( (HWord)guest_code ),
                       mkIRExpr_HWord( (HWord)len2check )
                    )
                 ),
                 IRExpr_Const(IRConst_U32(adler32))
              ),
              Ijk_TInval,
              guest_IP_bbstart_IRConst
           );
   }

   return irbb;
}