void addDynamicDisownNonNull(IRSB* sbOut, IRTemp idx){
  IRDirty* disownDirty =
    unsafeIRDirty_0_N(1, "disownShadowTempNonNullDynamic",
                      VG_(fnptr_to_fnentry)(disownShadowTempNonNullDynamic),
                      mkIRExprVec_1(mkU64(idx)));
  disownDirty->mFx = Ifx_Modify;
  disownDirty->mAddr = mkU64((uintptr_t)&(shadowTemps[idx]));
  disownDirty->mSize = sizeof(ShadowTemp*);
  addStmtToIRSB(sbOut, IRStmt_Dirty(disownDirty));
}
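/* For context, a sketch of what the dirty-call target above might do at
   runtime; the ShadowTemp layout (num_vals/values) and the
   disownShadowValue helper are assumptions for illustration, not the
   tool's actual definitions. */
VG_REGPARM(1) void disownShadowTempNonNullDynamic_sketch(UWord idx){
  ShadowTemp* temp = shadowTemps[idx];     /* known non-null at this point */
  for (int i = 0; i < temp->num_vals; ++i){
    disownShadowValue(temp->values[i]);    /* drop one reference per value */
  }
  shadowTemps[idx] = NULL;                 /* clear the slot */
}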
void addSVDisownNonNullG(IRSB* sbOut, IRExpr* guard, IRExpr* sv){
  IRExpr* refCountAddr =
    runArrowAddr(sbOut, sv, ShadowValue, ref_count);
  // Load and decrement the reference count, under the guard.
  IRExpr* prevRefCount =
    runLoadG64(sbOut, refCountAddr, guard);
  IRExpr* newRefCount =
    runBinop(sbOut, Iop_Sub64, prevRefCount, mkU64(1));
  addStoreG(sbOut, guard, newRefCount, refCountAddr);
  // If this was the last reference, free the shadow value.
  IRExpr* lastRef =
    runBinop(sbOut, Iop_CmpEQ64, prevRefCount, mkU64(1));
  IRStmt* freeVal = mkDirtyG_0_1(freeShadowValue, sv, lastRef);
  addStmtToIRSB(sbOut, freeVal);
}
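/* One plausible shape for the mkDirtyG_0_1 helper used above, assuming it
   wraps a one-argument dirty call with a guard expression; the signature
   here is a guess for illustration, not the tool's definition. */
static IRStmt* mkDirtyG_0_1_sketch(const char* name, void* fn,
                                   IRExpr* arg, IRExpr* guard){
  IRDirty* dirty =
    unsafeIRDirty_0_N(1, name, VG_(fnptr_to_fnentry)(fn),
                      mkIRExprVec_1(arg));
  dirty->guard = guard;   /* call happens only when guard is 1 at runtime */
  return IRStmt_Dirty(dirty);
}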
void addSVOwnNonNull(IRSB* sbOut, IRExpr* sv){
  IRExpr* prevRefCount =
    runArrow(sbOut, sv, ShadowValue, ref_count);
  IRExpr* newRefCount =
    runBinop(sbOut, Iop_Add64, prevRefCount, mkU64(1));
  if (PRINT_VALUE_MOVES){
    addPrint3("[3] Owning %p, new ref_count %d\n", sv, newRefCount);
  }
  addStoreArrow(sbOut, sv, ShadowValue, ref_count, newRefCount);
}
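/* For clarity, the runtime effect of the IR generated above, written as
   plain C (a sketch, not code from the tool): */
static inline void ownShadowValueNonNull_sketch(ShadowValue* sv){
  sv->ref_count += 1;   /* the Iop_Add64 increment plus the store-back */
}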
/* Store a value to memory. If a value requires more than 8 bytes, a
   series of 8-byte stores will be generated. */
static __inline__ void
store(IRSB *irsb, IREndness endian, HWord haddr, IRExpr *data)
{
   IROp high, low;
   IRExpr *addr, *next_addr;

   if (VEX_HOST_WORDSIZE == 8) {
      addr      = mkU64(haddr);
      next_addr = binop(Iop_Add64, addr, mkU64(8));
   } else if (VEX_HOST_WORDSIZE == 4) {
      addr      = mkU32(haddr);
      next_addr = binop(Iop_Add32, addr, mkU32(8));
   } else {
      vpanic("invalid #bytes for address");
   }

   IRType type = typeOfIRExpr(irsb->tyenv, data);

   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);

   switch (type) {
   case Ity_I128: high = Iop_128HIto64;   low = Iop_128to64;     goto store128;
   case Ity_F128: high = Iop_F128HItoF64; low = Iop_F128LOtoF64; goto store128;
   case Ity_D128: high = Iop_D128HItoD64; low = Iop_D128LOtoD64; goto store128;

   store128:
      /* Two stores of 64 bit each. */
      if (endian == Iend_BE) {
         /* The more significant bits are at the lower address. */
         store_aux(irsb, endian, addr,      unop(high, data));
         store_aux(irsb, endian, next_addr, unop(low,  data));
      } else {
         /* The more significant bits are at the higher address. */
         store_aux(irsb, endian, addr,      unop(low,  data));
         store_aux(irsb, endian, next_addr, unop(high, data));
      }
      return;

   default:
      store_aux(irsb, endian, addr, data);
      return;
   }
}
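/* Hedged usage sketch: storing a 128-bit value through store(). The
   buffer and the wrapping function are hypothetical, for illustration
   only. Since the operand is Ity_I128, this takes the store128 path and
   emits two 8-byte stores at (HWord)buffer and (HWord)buffer + 8. */
static ULong buffer[2];

static void
store_example(IRSB *irsb)
{
   IRExpr *val128 = binop(Iop_64HLto128,
                          mkU64(0xDEADBEEFULL),    /* high 64 bits */
                          mkU64(0xCAFEBABEULL));   /* low 64 bits  */
   store(irsb, Iend_LE, (HWord)buffer, val128);
}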
/* Load a value from memory. Loads of more than 8 bytes are split into
   a series of 8-byte loads and combined using appropriate IROps. */
static IRExpr *
load(IREndness endian, IRType type, HWord haddr)
{
   IROp concat;
   IRExpr *addr, *next_addr;

   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);

   if (VEX_HOST_WORDSIZE == 8) {
      addr      = mkU64(haddr);
      next_addr = binop(Iop_Add64, addr, mkU64(8));
   } else if (VEX_HOST_WORDSIZE == 4) {
      addr      = mkU32(haddr);
      next_addr = binop(Iop_Add32, addr, mkU32(8));
   } else {
      vpanic("invalid #bytes for address");
   }

   switch (type) {
   case Ity_I128: concat = Iop_64HLto128;   type = Ity_I64; goto load128;
   case Ity_F128: concat = Iop_F64HLtoF128; type = Ity_F64; goto load128;
   case Ity_D128: concat = Iop_D64HLtoD128; type = Ity_D64; goto load128;

   load128:
      /* Two loads of 64 bit each. */
      if (endian == Iend_BE) {
         /* The more significant bits are at the lower address. */
         return binop(concat,
                      load_aux(endian, type, addr),
                      load_aux(endian, type, next_addr));
      } else {
         /* The more significant bits are at the higher address. */
         return binop(concat,
                      load_aux(endian, type, next_addr),
                      load_aux(endian, type, addr));
      }

   default:
      return load_aux(endian, type, addr);
   }
}
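/* Matching read-side sketch: produces an Ity_I128 expression built from
   two 8-byte loads combined with Iop_64HLto128 (reuses the hypothetical
   buffer from the store sketch above): */
static IRExpr *
load_example(void)
{
   return load(Iend_LE, Ity_I128, (HWord)buffer);
}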
void cleanupBlockOwnership(IRSB* sbOut, IRExpr* guard){
  if (VG_(sizeXA)(tempDebt) == 0){
    // No temps owe ownership: just mark the block state clean.
    addStoreGC(sbOut, guard, mkU64(0), &blockStateDirty);
    return;
  }
  // Snapshot the current debt list into permanent storage, so the
  // dirty call can walk it when the block actually runs.
  IRTemp* curDebtContents =
    VG_(perm_malloc)(sizeof(IRTemp) * VG_(sizeXA)(tempDebt),
                     vg_alignof(IRTemp));
  for(int i = 0; i < VG_(sizeXA)(tempDebt); ++i){
    curDebtContents[i] = *(IRTemp*)VG_(indexXA)(tempDebt, i);
  }
  IRDirty* dynCleanupDirty =
    unsafeIRDirty_0_N(2, "dynamicCleanup",
                      VG_(fnptr_to_fnentry)(dynamicCleanup),
                      mkIRExprVec_2(mkU64(VG_(sizeXA)(tempDebt)),
                                    mkU64((uintptr_t)curDebtContents)));
  dynCleanupDirty->mFx = Ifx_Modify;
  dynCleanupDirty->guard = guard;
  dynCleanupDirty->mAddr = mkU64((uintptr_t)shadowTemps);
  dynCleanupDirty->mSize = sizeof(ShadowTemp) * MAX_TEMPS;
  addStmtToIRSB(sbOut, IRStmt_Dirty(dynCleanupDirty));
}
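/* A rough sketch of the dynamicCleanup target this builds a call to: walk
   the recorded temp indices and release whatever shadow temps are still
   live. The body and the disownShadowTemp helper are guesses for
   illustration, not the tool's actual code. */
VG_REGPARM(2) void dynamicCleanup_sketch(UWord nTemps, IRTemp* temps){
  for (UWord i = 0; i < nTemps; ++i){
    ShadowTemp* temp = shadowTemps[temps[i]];
    if (temp != NULL){
      disownShadowTemp(temp);        /* hypothetical bulk-disown helper */
      shadowTemps[temps[i]] = NULL;
    }
  }
  blockStateDirty = 0;               /* assumed: mark the block clean again */
}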
void addClear(IRSB* sbOut, IRTemp dest, int num_vals){
  IRExpr* oldShadowTemp = runLoad64C(sbOut, &(shadowTemps[dest]));
  addDisownNonNull(sbOut, oldShadowTemp, num_vals);
  addStoreC(sbOut, mkU64(0), &(shadowTemps[dest]));
}
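/* Roughly the runtime effect of the three statements above, as plain C
   (a sketch; disownShadowTemp stands in for whatever addDisownNonNull
   compiles down to): */
static inline void clearSlot_sketch(IRTemp dest){
  ShadowTemp* old = shadowTemps[dest];
  disownShadowTemp(old);      /* release the old temp's references  */
  shadowTemps[dest] = NULL;   /* matches the final addStoreC of 0   */
}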
/* This version of flushEvents avoids callbacks entirely, except when
   the number of outstanding events is enough to be flushed - in which
   case a call to flush_data() is made. In all other cases, events are
   handled by creating IR to encode and store the memory access
   information to the array of outstanding events. */
static void flushEventsRange(IRSB* sb, Int start, Int size)
{
    // Conditionally call the flush method if there's not enough room for
    // all the new events. This may flush an incomplete block.
    IRExpr *entries_addr = mkU64((ULong)&theEntries);
    IRExpr *entries = load(ENDIAN, Ity_I32, entries_addr);

    IRExpr *max_entries_addr = mkU64((ULong)&theMaxEntries);
    IRExpr *max_entries = load(ENDIAN, Ity_I32, max_entries_addr);

    IRDirty *di =
        unsafeIRDirty_0_N(0,
                          "flush_data", VG_(fnptr_to_fnentry)( flush_data ),
                          mkIRExprVec_0() );

    di->guard = binop(Iop_CmpLT32S, max_entries,
                      binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, IRStmt_Dirty(di) );

    // Reload entries since it might have been changed by the callback
    entries = load(ENDIAN, Ity_I32, entries_addr);

    // Initialize the first address where we'll write trace information.
    // This will be advanced in the loop.
    IRExpr *addr =
        binop(Iop_Add64,
              load(ENDIAN, Ity_I64, mkU64((ULong)&theBlock)),
              unop(Iop_32Uto64,
                   binop(Iop_Mul32, entries, mkU32(sizeof(MV_TraceAddr)))));

    // Grab the thread id
    IRExpr *thread = load(ENDIAN, Ity_I32, mkU64((ULong)&theThread));

    Int i;
    for (i = start; i < start+size; i++) {
        Event *ev = &events[i];

        uint32 type = 0;
        switch (ev->ekind) {
            case Event_Ir:
                type = MV_ShiftedInstr;
                break;
            case Event_Dr:
                type = MV_ShiftedRead;
                break;
            case Event_Dw:
            case Event_Dm:
                type = MV_ShiftedWrite;
                break;
            default:
                tl_assert(0);
        }

        type |= ev->type << MV_DataShift;
        type |= ((uint32)ev->size << MV_SizeShift);

        // Pack the type/size bits with the thread id, then store the
        // address followed by the packed word.
        IRExpr *data = binop(Iop_Or32, mkU32(type), thread);

        IRStmt *store;

        store = IRStmt_Store(ENDIAN, addr, ev->addr);
        addStmtToIRSB( sb, store );

        // Advance to the type/data field
        addr = binop(Iop_Add64, addr, mkU64(sizeof(uint64)));

        store = IRStmt_Store(ENDIAN, addr, data);
        addStmtToIRSB( sb, store );

        // Advance to the next entry
        addr = binop(Iop_Add64, addr,
                     mkU64(sizeof(MV_TraceAddr)-sizeof(uint64)));
    }

    // Store the new entry count
    IRStmt *entries_store =
        IRStmt_Store(ENDIAN, entries_addr,
                     binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, entries_store );
}
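/* The loop above implies a trace-entry layout along these lines, inferred
   from the offsets it uses (an 8-byte address first, then a packed 32-bit
   word combining type, size, and thread id, padded out to
   sizeof(MV_TraceAddr)); the real definition lives in the tool's headers:
*/
typedef struct {
    uint64 addr;   /* effective address, stored first                     */
    uint32 data;   /* kind bits | (size << MV_SizeShift) | thread id      */
} MV_TraceAddr_sketch;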