/* Record `bindee` as the expression bound to pattern binder `n` in `mi`.
   Panics if `n` is not a valid binder index or if binder `n` already
   holds a bindee (each binder may be bound at most once per match). */
static void setBindee ( MatchInfo* mi, Int n, const IRExpr* bindee )
{
   Bool indexOK = n >= 0 && n < N_IRMATCH_BINDERS;
   if (!indexOK)
      vpanic("setBindee: out of range index");
   if (mi->bindee[n] != NULL)
      vpanic("setBindee: bindee already set");
   mi->bindee[n] = bindee;
}
/* Add a mapping orig (must be a virtual reg) -> replacement (must be a
   real reg) to `map`.  Panics on a duplicate orig entry or on a
   virtual/real mix-up.
   FIX: the panic messages previously said "addToHRegMap", which does not
   match this function's name and made diagnostics misleading. */
void addToHRegRemap ( HRegRemap* map, HReg orig, HReg replacement )
{
   Int i;
   for (i = 0; i < map->n_used; i++)
      if (map->orig[i] == orig)
         vpanic("addToHRegRemap: duplicate entry");
   if (!hregIsVirtual(orig))
      vpanic("addToHRegRemap: orig is not a vreg");
   if (hregIsVirtual(replacement))
      vpanic("addToHRegRemap: replacement is a vreg");

   /* NOTE(review): this bound is conservative -- it refuses to use the
      final slot (requires n_used+1 < N, not <= N).  Kept as-is since
      upstream uses the same check. */
   vassert(map->n_used+1 < N_HREG_REMAP);
   map->orig[map->n_used]        = orig;
   map->replacement[map->n_used] = replacement;
   map->n_used++;
}
/* Return a structurally independent deep copy of IRExpr `e`.  Every
   sub-expression, descr, callee and const is copied via the matching
   pyvex_deepCopy* helper; scalar fields (offsets, ops, tags, types) are
   copied by value.  Panics on an unrecognised expression tag. */
IRExpr* pyvex_deepCopyIRExpr ( IRExpr* e )
{
   switch (e->tag) {
      case Iex_Get:
         return IRExpr_Get(e->Iex.Get.offset, e->Iex.Get.ty);
      case Iex_GetI:
         return IRExpr_GetI(pyvex_deepCopyIRRegArray(e->Iex.GetI.descr),
                            pyvex_deepCopyIRExpr(e->Iex.GetI.ix),
                            e->Iex.GetI.bias);
      case Iex_RdTmp:
         return IRExpr_RdTmp(e->Iex.RdTmp.tmp);
      case Iex_Qop: {
         /* Quaternary op: all four args copied recursively. */
         IRQop* qop = e->Iex.Qop.details;
         return IRExpr_Qop(qop->op,
                           pyvex_deepCopyIRExpr(qop->arg1),
                           pyvex_deepCopyIRExpr(qop->arg2),
                           pyvex_deepCopyIRExpr(qop->arg3),
                           pyvex_deepCopyIRExpr(qop->arg4));
      }
      case Iex_Triop: {
         IRTriop *triop = e->Iex.Triop.details;
         return IRExpr_Triop(triop->op,
                             pyvex_deepCopyIRExpr(triop->arg1),
                             pyvex_deepCopyIRExpr(triop->arg2),
                             pyvex_deepCopyIRExpr(triop->arg3));
      }
      case Iex_Binop:
         return IRExpr_Binop(e->Iex.Binop.op,
                             pyvex_deepCopyIRExpr(e->Iex.Binop.arg1),
                             pyvex_deepCopyIRExpr(e->Iex.Binop.arg2));
      case Iex_Unop:
         return IRExpr_Unop(e->Iex.Unop.op,
                            pyvex_deepCopyIRExpr(e->Iex.Unop.arg));
      case Iex_Load:
         return IRExpr_Load(e->Iex.Load.end, e->Iex.Load.ty,
                            pyvex_deepCopyIRExpr(e->Iex.Load.addr));
      case Iex_Const:
         return IRExpr_Const(pyvex_deepCopyIRConst(e->Iex.Const.con));
      case Iex_CCall:
         return IRExpr_CCall(pyvex_deepCopyIRCallee(e->Iex.CCall.cee),
                             e->Iex.CCall.retty,
                             pyvex_deepCopyIRExprVec(e->Iex.CCall.args));
      case Iex_ITE:
         return IRExpr_ITE(pyvex_deepCopyIRExpr(e->Iex.ITE.cond),
                           pyvex_deepCopyIRExpr(e->Iex.ITE.iftrue),
                           pyvex_deepCopyIRExpr(e->Iex.ITE.iffalse));
      case Iex_VECRET:
         return IRExpr_VECRET();
      case Iex_BBPTR:
         return IRExpr_BBPTR();
      default:
         vpanic("pyvex_deepCopyIRExpr");
   }
}
/* Pretty-print an HRegUsage: first the real registers (encoded as bits
   in the rRead/rWritten 64-bit masks, indexed by the register universe),
   then the virtual registers (stored as explicit vRegs/vMode arrays). */
void ppHRegUsage ( const RRegUniverse* univ, HRegUsage* tab )
{
   /* This is going to fail miserably if N_RREGUNIVERSE_REGS exceeds
      64.  So let's cause it to fail in an obvious way. */
   vassert(N_RREGUNIVERSE_REGS == 64);

   vex_printf("HRegUsage {\n");
   /* First print the real regs */
   for (UInt i = 0; i < N_RREGUNIVERSE_REGS; i++) {
      Bool rRd = (tab->rRead    & (1ULL << i)) != 0;
      Bool rWr = (tab->rWritten & (1ULL << i)) != 0;
      /* read+written => "Modify"; skip regs that are neither. */
      const HChar* str = "Modify ";
      if (!rRd && !rWr) {
         continue;
      } else if ( rRd && !rWr) {
         str = "Read   ";
      } else if (!rRd &&  rWr) {
         str = "Write  ";
      }
      /* else "Modify" is correct */
      vex_printf("   %s ", str);
      ppHReg(univ->regs[i]);
      vex_printf("\n");
   }
   /* and now the virtual registers */
   for (UInt i = 0; i < tab->n_vRegs; i++) {
      const HChar* str = NULL;
      switch (tab->vMode[i]) {
         case HRmRead:   str = "Read   "; break;
         case HRmWrite:  str = "Write  "; break;
         case HRmModify: str = "Modify "; break;
         default: vpanic("ppHRegUsage");
      }
      vex_printf("   %s ", str);
      ppHReg(tab->vRegs[i]);
      vex_printf("\n");
   }
   vex_printf("}\n");
}
/* Sanity-check an ARMAMode3.  Placeholder: no ARMAMode3 tags are
   handled yet, so every input panics.  NOTE(review): the function can
   therefore never return a Bool; presumably real tag cases are to be
   filled in later -- confirm against the ARMAMode3 definition. */
static Bool sane_AMode3 ( ARMAMode3* am )
{
   switch (am->tag) {
      default:
         vpanic("sane_AMode3: unknown arm amode tag");
   }
}
/* Evaluate ARM condition code `cond` against the flag thunk
   (cc_op, cc_dep1, cc_dep2).  Returns 1 if the condition holds, else 0.
   ARM condition codes come in even/odd pairs where the odd one is the
   negation of the even one; bit 0 of `cond` (`inv`) selects the
   negated variant, applied by XOR at the end.  The N/Z/C/V flags are
   extracted from the packed nzvc word by shifting so the wanted flag
   lands in bit 0 (higher bits are masked off by the final `1 &`). */
/*static*/ UInt armg_calculate_condition ( UInt/*ARMCondcode*/ cond,
                                           UInt cc_op,
                                           UInt cc_dep1, UInt cc_dep2 )
{
   UInt nf,zf,vf,cf;
   UInt inv  = cond & 1;          /* odd cond => invert the even cond's result */
   UInt nzvc = armg_calculate_flags_all(cc_op, cc_dep1, cc_dep2);

   switch (cond) {
      case ARMCondEQ:    // Z=1         => z
      case ARMCondNE:    // Z=0
         zf = nzvc >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ zf);

      case ARMCondHS:    // C=1         => c
      case ARMCondLO:    // C=0
         cf = nzvc >> ARMG_CC_SHIFT_C;
         return 1 & (inv ^ cf);

      case ARMCondMI:    // N=1         => n
      case ARMCondPL:    // N=0
         nf = nzvc >> ARMG_CC_SHIFT_N;
         return 1 & (inv ^ nf);

      case ARMCondVS:    // V=1         => v
      case ARMCondVC:    // V=0
         vf = nzvc >> ARMG_CC_SHIFT_V;
         return 1 & (inv ^ vf);

      case ARMCondHI:    // C=1 && Z=0  => c & ~z
      case ARMCondLS:    // C=0 || Z=1
         cf = nzvc >> ARMG_CC_SHIFT_C;
         zf = nzvc >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ (cf & ~zf));

      case ARMCondGE:    // N=V         => ~(n^v)
      case ARMCondLT:    // N!=V
         nf = nzvc >> ARMG_CC_SHIFT_N;
         vf = nzvc >> ARMG_CC_SHIFT_V;
         return 1 & (inv ^ ~(nf ^ vf));

      case ARMCondGT:    // Z=0 && N=V  => (~z & ~(n^v)  =>  ~(z | (n^v)
      case ARMCondLE:    // Z=1 || N!=V
         nf = nzvc >> ARMG_CC_SHIFT_N;
         vf = nzvc >> ARMG_CC_SHIFT_V;
         zf = nzvc >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ ~(zf | (nf ^ vf)));

      case ARMCondAL: // should never get here: Always => no flags to calc
      case ARMCondNV: // should never get here: Illegal instr
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_condition(ARM)( %u, %u, 0x%x, 0x%x )\n",
                    cond, cc_op, cc_dep1, cc_dep2 );
         vpanic("armg_calculate_condition(ARM)");
   }
}
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void panic(const char *fmt, ...)
{
   va_list ap;

   va_start(ap, fmt);
   /* No va_end: vpanic presumably never returns (it reboots, per the
      comment above) -- TODO confirm vpanic is declared noreturn. */
   vpanic(fmt, ap);
}
/* Special case function for Top/Signed/Unsigned/Bot lattice.
   Meets `rhstype` into the type currently recorded for `varname`,
   stores the result, and returns it.  Lattice rules: no-type/Top take
   rhstype; Bot absorbs everything; Signed/Unsigned survive unless they
   conflict with rhstype, in which case the result collapses to Bot. */
IntType setTypeOfToMeet(HWord varname, IntType rhstype)
{
   IntType cur = getTypeOf(varname);

   switch (cur) {
      case BogusTy:   /* no current type recorded yet */
      case Top:       /* Top is the lattice identity  */
         setTypeOf(varname, rhstype);
         return rhstype;

      case Bot:       /* Bot is absorbing */
         setTypeOf(varname, Bot);
         return Bot;

      case SignedTy:
         if (rhstype == UnsignedTy || rhstype == Bot) {
            setTypeOf(varname, Bot);
            return Bot;
         }
         return SignedTy;

      case UnsignedTy:
         if (rhstype == SignedTy || rhstype == Bot) {
            setTypeOf(varname, Bot);
            return Bot;
         }
         return UnsignedTy;

      default:
         /* Should never reach here */
         vpanic("Reached default in setTypeOfToMeet\n");
   }

   /* Should never reach here */
   vpanic("Reached end of setTypeOfToMeet \n");
   return 0;
}
/* Fatal-error entry point: forward the formatted message to vpanic.
   (The va_end is unreachable if vpanic does not return -- kept for
   form's sake; TODO confirm vpanic's noreturn status.) */
void panic(const char *fmt, ...)
{
   va_list adx;

   va_start(adx, fmt);
   vpanic(fmt, adx);
   va_end(adx);
}
/* Fatal-error entry point: forward the formatted message to vpanic.
   Mirrors the other panic() variants in this file; the trailing va_end
   is unreachable if vpanic never returns. */
void panic(const char *format, ...)
{
   va_list alist;

   va_start(alist, format);
   vpanic(format, alist);
   va_end(alist);
}
/* Look up the real register that virtual register `orig` was mapped to
   by addToHRegRemap.  Real registers map to themselves.  Panics if a
   virtual register has no entry in the map. */
HReg lookupHRegRemap ( HRegRemap* map, HReg orig )
{
   Int ix;

   /* Real regs are their own replacement. */
   if (!hregIsVirtual(orig))
      return orig;

   /* Linear scan of the recorded vreg entries. */
   ix = 0;
   while (ix < map->n_used) {
      if (map->orig[ix] == orig)
         return map->replacement[ix];
      ix++;
   }

   vpanic("lookupHRegRemap: not found");
}
/* Generate code to compute the address of element `off`+`bias` (mod 8)
   of the guest-state array described by `descr`, returning it as an
   ARMAMode2.  Only the 8-element / 1-or-8-byte-element shapes produced
   by an x86 front end are supported. */
static ARMAMode2* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr,
                                        IRExpr* off, Int bias )
{
   HReg tmp, tmp2, roff;
   Int  elemSz = sizeofIRType(descr->elemTy);
   Int  nElems = descr->nElems;
   ARMImm12A imm12a;

   /* throw out any cases not generated by an x86 front end.  In theory
      there might be a day where we need to handle them -- if we ever
      run non-x86-guest on x86 host. */
   if (nElems != 8 || (elemSz != 1 && elemSz != 8))
      vpanic("genGuestArrayOffset(arm host)");

   /* Compute off into a reg, %off.  Then return:
        movl %off, %tmp
        addl $bias, %tmp  (if bias != 0)
        andl %tmp, 7
        ... base(%ebp, %tmp, shift) ... */
   tmp  = newVRegI(env);
   roff = iselIntExpr_R(env, off);
   addInstr(env, mk_iMOVsd_RR(roff, tmp));
   if (bias != 0) {
      if ( mk_ARMImm12A( (UInt)bias, &imm12a ) ) {
         /* bias fits an imm12A: add it directly to tmp. */
         addInstr(env, ARMInstr_DPInstr2(ARMalu_ADD, tmp, tmp,
                                         ARMAMode1_I12A( imm12a )));
      } else {
         /* bias doesn't fit: materialise it in a scratch vreg and add.
            BUGFIX: the literal must be loaded into the scratch register
            tmp3, not into tmp -- the original loaded into tmp, clobbering
            the offset just computed, and then used tmp3 uninitialised. */
         HReg tmp3 = newVRegI(env);
         addInstr(env, ARMInstr_Literal( tmp3, (UInt)bias ));
         addInstr(env, ARMInstr_DPInstr2(ARMalu_ADD, tmp, tmp,
                                         ARMAMode1_ShlI( tmp3, 0 )));
      }
   }
   /* Mask the index down to 0..7 (the array wraps). */
   mk_ARMImm12A( (UInt)7, &imm12a );
   addInstr(env, ARMInstr_DPInstr2(ARMalu_AND, tmp, tmp,
                                   ARMAMode1_I12A( imm12a )));

   vassert(elemSz == 1 || elemSz == 8);

   // CAB: This anywhere near correct?
   // X86AMode_IRRS: Immediate + Reg1 + (Reg2 << Shift)
   // return X86AMode_IRRS( descr->base, hregX86_EBP(), tmp, elemSz==8 ? 3 : 0);

   tmp2 = newVRegI(env);
   // tmp2 = GET_BP_REG + (tmp << 3|0)
   addInstr(env, ARMInstr_DPInstr2(ARMalu_ADD, tmp2, GET_BP_REG(),
                                   ARMAMode1_ShlI(tmp, elemSz==8 ? 3 : 0)));
   return ARMAMode2_RI( tmp2, descr->base );
}
/* Print the symbolic name of a register class; panics on an
   unknown class value. */
void ppHRegClass ( HRegClass hrc )
{
   switch (hrc) {
      case HRcInt32:  vex_printf("HRcInt32");  break;
      case HRcInt64:  vex_printf("HRcInt64");  break;
      case HRcFlt64:  vex_printf("HRcFlt64");  break;
      case HRcVec64:  vex_printf("HRcVec64");  break;
      case HRcVec128: vex_printf("HRcVec128"); break;
      default: vpanic("ppHRegClass");
   }
}
/* cmn_err() va_list back-end: CE_PANIC escalates to vpanic (does not
   return), CE_NOTE is suppressed (to cut noise in userland stress
   testing), and everything else is written to stderr wrapped in the
   severity's prefix/suffix strings. */
void vcmn_err(int ce, const char *fmt, va_list adx)
{
   if (ce == CE_PANIC)
      vpanic(fmt, adx);
   if (ce != CE_NOTE) {   /* suppress noise in userland stress testing */
      (void) fprintf(stderr, "%s", ce_prefix[ce]);
      (void) vfprintf(stderr, fmt, adx);
      (void) fprintf(stderr, "%s", ce_suffix[ce]);
   }
}
/* Union-by-rank for the union-find overlay graph.  Merges the sets
   containing `x` and `y` and returns the root of the merged set.  The
   surviving root's tmpType is updated to the meet of both roots' types,
   and its childRefCount is bumped for the newly attached child.  Ranks
   only grow when two equal-rank roots merge, per standard union-by-rank. */
ufOgNode * ufUnion(ufOgNode * x, ufOgNode * y)
{
   ufOgNode * xRoot;
   ufOgNode * yRoot;

   // VG_(printf)("XXX ufUnion x: %x y: %x \n", x, y);

   vassert(x != NULL);
   vassert(y != NULL);
   vassert(x->parent != NULL);
   vassert(y->parent != NULL);

   xRoot = ufFind(x);
   yRoot = ufFind(y);

   // VG_(printf)("XXX ufUnion xRoot: %x yRoot: %x \n", xRoot, yRoot);

   vassert(xRoot != NULL);
   vassert(yRoot != NULL);
   vassert(xRoot->parent != NULL);
   vassert(yRoot->parent != NULL);

   // VG_(printf)("XXX ufUnion xRoot rank: %u yRoot rank: %u \n", xRoot->rank, yRoot->rank);

   if (xRoot->rank > yRoot->rank) {
      /* Attach the shallower tree (y) under the deeper root (x). */
      yRoot->parent = xRoot;
      xRoot->tmpType = ufMeet(xRoot->tmpType, yRoot->tmpType);
      xRoot->childRefCount++;
      return xRoot;
   } else if (xRoot->rank < yRoot->rank) {
      xRoot->parent = yRoot;
      yRoot->tmpType = ufMeet(xRoot->tmpType, yRoot->tmpType);
      yRoot->childRefCount++;
      return yRoot;
   } else if (xRoot != yRoot) {
      /* Equal ranks: pick x as the root and bump its rank. */
      yRoot->parent = xRoot;
      xRoot->rank++;
      xRoot->tmpType = ufMeet(xRoot->tmpType, yRoot->tmpType);
      xRoot->childRefCount++;
      return xRoot;
   } else if (xRoot == yRoot) {
      /* Already in the same set: nothing to do. */
      return xRoot; // is equal to yRoot
   }

   vpanic("Reached end of ufUnion! \n"); // Should not happen!
   return xRoot;
}
/* Assertion-failure handler: forwards the formatted message to vpanic.
   In kernel builds, if a panic is already in progress (panicstr set),
   return silently to avoid recursive panics. */
void kern_assert(const char *fmt, ...)
{
   va_list ap;

#ifdef _KERNEL
   if (panicstr != NULL)
      return;
#endif
   va_start(ap, fmt);
   vpanic(fmt, ap);
   va_end(ap);   /* unreachable if vpanic does not return */
}
/* Call _before_ call to getVarOf or setVarOf.
   Collapse a (loc1, loc2, ltype) location triple into a single hash
   key.  Memory and register locations use loc1 alone; temporaries pack
   both components into one word.  ErrorLoc panics; any other tag maps
   to key 0. */
HWord locToHashKey(HWord loc1, HWord loc2, LocType ltype)
{
   switch (ltype) {
      case MemLoc:
      case RegLoc:
         /* loc2 carries no information for these kinds. */
         return loc1;
      case TmpLoc:
         /* Pack both components into one key word. */
         return (loc1<<14) | loc2;
      case ErrorLoc:
         vpanic("ErrorLoc in locToHashKey.\n");
         return 0;
      default:
         return 0;
   }
}
/* Generic printing for registers.  Prints "%<v?><class-letter><num>",
   where the "v" prefix marks virtual registers and the letter encodes
   the register class (r/R/F/v/V).  Panics on an unknown class. */
void ppHReg ( HReg r )
{
   HChar* maybe_v = hregIsVirtual(r) ? "v" : "";
   Int    regNo   = hregNumber(r);
   switch (hregClass(r)) {
      case HRcInt32:  vex_printf("%%%sr%d", maybe_v, regNo); return;
      case HRcInt64:  vex_printf("%%%sR%d", maybe_v, regNo); return;
      case HRcFlt64:  vex_printf("%%%sF%d", maybe_v, regNo); return;
      case HRcVec64:  vex_printf("%%%sv%d", maybe_v, regNo); return;
      case HRcVec128: vex_printf("%%%sV%d", maybe_v, regNo); return;
      default: vpanic("ppHReg");
   }
}
/* Select instructions for a branch destination: delegate to the _wrk
   worker, then sanity-check the result (a register destination must be
   a 32-bit int register).  Panics on an unknown BD tag. */
static ARMBranchDest* iselIntExpr_BD ( ISelEnv* env, IRExpr* e )
{
   ARMBranchDest* bd = iselIntExpr_BD_wrk(env, e);
   /* sanity checks ... */
   switch (bd->tag) {
      case ARMbdImm:
         return bd;
      case ARMbdReg:
         vassert(hregClass(bd->ARMbd.Reg.reg) == HRcInt32);
         // vassert(hregIsVirtual(bd->ARMbd.Reg.reg));  // CAB ?
         return bd;
      default:
         vpanic("iselIntExpr_BD: unknown arm BD tag");
   }
}
/* Pretty-print an ARMAMode1.  Placeholder: all known tags currently
   print a "Not implemented" marker; only a genuinely unknown tag
   panics. */
void ppARMAMode1 ( ARMAMode1* am )
{
   switch (am->tag) {
      case ARMam1_I12A:
      case ARMam1_ShlI:
      case ARMam1_ShrI:
      case ARMam1_SarI:
      case ARMam1_ShlR:
      case ARMam1_ShrR:
      case ARMam1_SarR:
         vex_printf("ppARMAMode1: Not implemented");
         break;
      default:
         vpanic("ppARMAMode1");
   }
}
/* Out-of-memory handler for LibVEX's bump allocator: report which pool
   (TEMP or PERM) was exhausted along with its bounds, then panic.
   Never returns. */
void private_LibVEX_alloc_OOM(void)
{
   const char* pool = "???";
   /* Identify the exhausted pool by comparing against the pool bases. */
   if (private_LibVEX_alloc_first == &temporary[0]) pool = "TEMP";
   if (private_LibVEX_alloc_first == &permanent[0]) pool = "PERM";
   vex_printf("VEX temporary storage exhausted.\n");
   vex_printf("Pool = %s,  start %p curr %p end %p (size %lld)\n",
              pool,
              private_LibVEX_alloc_first,
              private_LibVEX_alloc_curr,
              private_LibVEX_alloc_last,
              (Long)(private_LibVEX_alloc_last + 1
                     - private_LibVEX_alloc_first));
   vpanic("VEX temporary storage exhausted.\n"
          "Increase N_{TEMPORARY,PERMANENT}_BYTES and recompile.");
}
/* Calculate all the 4 flags from the supplied thunk parameters.
   Dispatches on the thunk opcode to the per-op ACTIONS_* macros
   (presumably each macro computes and returns the packed NZVC word --
   the cases have no break/return of their own; confirm against the
   macro definitions).  Unknown opcodes panic. */
UInt armg_calculate_flags_all ( UInt cc_op,
                                UInt cc_dep1_formal,
                                UInt cc_dep2_formal )
{
   switch (cc_op) {
      case ARMG_CC_OP_LOGIC:
         ACTIONS_LOGIC();
      case ARMG_CC_OP_ADD:
         ACTIONS_ADD();
      case ARMG_CC_OP_SUB:
         ACTIONS_SUB();
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flags_all(ARM)( %u, 0x%x, 0x%x )\n",
                    cc_op, cc_dep1_formal, cc_dep2_formal );
         vpanic("armg_calculate_flags_all(ARM)");
   }
}
/* Return a structurally independent deep copy of IRStmt `s`.  Each
   nested IR object is copied via the matching pyvex_deepCopy* helper;
   scalar fields are copied by value.  LLSC's storedata may be NULL
   (load-linked) and is only copied when present.  Panics on an
   unrecognised statement tag. */
IRStmt* pyvex_deepCopyIRStmt ( IRStmt* s )
{
   switch (s->tag) {
      case Ist_NoOp:
         return IRStmt_NoOp();
      case Ist_AbiHint:
         return IRStmt_AbiHint(pyvex_deepCopyIRExpr(s->Ist.AbiHint.base),
                               s->Ist.AbiHint.len,
                               pyvex_deepCopyIRExpr(s->Ist.AbiHint.nia));
      case Ist_IMark:
         return IRStmt_IMark(s->Ist.IMark.addr,
                             s->Ist.IMark.len,
                             s->Ist.IMark.delta);
      case Ist_Put:
         return IRStmt_Put(s->Ist.Put.offset,
                           pyvex_deepCopyIRExpr(s->Ist.Put.data));
      case Ist_PutI:
         return IRStmt_PutI(pyvex_deepCopyIRPutI(s->Ist.PutI.details));
      case Ist_WrTmp:
         return IRStmt_WrTmp(s->Ist.WrTmp.tmp,
                             pyvex_deepCopyIRExpr(s->Ist.WrTmp.data));
      case Ist_Store:
         return IRStmt_Store(s->Ist.Store.end,
                             pyvex_deepCopyIRExpr(s->Ist.Store.addr),
                             pyvex_deepCopyIRExpr(s->Ist.Store.data));
      case Ist_CAS:
         return IRStmt_CAS(pyvex_deepCopyIRCAS(s->Ist.CAS.details));
      case Ist_LLSC:
         /* storedata is NULL for load-linked; copy only if present. */
         return IRStmt_LLSC(s->Ist.LLSC.end,
                            s->Ist.LLSC.result,
                            pyvex_deepCopyIRExpr(s->Ist.LLSC.addr),
                            s->Ist.LLSC.storedata
                               ? pyvex_deepCopyIRExpr(s->Ist.LLSC.storedata)
                               : NULL);
      case Ist_Dirty:
         return IRStmt_Dirty(pyvex_deepCopyIRDirty(s->Ist.Dirty.details));
      case Ist_MBE:
         return IRStmt_MBE(s->Ist.MBE.event);
      case Ist_Exit:
         return IRStmt_Exit(pyvex_deepCopyIRExpr(s->Ist.Exit.guard),
                            s->Ist.Exit.jk,
                            pyvex_deepCopyIRConst(s->Ist.Exit.dst),
                            s->Ist.Exit.offsIP);
      default:
         vpanic("pyvex_deepCopyIRStmt");
   }
}
/* Instrument a store: append a dirty helper call to `sb` that records
   the store at runtime.  Only stores whose address is a temporary are
   instrumented; the choice of helper depends on whether the stored data
   is a temporary (EmitStoreAddr2TmpHelper, which also gets the tmp name
   and the current counter) or a constant (EmitStoreAddr2ConstHelper).
   Any other data form panics; any other address form is ignored. */
void AddStoreHelper(IRSB* sb, IRExpr* addr, IRExpr* data)
{
   IRDirty* d;
   HWord tmpname;

   switch (addr->tag) {
      case (Iex_RdTmp):
         switch (data->tag) {
            case (Iex_RdTmp):
               /* store of a tmp: pass the tmp's name and the counter. */
               tmpname = (HWord) data->Iex.RdTmp.tmp;
               d = unsafeIRDirty_0_N(0,
                                     "EmitStoreAddr2TmpHelper",
                                     &EmitStoreAddr2TmpHelper,
                                     mkIRExprVec_3(addr,
                                                   mkIRExpr_HWord(tmpname),
                                                   mkIRExpr_HWord(counter) ) );
               setHelperAnns(d);
               addStmtToIRSB(sb, IRStmt_Dirty(d));
               break;
            case (Iex_Const):
               /* add code to emit new tyvar for memory address */
               d = unsafeIRDirty_0_N(0,
                                     "EmitStoreAddr2ConstHelper",
                                     &EmitStoreAddr2ConstHelper,
                                     mkIRExprVec_1(addr ) );
               setHelperAnns(d);
               addStmtToIRSB(sb,IRStmt_Dirty(d));
               break;
            default:
               /* Should not reach here. */
               ppIRExpr(data);
               vpanic("Bad store address!\n");
               break;
         }
         break;
      default:
         /* Non-tmp store addresses are not instrumented. */
         break;
   }
   return;
}
/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void kassert_panic(const char *fmt, ...)
{
   static char buf[256];
   va_list ap;

   /* Format the message once into buf for the logging/ktr/kdb paths. */
   va_start(ap, fmt);
   (void)vsnprintf(buf, sizeof(buf), fmt, ap);
   va_end(ap);

   /*
    * panic if we're not just warning, or if we've exceeded
    * kassert_log_panic_at warnings.
    */
   if (!kassert_warn_only ||
       (kassert_log_panic_at > 0 &&
        kassert_warnings >= kassert_log_panic_at)) {
      /* Restart the va_list: vsnprintf above consumed the first one. */
      va_start(ap, fmt);
      vpanic(fmt, ap);
      /* NORETURN */
   }
#ifdef KTR
   if (kassert_do_ktr)
      CTR0(ktr_mask, buf);
#endif /* KTR */

   /*
    * log if we've not yet met the mute limit.
    */
   if (kassert_do_log &&
       (kassert_log_mute_at == 0 ||
        kassert_warnings < kassert_log_mute_at)) {
      static struct timeval lasterr;
      static int curerr;

      /* Rate-limit the log output. */
      if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
         printf("KASSERT failed: %s\n", buf);
         kdb_backtrace();
      }
   }
#ifdef KDB
   if (kassert_do_kdb) {
      kdb_enter(KDB_WHY_KASSERT, buf);
   }
#endif
   atomic_add_int(&kassert_warnings, 1);
}
/* Return a fresh IRConst with the same tag and payload as `c`.
   Panics on an unrecognised constant tag. */
IRConst* pyvex_deepCopyIRConst ( IRConst* c )
{
   switch (c->tag) {
      case Ico_U1:   return IRConst_U1(c->Ico.U1);
      case Ico_U8:   return IRConst_U8(c->Ico.U8);
      case Ico_U16:  return IRConst_U16(c->Ico.U16);
      case Ico_U32:  return IRConst_U32(c->Ico.U32);
      case Ico_U64:  return IRConst_U64(c->Ico.U64);
      case Ico_F32:  return IRConst_F32(c->Ico.F32);
      case Ico_F32i: return IRConst_F32i(c->Ico.F32i);
      case Ico_F64:  return IRConst_F64(c->Ico.F64);
      case Ico_F64i: return IRConst_F64i(c->Ico.F64i);
      case Ico_V128: return IRConst_V128(c->Ico.V128);
      case Ico_V256: return IRConst_V256(c->Ico.V256);
      default: vpanic("pyvex_deepCopyIRConst");
   }
}
/* Structurally match expression `e` against pattern `p`, recording the
   sub-expressions captured by Iex_Binder nodes into `mi` (via
   setBindee).  Returns True on a full match.  Only the pattern shapes
   used by the instruction selectors are handled (Binder, Unop, Binop,
   Load, Const); any other pattern tag is printed and panics. */
static Bool matchWrk ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ )
{
   switch (p->tag) {
      case Iex_Binder:
         /* aha, what we were looking for. */
         setBindee(mi, p->Iex.Binder.binder, e);
         return True;
#if 0
      case Iex_GetI:
         if (e->tag != Iex_GetI) return False;
         if (p->Iex.GetI.ty != e->Iex.GetI.ty) return False;
         /* we ignore the offset limit hints .. */
         if (!matchWrk(mi, p->Iex.GetI.offset, e->Iex.GetI.offset))
            return False;
         return True;
#endif
      case Iex_Unop:
         if (e->tag != Iex_Unop) return False;
         if (p->Iex.Unop.op != e->Iex.Unop.op) return False;
         if (!matchWrk(mi, p->Iex.Unop.arg, e->Iex.Unop.arg))
            return False;
         return True;
      case Iex_Binop:
         if (e->tag != Iex_Binop) return False;
         if (p->Iex.Binop.op != e->Iex.Binop.op) return False;
         if (!matchWrk(mi, p->Iex.Binop.arg1, e->Iex.Binop.arg1))
            return False;
         if (!matchWrk(mi, p->Iex.Binop.arg2, e->Iex.Binop.arg2))
            return False;
         return True;
      case Iex_Load:
         if (e->tag != Iex_Load) return False;
         if (p->Iex.Load.end != e->Iex.Load.end) return False;
         if (p->Iex.Load.ty != e->Iex.Load.ty) return False;
         if (!matchWrk(mi, p->Iex.Load.addr, e->Iex.Load.addr))
            return False;
         return True;
      case Iex_Const:
         /* Constants must be equal, not just same-tagged. */
         if (e->tag != Iex_Const) return False;
         return eqIRConst(p->Iex.Const.con, e->Iex.Const.con);
      default:
         ppIRExpr(p);
         vpanic("match");
   }
}
/* cmn_err() va_list back-end (Lustre variant): CE_PANIC escalates to
   vpanic; CE_NOTE is suppressed; everything else is formatted into a
   local buffer and logged -- a leading '!' in the format selects debug
   (D_INFO) rather than error logging, following the cmn_err convention. */
void vcmn_err(int ce, const char *fmt, va_list ap)
{
   char msg[MAXMSGLEN];

   if (ce == CE_PANIC)
      vpanic(fmt, ap);
   if (ce != CE_NOTE) {
      vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
      if (fmt[0] == '!')
         CDEBUG(D_INFO, "%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
      else
         CERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
   }
} /* vcmn_err() */
/* Pretty-print an HRegUsage table: one line per recorded register,
   showing its access mode (Read/Write/Modify) and the register itself.
   Panics on an unknown mode value. */
void ppHRegUsage ( HRegUsage* tab )
{
   Int i;

   vex_printf("HRegUsage {\n");
   for (i = 0; i < tab->n_used; i++) {
      HChar* str;
      if (tab->mode[i] == HRmRead)
         str = "Read ";
      else if (tab->mode[i] == HRmWrite)
         str = "Write ";
      else if (tab->mode[i] == HRmModify)
         str = "Modify ";
      else
         vpanic("ppHRegUsage");
      vex_printf(" %s ", str);
      ppHReg(tab->hreg[i]);
      vex_printf("\n");
   }
   vex_printf("}\n");
}
/* Lattice meet over {Top, SignedTy, UnsignedTy, Bot}:
   Bot absorbs everything, Top is the identity, equal concrete types
   meet to themselves, and conflicting signedness collapses to Bot. */
IntType ufMeet(IntType x, IntType y)
{
   /* Bot is absorbing. */
   if (x == Bot || y == Bot)
      return Bot;

   /* Top is the identity element. */
   if (x == Top)
      return y;
   if (y == Top)
      return x;

   /* Both concrete: conflict => Bot, otherwise keep the common type. */
   if (x == UnsignedTy)
      return (y == SignedTy) ? Bot : UnsignedTy;
   if (x == SignedTy)
      return (y == UnsignedTy) ? Bot : SignedTy;

   /* All lattice elements are covered above. */
   vpanic("Reached end of ufMeet!\n");
   return Top;
}