/*
 * Scan the immediates of *opcode and return the first member-vector or
 * offset-vector immediate as an ImmVector.
 *
 * MA immediates stream as bytes; BLA/SLA immediates stream as int32 words.
 * The opcode is required to have one such immediate (NOT_REACHED otherwise).
 */
ImmVector getImmVector(const Opcode* opcode) {
  auto const immCount = numImmediates(*opcode);
  for (int idx = 0; idx < immCount; ++idx) {
    auto const argType = immType(*opcode, idx);
    if (argType == MA) {
      // Member vectors are encoded as a byte stream.
      void* raw = getImmPtr(opcode, idx);
      return ImmVector::createFromStream(static_cast<const uint8_t*>(raw));
    }
    if (argType == BLA || argType == SLA) {
      // Offset/string-offset vectors are encoded as an int32 stream.
      void* raw = getImmPtr(opcode, idx);
      return ImmVector::createFromStream(static_cast<const int32_t*>(raw));
    }
  }
  NOT_REACHED();
}
/*
 * Return the vector immediate of the instruction at `opcode` as an
 * ImmVector.
 *
 * BLA/SLA/ILA/I32LA immediates are int32 streams handled by
 * ImmVector::createFromStream; VSA immediates carry an int32 element count
 * followed by the element data. The instruction must have one such
 * immediate (not_reached otherwise).
 */
ImmVector getImmVector(PC opcode) {
  auto const op = peek_op(opcode);
  auto const immCount = numImmediates(op);
  for (int idx = 0; idx < immCount; ++idx) {
    auto const argType = immType(op, idx);
    if (argType == BLA || argType == SLA ||
        argType == ILA || argType == I32LA) {
      void* raw = getImmPtr(opcode, idx);
      return ImmVector::createFromStream(static_cast<const int32_t*>(raw));
    }
    if (argType == VSA) {
      // First word is the element count; the payload starts right after it.
      auto const words =
        reinterpret_cast<const int32_t*>(getImmPtr(opcode, idx));
      auto const count = words[0];
      return ImmVector(reinterpret_cast<const uint8_t*>(words + 1),
                       count, count);
    }
  }
  not_reached();
}
/*
 * Return a writable pointer to the jump-target (BA) immediate of *instr,
 * or nullptr if the instruction has no BA immediate.
 *
 * Switch instructions are excluded by assertion: their multiple targets
 * cannot be described by a single Offset*.
 */
Offset* instrJumpOffset(Opcode* instr) {
  // Per-opcode bitmask: bit k is set iff immediate k has type BA. Built by
  // expanding OPCODES with BA mapped to 1 and every other immediate type
  // mapped to 0; ONE..FOUR pack up to four immediate flags into one int8_t.
  static const int8_t jumpMask[] = {
#define NA 0
#define MA 0
#define IVA 0
#define I64A 0
#define DA 0
#define SA 0
#define AA 0
#define BA 1
#define HA 0
#define IA 0
#define OA 0
#define ONE(a) a
#define TWO(a, b) (a + 2 * b)
#define THREE(a, b, c) (a + 2 * b + 4 * c)
#define FOUR(a, b, c, d) (a + 2 * b + 4 * c + 8 * d)
#define O(name, imm, pop, push, flags) imm,
    OPCODES
#undef NA
#undef MA
#undef IVA
#undef I64A
#undef DA
#undef SA
#undef AA
#undef HA
#undef IA
#undef BA
#undef OA
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef O
  };

  assert(!isSwitch(*instr));

  int mask = jumpMask[*instr];
  if (mask == 0) { return nullptr; }

  // At most one immediate is a BA, so the mask is a single bit; decode it
  // into the immediate's index.
  int immNum;
  switch (mask) {
    case 0: return nullptr;  // unreachable: handled above
    case 1: immNum = 0; break;
    case 2: immNum = 1; break;
    case 4: immNum = 2; break;
    case 8: immNum = 3; break;
    default: assert(false); return nullptr;
  }

  return &getImmPtr(instr, immNum)->u_BA;
}
/*
 * Decode and return the iterator-table (ILA) immediate of the instruction
 * at `opcode`. The instruction must have an ILA immediate (not_reached
 * otherwise).
 */
IterTable getIterTable(PC opcode) {
  auto const op = peek_op(opcode);
  auto const immCount = numImmediates(op);
  for (int idx = 0; idx < immCount; ++idx) {
    if (immType(op, idx) != ILA) continue;
    // Hand the raw immediate bytes to the table decoder.
    auto stream = reinterpret_cast<PC>(getImmPtr(opcode, idx));
    return iterTableFromStream(stream);
  }
  not_reached();
}
/*
 * Emit an inlined fast path for a "singleton" accessor backed by a static
 * class property.
 *
 * clsOp and propOp are the String instructions naming the class and the
 * property. Emits a load of the sprop; if its value is null, jumps to a
 * side exit that retranslates without singleton inlining, otherwise pushes
 * the (increffed) value. PUNTs when the class isn't persistent or the
 * sprop isn't accessible.
 */
void inlSingletonSProp(IRGS& env,
                       const Func* func,
                       const Op* clsOp,
                       const Op* propOp) {
  assertx(*clsOp == Op::String);
  assertx(*propOp == Op::String);

  // Side exit that disables singleton inlining on retranslation.
  TransFlags flags;
  flags.noinlineSingleton = true;
  auto sideExit = makeExit(env, flags);

  // Decode the class and property names from the String immediates.
  auto const unit = func->unit();
  auto const className = unit->lookupLitstrId(getImmPtr(clsOp, 0)->u_SA);
  auto const propertyName = unit->lookupLitstrId(getImmPtr(propOp, 0)->u_SA);

  // Bail unless the class resolves to one with persistent RDS storage.
  auto const cls = Unit::lookupClass(className);
  if (UNLIKELY(!classHasPersistentRDS(cls))) {
    PUNT(SingletonSProp-Persistent);
  }

  // The sprop must exist and be accessible from the singleton's context.
  auto const lookup = cls->findSProp(func->cls(), propertyName);
  if (UNLIKELY(lookup.prop == kInvalidSlot || !lookup.accessible)) {
    PUNT(SingletonSProp-Accessibility);
  }

  // Load the static property's current value.
  auto const propAddr = ldClsPropAddrKnown(env, cls, propertyName);
  auto const unboxed = gen(env, UnboxPtr, propAddr);
  auto const value = gen(env, LdMem, unboxed->type().deref(), unboxed);

  // If the property is still null, take the side exit.
  auto const nullCheck = gen(env, IsType, TNull, value);
  gen(env, JmpNZero, sideExit, nullCheck);

  // Otherwise the singleton is initialized: push it with an incref.
  pushIncRef(env, value);
}
/*
 * Return the vector immediate of the instruction at `opcode` as an
 * ImmVector.
 *
 * All handled immediate types (BLA/SLA/I32LA/BLLA/VSA) begin with an IVA
 * length prefix; for VSA the length is also the element count. The
 * instruction must have one such immediate (not_reached otherwise).
 */
ImmVector getImmVector(PC opcode) {
  auto const op = peek_op(opcode);
  auto const immCount = numImmediates(op);
  for (int idx = 0; idx < immCount; ++idx) {
    auto const argType = immType(op, idx);
    if (argType != BLA && argType != SLA && argType != I32LA &&
        argType != BLLA && argType != VSA) {
      continue;
    }
    // decode_iva advances the cursor past the length prefix, leaving it
    // pointing at the vector payload.
    PC cursor = getImmPtr(opcode, idx)->bytes;
    auto const len = decode_iva(cursor);
    return ImmVector(cursor, len, argType == VSA ? len : 0);
  }
  not_reached();
}
/*
 * Emit an inlined fast path for a "singleton" accessor backed by a static
 * local.
 *
 * `op` is the StaticLocInit instruction defining the local. Emits a load
 * of the cached static local; if it is uninitialized or null, jumps to a
 * side exit that retranslates without singleton inlining, otherwise pushes
 * the (increffed) value.
 */
void inlSingletonSLoc(IRGS& env, const Func* func, const Op* op) {
  assertx(*op == Op::StaticLocInit);

  // Side exit that disables singleton inlining on retranslation.
  TransFlags flags;
  flags.noinlineSingleton = true;
  auto sideExit = makeExit(env, flags);

  // The static local's name is immediate #1 of StaticLocInit.
  auto const name = func->unit()->lookupLitstrId(getImmPtr(op, 1)->u_SA);

  // Side exit if the static local is uninitialized.
  auto const box = gen(env, LdStaticLocCached, StaticLocName { func, name });
  gen(env, CheckStaticLocInit, sideExit, box);

  // Side exit if the static local holds null.
  auto const boxedVal = gen(env, LdRef, TInitCell, box);
  auto const nullCheck = gen(env, IsType, TInitNull, boxedVal);
  gen(env, JmpNZero, sideExit, nullCheck);

  // Otherwise the singleton is initialized: push it with an incref.
  pushIncRef(env, boxedVal);
}
/*
 * Return a writable pointer to the jump-target (BA) immediate of *instr,
 * or nullptr if the instruction has no such immediate.
 *
 * Switch instructions are excluded by assertion (their BLA immediate holds
 * many targets). IterBreak is special-cased: its jump offset lives after a
 * variable-length iterator vector rather than at a fixed immediate slot.
 */
Offset* instrJumpOffset(const Op* instr) {
  // Per-opcode bitmask: bit k is set iff immediate k has type BA. Built by
  // expanding OPCODES with IMM_BA mapped to 1 and every other immediate
  // type mapped to 0; ONE..FOUR pack up to four flags into one int8_t.
  static const int8_t jumpMask[] = {
#define IMM_NA 0
#define IMM_MA 0
#define IMM_IVA 0
#define IMM_I64A 0
#define IMM_DA 0
#define IMM_SA 0
#define IMM_AA 0
#define IMM_RATA 0
#define IMM_BA 1
#define IMM_BLA 0 // these are jump offsets, but must be handled specially
#define IMM_ILA 0
#define IMM_SLA 0
#define IMM_LA 0
#define IMM_IA 0
#define IMM_OA(x) 0
#define IMM_VSA 0
#define ONE(a) IMM_##a
#define TWO(a, b) (IMM_##a + 2 * IMM_##b)
#define THREE(a, b, c) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c)
#define FOUR(a, b, c, d) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c + 8 * IMM_##d)
#define O(name, imm, pop, push, flags) imm,
    OPCODES
#undef IMM_NA
#undef IMM_MA
#undef IMM_IVA
#undef IMM_I64A
#undef IMM_DA
#undef IMM_SA
#undef IMM_AA
#undef IMM_RATA
#undef IMM_LA
#undef IMM_IA
#undef IMM_BA
#undef IMM_BLA
#undef IMM_ILA
#undef IMM_SLA
#undef IMM_OA
#undef IMM_VSA
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef O
  };

  assert(!isSwitch(*instr)); // BLA doesn't work here

  if (Op(*instr) == OpIterBreak) {
    // Layout: opcode byte, uint32_t vector length, veclen (type, id)
    // uint32_t pairs, then the Offset. memcpy avoids an unaligned /
    // aliasing-violating read of the length from the bytecode stream.
    uint32_t veclen;
    std::memcpy(&veclen, instr + 1, sizeof veclen);
    assert(veclen > 0);
    auto const target = const_cast<Offset*>(
      reinterpret_cast<const Offset*>(
        reinterpret_cast<const uint32_t*>(instr + 1) + 2 * veclen + 1
      )
    );
    return target;
  }

  int mask = jumpMask[uint8_t(*instr)];
  if (mask == 0) { return nullptr; }

  // The mask is a single bit; decode it into an immediate index.
  int immNum;
  switch (mask) {
    case 0: return nullptr;  // unreachable: handled above
    case 1: immNum = 0; break;
    case 2: immNum = 1; break;
    case 4: immNum = 2; break;
    case 8: immNum = 3; break;
    default: assert(false); return nullptr;
  }

  return &getImmPtr(instr, immNum)->u_BA;
}
/*
 * Return a writable pointer to the jump-target (BA) immediate of the
 * instruction at origPC, or nullptr if it has no such immediate.
 *
 * Switch instructions are excluded by assertion. IterBreak is
 * special-cased: its offset lives after a variable-length iterator vector.
 */
Offset* instrJumpOffset(PC const origPC) {
  // Per-opcode bitmask: bit k is set iff immediate k has type BA. Built by
  // expanding OPCODES with IMM_BA mapped to 1 and every other immediate
  // type mapped to 0; ONE..FOUR pack up to four flags into one int8_t.
  static const int8_t jumpMask[] = {
#define IMM_NA 0
#define IMM_IVA 0
#define IMM_I64A 0
#define IMM_DA 0
#define IMM_SA 0
#define IMM_AA 0
#define IMM_RATA 0
#define IMM_BA 1
#define IMM_BLA 0 // these are jump offsets, but must be handled specially
#define IMM_ILA 0
#define IMM_SLA 0
#define IMM_LA 0
#define IMM_IA 0
#define IMM_OA(x) 0
#define IMM_VSA 0
#define IMM_KA 0
#define ONE(a) IMM_##a
#define TWO(a, b) (IMM_##a + 2 * IMM_##b)
#define THREE(a, b, c) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c)
#define FOUR(a, b, c, d) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c + 8 * IMM_##d)
#define O(name, imm, pop, push, flags) imm,
    OPCODES
#undef IMM_NA
#undef IMM_IVA
#undef IMM_I64A
#undef IMM_DA
#undef IMM_SA
#undef IMM_AA
#undef IMM_RATA
#undef IMM_LA
#undef IMM_IA
#undef IMM_BA
#undef IMM_BLA
#undef IMM_ILA
#undef IMM_SLA
#undef IMM_OA
#undef IMM_VSA
#undef IMM_KA
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef O
  };

  auto pc = origPC;
  auto const op = decode_op(pc);
  assert(!isSwitch(op)); // BLA doesn't work here

  if (op == OpIterBreak) {
    // decode_raw has already advanced pc past the length, so pc now points
    // at veclen (type, id) uint32_t pairs followed by the Offset.
    auto const veclen = decode_raw<uint32_t>(pc);
    assert(veclen > 0);
    auto const target = const_cast<Offset*>(
      reinterpret_cast<const Offset*>(
        reinterpret_cast<const uint32_t*>(pc) + 2 * veclen
      )
    );
    return target;
  }

  int mask = jumpMask[size_t(op)];
  if (mask == 0) { return nullptr; }

  // The mask is a single bit; decode it into an immediate index.
  int immNum;
  switch (mask) {
    case 0: return nullptr;  // unreachable: handled above
    case 1: immNum = 0; break;
    case 2: immNum = 1; break;
    case 4: immNum = 2; break;
    case 8: immNum = 3; break;
    default: assert(false); return nullptr;
  }

  return &getImmPtr(origPC, immNum)->u_BA;
}
Offset* instrJumpOffset(Op* instr) { static const int8_t jumpMask[] = { #define NA 0 #define MA 0 #define IVA 0 #define I64A 0 #define DA 0 #define SA 0 #define AA 0 #define BA 1 #define LA 0 #define IA 0 #define OA 0 #define ONE(a) a #define TWO(a, b) (a + 2 * b) #define THREE(a, b, c) (a + 2 * b + 4 * c) #define FOUR(a, b, c, d) (a + 2 * b + 4 * c + 8 * d) #define O(name, imm, pop, push, flags) imm, OPCODES #undef NA #undef MA #undef IVA #undef I64A #undef DA #undef SA #undef AA #undef LA #undef IA #undef BA #undef OA #undef ONE #undef TWO #undef THREE #undef FOUR #undef O }; assert(!isSwitch(*instr)); if (Op(*instr) == OpIterBreak) { uint32_t veclen = *(uint32_t *)(instr + 1); assert(veclen > 0); Offset* target = (Offset *)((uint32_t *)(instr + 1) + 2 * veclen + 1); return target; } int mask = jumpMask[uint8_t(*instr)]; if (mask == 0) { return nullptr; } int immNum; switch (mask) { case 0: return nullptr; case 1: immNum = 0; break; case 2: immNum = 1; break; case 4: immNum = 2; break; case 8: immNum = 3; break; default: assert(false); return nullptr; } return &getImmPtr(instr, immNum)->u_BA; }
/*
 * Try to inline a call to a "singleton" accessor function.
 *
 * `i` must be an FPushFuncD/FPushClsMethodD instruction and `funcd` the
 * callee it pushes. Matches the callee's bytecode against two known
 * singleton shapes — a static-local-backed getter and a static-property-
 * backed getter — and, on a match, emits the specialized translation.
 * Returns true iff a pattern matched and IR/codegen succeeded.
 */
bool IRTranslator::tryTranslateSingletonInline(const NormalizedInstruction& i,
                                               const Func* funcd) {
  using Atom = BCPattern::Atom;
  using Captures = BCPattern::CaptureVec;

  if (!funcd) return false;

  // Make sure we have an acceptable FPush and non-null callee.
  assert(i.op() == Op::FPushFuncD ||
         i.op() == Op::FPushClsMethodD);

  auto fcall = i.nextSk();

  // Check if the next instruction is an acceptable FCall.
  if ((fcall.op() != Op::FCall && fcall.op() != Op::FCallD) ||
      funcd->isResumable() ||
      funcd->isReturnRef()) {
    return false;
  }

  // First, check for the static local singleton pattern...

  // Lambda to check if CGetL and StaticLocInit refer to the same local.
  // captures[0] is the StaticLocInit recorded by .capture() below.
  auto has_same_local = [] (PC pc, const Captures& captures) {
    if (captures.size() == 0) return false;

    auto cgetl = (const Op*)pc;
    auto sli = (const Op*)captures[0];

    assert(*cgetl == Op::CGetL);
    assert(*sli == Op::StaticLocInit);

    return (getImm(sli, 0).u_IVA == getImm(cgetl, 0).u_IVA);
  };

  auto cgetl = Atom(Op::CGetL).onlyif(has_same_local);
  auto retc = Atom(Op::RetC);

  // Look for a static local singleton pattern.
  auto result = BCPattern {
    Atom(Op::Null),
    Atom(Op::StaticLocInit).capture(),
    Atom(Op::IsTypeL),
    // Accept either branch polarity of the null check.
    Atom::alt(
      Atom(Op::JmpZ).taken({cgetl, retc}),
      Atom::seq(Atom(Op::JmpNZ), cgetl, retc)
    )
  }.ignore(
    {Op::AssertRATL, Op::AssertRATStk}
  ).matchAnchored(funcd);

  if (result.found()) {
    try {
      hhbcTrans().emitSingletonSLoc(
        funcd,
        (const Op*)result.getCapture(0)
      );
    } catch (const FailedIRGen& e) {
      return false;
    } catch (const FailedCodeGen& e) {
      return false;
    }
    TRACE(1, "[singleton-sloc] %s <- %s\n",
          funcd->fullName()->data(),
          fcall.func()->fullName()->data());
    return true;
  }

  // Not found; check for the static property pattern.

  // Factory for String atoms that are required to match another captured
  // String opcode. NOTE: the parameter `i` (a capture index) shadows the
  // NormalizedInstruction parameter of the enclosing function.
  auto same_string_as = [&] (int i) {
    return Atom(Op::String).onlyif([=] (PC pc, const Captures& captures) {
      auto string1 = (const Op*)pc;
      auto string2 = (const Op*)captures[i];

      assert(*string1 == Op::String);
      assert(*string2 == Op::String);

      auto const unit = funcd->unit();
      auto sd1 = unit->lookupLitstrId(getImmPtr(string1, 0)->u_SA);
      auto sd2 = unit->lookupLitstrId(getImmPtr(string2, 0)->u_SA);

      return (sd1 && sd1 == sd2);
    });
  };

  // Capture order below: capture(0) = property name, capture(1) = class.
  auto stringProp = same_string_as(0);
  auto stringCls = same_string_as(1);
  auto agetc = Atom(Op::AGetC);
  auto cgets = Atom(Op::CGetS);

  // Look for a class static singleton pattern.
  result = BCPattern {
    Atom(Op::String).capture(),
    Atom(Op::String).capture(),
    Atom(Op::AGetC),
    Atom(Op::CGetS),
    Atom(Op::IsTypeC),
    // Accept either branch polarity of the null check.
    Atom::alt(
      Atom(Op::JmpZ).taken({stringProp, stringCls, agetc, cgets, retc}),
      Atom::seq(Atom(Op::JmpNZ), stringProp, stringCls, agetc, cgets, retc)
    )
  }.ignore(
    {Op::AssertRATL, Op::AssertRATStk}
  ).matchAnchored(funcd);

  if (result.found()) {
    try {
      hhbcTrans().emitSingletonSProp(
        funcd,
        (const Op*)result.getCapture(1),
        (const Op*)result.getCapture(0)
      );
    } catch (const FailedIRGen& e) {
      return false;
    } catch (const FailedCodeGen& e) {
      return false;
    }
    TRACE(1, "[singleton-sprop] %s <- %s\n",
          funcd->fullName()->data(),
          fcall.func()->fullName()->data());
    return true;
  }

  return false;
}
/*
 * Return a writable pointer to the jump-target (BA) immediate of the
 * instruction at origPC, or nullptr if it has no such immediate.
 *
 * Switch instructions are excluded by assertion. In this bytecode revision
 * IterBreak's offset is simply its first immediate, so no vector-skipping
 * is needed.
 */
Offset* instrJumpOffset(const PC origPC) {
  // Per-opcode bitmask: bit k is set iff immediate k has type BA. Built by
  // expanding OPCODES with IMM_BA mapped to 1 and every other immediate
  // type mapped to 0; ONE..FIVE pack up to five flags into one int8_t.
  static const int8_t jumpMask[] = {
#define IMM_NA 0
#define IMM_IVA 0
#define IMM_I64A 0
#define IMM_DA 0
#define IMM_SA 0
#define IMM_AA 0
#define IMM_RATA 0
#define IMM_BA 1
#define IMM_BLA 0 // these are jump offsets, but must be handled specially
#define IMM_ILA 0
#define IMM_I32LA 0
#define IMM_BLLA 0
#define IMM_SLA 0
#define IMM_LA 0
#define IMM_IA 0
#define IMM_CAR 0
#define IMM_CAW 0
#define IMM_OA(x) 0
#define IMM_VSA 0
#define IMM_KA 0
#define IMM_LAR 0
#define IMM_FCA 0
#define ONE(a) IMM_##a
#define TWO(a, b) (IMM_##a + 2 * IMM_##b)
#define THREE(a, b, c) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c)
#define FOUR(a, b, c, d) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c + 8 * IMM_##d)
#define FIVE(a, b, c, d, e) (IMM_##a + 2 * IMM_##b + 4 * IMM_##c + 8 * IMM_##d + 16 * IMM_##e)
#define O(name, imm, pop, push, flags) imm,
    OPCODES
#undef IMM_NA
#undef IMM_IVA
#undef IMM_I64A
#undef IMM_DA
#undef IMM_SA
#undef IMM_AA
#undef IMM_RATA
#undef IMM_LA
#undef IMM_IA
#undef IMM_CAR
#undef IMM_CAW
#undef IMM_BA
#undef IMM_BLA
#undef IMM_ILA
#undef IMM_I32LA
#undef IMM_BLLA
#undef IMM_SLA
#undef IMM_OA
#undef IMM_VSA
#undef IMM_KA
#undef IMM_LAR
#undef IMM_FCA
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef FIVE
#undef O
  };

  auto pc = origPC;
  auto const op = decode_op(pc);
  assertx(!isSwitch(op)); // BLA doesn't work here

  if (op == OpIterBreak) {
    // offset is imm number 0; decode_op already advanced pc past the
    // opcode, so pc points directly at the Offset.
    return const_cast<Offset*>(reinterpret_cast<const Offset*>(pc));
  }

  int mask = jumpMask[size_t(op)];
  if (mask == 0) { return nullptr; }

  // The mask is a single bit; decode it into an immediate index.
  int immNum;
  switch (mask) {
    case 0: return nullptr;  // unreachable: handled above
    case 1: immNum = 0; break;
    case 2: immNum = 1; break;
    case 4: immNum = 2; break;
    case 8: immNum = 3; break;
    case 16: immNum = 4; break;
    default: assertx(false); return nullptr;
  }

  return &getImmPtr(origPC, immNum)->u_BA;
}