void annotate(NormalizedInstruction* i) {
  switch (i->op()) {
    case OpFPushObjMethodD:
    case OpFPushClsMethodD:
    case OpFPushClsMethodF:
    case OpFPushFuncD: {
      // When we push predictable action records, we can use a simpler
      // translation for their corresponding FCall.
      const StringData* className = nullptr;
      const StringData* funcName = nullptr;
      if (i->op() == OpFPushFuncD) {
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
      } else if (i->op() == OpFPushObjMethodD) {
        if (i->inputs[0]->valueType() != KindOfObject) break;
        const Class* cls = i->inputs[0]->rtt.valueClass();
        if (!cls) break;
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
        className = cls->name();
      } else if (i->op() == OpFPushClsMethodF) {
        if (!i->inputs[1]->isString() ||
            i->inputs[1]->rtt.valueString() == nullptr ||
            i->inputs[0]->valueType() != KindOfClass) {
          break;
        }
        const Class* cls = i->inputs[0]->rtt.valueClass();
        if (!cls) break;
        funcName = i->inputs[1]->rtt.valueString();
        className = cls->name();
      } else {
        assert(i->op() == OpFPushClsMethodD);
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
        className = curUnit()->lookupLitstrId(i->imm[2].u_SA);
      }
      assert(funcName->isStatic());
      recordActRecPush(*i, curUnit(), funcName, className,
                       i->op() == OpFPushClsMethodD ||
                       i->op() == OpFPushClsMethodF);
    } break;
    case OpFCall:
    case OpFCallArray: {
      CallRecord callRec;
      if (mapGet(s_callDB, i->source, &callRec)) {
        if (callRec.m_type == Function) {
          i->funcd = callRec.m_func;
        } else {
          assert(callRec.m_type == EncodedNameAndArgs);
          i->funcName = callRec.m_encodedName;
        }
      } else {
        i->funcName = nullptr;
      }
    } break;
    default:
      break;
  }
}
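// A minimal, self-contained sketch of the side-table pattern annotate() relies
// on: the FPush* site records what it learned about the upcoming call, and the
// FCall site looks that record up by source location. All names here (SrcLoc,
// CallInfo, g_callDB, recordPush, lookupCall) are hypothetical stand-ins, not
// HHVM's actual types.
#include <cstdint>
#include <unordered_map>

struct SrcLoc { uint32_t funcId; uint32_t offset; };

inline bool operator==(const SrcLoc& a, const SrcLoc& b) {
  return a.funcId == b.funcId && a.offset == b.offset;
}

struct SrcLocHash {
  size_t operator()(const SrcLoc& s) const {
    return size_t(s.funcId) * 1000003u + s.offset;
  }
};

struct CallInfo { const void* predictedFunc; };

static std::unordered_map<SrcLoc, CallInfo, SrcLocHash> g_callDB;

// Push site: remember the predicted callee for the matching call.
inline void recordPush(SrcLoc at, const void* predictedFunc) {
  g_callDB[at] = CallInfo{predictedFunc};
}

// Call site: consume the prediction if one was recorded, else nullptr.
inline const void* lookupCall(SrcLoc at) {
  auto const it = g_callDB.find(at);
  return it != g_callDB.end() ? it->second.predictedFunc : nullptr;
}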
void interpOne(IRGS& env,
               folly::Optional<Type> outType,
               int popped,
               int pushed,
               InterpOneData& idata) {
  auto const unit = curUnit(env);
  spillStack(env);
  env.irb->exceptionStackBoundary();
  auto const op = unit->getOp(bcOff(env));

  idata.bcOff = bcOff(env);
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode = op;

  gen(
    env,
    opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
    outType,
    idata,
    sp(env),
    fp(env)
  );
  assertx(env.irb->stackDeficit() == 0);
}
void endRegion(IRGS& env) {
  auto const curSk = curSrcKey(env);
  if (!instrAllowsFallThru(curSk.op())) return; // nothing to do here
  auto const nextSk = curSk.advanced(curUnit(env));
  endRegion(env, nextSk);
}
void interpOne(IRGS& env,
               folly::Optional<Type> outType,
               int popped,
               int pushed,
               InterpOneData& idata) {
  auto const unit = curUnit(env);
  spillStack(env);
  env.irb->exceptionStackBoundary();
  auto const op = unit->getOpcode(bcOff(env));

  auto& iInfo = getInstrInfo(op);
  if (iInfo.type == jit::InstrFlags::OutFDesc) {
    env.fpiStack.push(FPIInfo { sp(env), env.irb->spOffset(), nullptr });
  } else if (isFCallStar(op) && !env.fpiStack.empty()) {
    env.fpiStack.pop();
  }

  idata.bcOff = bcOff(env);
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode = op;

  gen(
    env,
    opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
    outType,
    idata,
    sp(env),
    fp(env)
  );
  assertx(env.irb->stackDeficit() == 0);
}
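// Sketch (with invented names) of the bookkeeping the FPI branch above does:
// interpreting an instruction that produces an OutFDesc leaves a pre-live
// activation record on the stack, so the tracker pushes an entry; interpreting
// the matching FCall* consumes it. FrameTracker and TrackedFrame are
// illustrative only, not HHVM types.
#include <stack>

struct TrackedFrame { int spOffset; };  // where the ActRec sits, sp-relative

struct FrameTracker {
  std::stack<TrackedFrame> pending;
  void onFPush(int spOffset) { pending.push(TrackedFrame{spOffset}); }
  void onFCall() { if (!pending.empty()) pending.pop(); }
};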
void emitSSwitch(HTS& env, const ImmVector& iv) {
  const int numCases = iv.size() - 1;

  /*
   * We use a fast path translation with a hashtable if none of the
   * cases are numeric strings and if the input is actually a string.
   *
   * Otherwise we do a linear search through the cases calling string
   * conversion routines.
   */
  const bool fastPath =
    topC(env)->isA(Type::Str) &&
    std::none_of(iv.strvec(), iv.strvec() + numCases,
      [&](const StrVecItem& item) {
        return curUnit(env)->lookupLitstrId(item.str)->isNumeric();
      }
    );

  auto const testVal = popC(env);

  std::vector<LdSSwitchData::Elm> cases(numCases);
  for (int i = 0; i < numCases; ++i) {
    auto const& kv = iv.strvec()[i];
    cases[i].str  = curUnit(env)->lookupLitstrId(kv.str);
    cases[i].dest = SrcKey{curSrcKey(env), bcOff(env) + kv.dest};
  }

  LdSSwitchData data;
  data.numCases  = numCases;
  data.cases     = &cases[0];
  data.defaultSk = SrcKey{curSrcKey(env),
                          bcOff(env) + iv.strvec()[iv.size() - 1].dest};

  auto const dest = gen(env,
                        fastPath ? LdSSwitchDestFast
                                 : LdSSwitchDestSlow,
                        data,
                        testVal);
  gen(env, DecRef, testVal);
  gen(env, AdjustSP,
      IRSPOffsetData { offsetFromIRSP(env, BCSPOffset{0}) },
      sp(env));
  gen(env, JmpSSwitchDest, dest, sp(env));
}
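// Standalone illustration of the fast-path test above: a hashtable lookup is
// only valid when no case label looks numeric, because numeric strings compare
// like integers under PHP semantics and can match non-identical spellings.
// looksNumeric() is a simplified stand-in for isNumeric(); real numeric-string
// detection also handles signs, whitespace, and floats.
#include <algorithm>
#include <cctype>
#include <string>
#include <vector>

static bool looksNumeric(const std::string& s) {
  return !s.empty() &&
         std::all_of(s.begin(), s.end(),
                     [](unsigned char c) { return std::isdigit(c) != 0; });
}

static bool canUseHashTable(const std::vector<std::string>& caseLabels) {
  return std::none_of(caseLabels.begin(), caseLabels.end(), looksNumeric);
}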
void TranslatorX64::fCallArrayHelper(const Offset pcOff, const Offset pcNext) {
  DECLARE_FRAME_POINTER(framePtr);
  ActRec* fp = (ActRec*)framePtr->m_savedRbp;

  VMExecutionContext* ec = g_vmContext;
  ec->m_fp = fp;
  ec->m_stack.top() = sp;
  ec->m_pc = curUnit()->at(pcOff);
  PC pc = curUnit()->at(pcNext);

  tl_regState = REGSTATE_CLEAN;
  bool runFunc = ec->doFCallArray(pc);
  sp = ec->m_stack.top();
  tl_regState = REGSTATE_DIRTY;
  if (!runFunc) return;

  ec->m_fp->m_savedRip = framePtr->m_savedRip;
  // smash our return and frame pointer chain
  framePtr->m_savedRip = (uint64_t)ec->m_fp->m_func->getFuncBody();
  framePtr->m_savedRbp = (uint64_t)ec->m_fp;
}
void annotate(NormalizedInstruction* i) {
  switch (i->op()) {
    case OpFPushObjMethodD:
    case OpFPushClsMethodD:
    case OpFPushClsMethodF:
    case OpFPushFuncD: {
      // When we push predictable action records, we can use a simpler
      // translation for their corresponding FCall.
      SrcKey next(i->source);
      next.advance(curUnit());
      const StringData* className = NULL;
      const StringData* funcName = NULL;
      if (i->op() == OpFPushFuncD) {
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
      } else if (i->op() == OpFPushObjMethodD) {
        if (i->inputs[0]->valueType() != KindOfObject) break;
        const Class* cls = i->inputs[0]->rtt.valueClass();
        if (!cls) break;
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
        className = cls->name();
      } else if (i->op() == OpFPushClsMethodF) {
        if (i->inputs[1]->rtt.valueString() == NULL ||
            i->inputs[0]->valueType() != KindOfClass) {
          break;
        }
        const Class* cls = i->inputs[0]->rtt.valueClass();
        if (!cls) break;
        funcName = i->inputs[1]->rtt.valueString();
        className = cls->name();
      } else {
        ASSERT(i->op() == OpFPushClsMethodD);
        funcName = curUnit()->lookupLitstrId(i->imm[1].u_SA);
        className = curUnit()->lookupLitstrId(i->imm[2].u_SA);
      }
      ASSERT(funcName->isStatic());
      const FPIEnt* fe = curFunc()->findFPI(next.m_offset);
      ASSERT(fe);
      recordActRecPush(i->source, curUnit(), fe, funcName, className,
                       i->op() == OpFPushClsMethodD ||
                       i->op() == OpFPushClsMethodF);
    } break;
    case OpFCall: {
      CallRecord callRec;
      if (mapGet(s_callDB, i->source, &callRec)) {
        if (callRec.m_type == Function) {
          i->funcd = callRec.m_func;
        } else {
          ASSERT(callRec.m_type == EncodedNameAndArgs);
          i->funcName = callRec.m_encodedName;
        }
      } else {
        i->funcName = NULL;
      }
    } break;
    default:
      break;
  }
}
// All accesses to the stack and locals in this function use DataTypeGeneric so
// this function should only be used for inspecting state; when the values are
// actually used they must be constrained further.
Type predictedTypeFromLocation(HTS& env, const Location& loc) {
  switch (loc.space) {
    case Location::Stack: {
      auto i = loc.offset;
      assert(i >= 0);
      if (i < env.irb->evalStack().size()) {
        return top(env, i, DataTypeGeneric)->type();
      } else {
        auto stackVal = getStackValue(
          env.irb->sp(),
          i - env.irb->evalStack().size() + env.irb->stackDeficit()
        );
        if (stackVal.knownType.isBoxed() &&
            !(stackVal.predictedInner <= Type::Bottom)) {
          return ldRefReturn(stackVal.predictedInner.unbox()).box();
        }
        return stackVal.knownType;
      }
    } break;
    case Location::Local:
      return env.irb->predictedLocalType(loc.offset);
    case Location::Litstr:
      return Type::cns(curUnit(env)->lookupLitstrId(loc.offset));
    case Location::Litint:
      return Type::cns(loc.offset);
    case Location::This:
      // Don't specialize $this for cloned closures which may have been
      // re-bound.
      if (curFunc(env)->hasForeignThis()) return Type::Obj;
      if (auto const cls = curFunc(env)->cls()) {
        return Type::Obj.specialize(cls);
      }
      return Type::Obj;
    case Location::Iter:
    case Location::Invalid:
      break;
  }
  not_reached();
}
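// Toy model of the known-vs-predicted split this function exposes: the proven
// type is always safe to report, and a profiling prediction is only worth
// returning when it strictly refines what is proven. The three-level lattice
// and all names here are invented for illustration.
enum class Ty { Bottom, Int, Str, Cell };  // Bottom <: Int, Str <: Cell

inline bool subtypeOf(Ty a, Ty b) {
  return a == b || a == Ty::Bottom || b == Ty::Cell;
}

struct SlotTypes {
  Ty known;      // proven by the type system
  Ty predicted;  // profiling guess; must be guarded before it is relied on
};

inline Ty typeForInspection(const SlotTypes& s) {
  // Prefer the prediction only when it is strictly more precise.
  if (subtypeOf(s.predicted, s.known) && s.predicted != s.known) {
    return s.predicted;
  }
  return s.known;
}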
void interpOne(IRGS& env,
               folly::Optional<Type> outType,
               int popped,
               int pushed,
               InterpOneData& idata) {
  auto const unit = curUnit(env);
  auto const op = unit->getOp(bcOff(env));

  idata.bcOff = bcOff(env);
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode = op;

  gen(
    env,
    opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
    outType,
    idata,
    sp(env),
    fp(env)
  );
}
// All accesses to the stack and locals in this function use DataTypeGeneric so
// this function should only be used for inspecting state; when the values are
// actually used they must be constrained further.
Type predictedTypeFromLocation(IRGS& env, const Location& loc) {
  switch (loc.space) {
    case Location::Stack: {
      auto i = loc.bcRelOffset;
      assertx(i >= 0);
      if (i < env.irb->evalStack().size()) {
        return topType(env, i, DataTypeGeneric);
      } else {
        auto stackTy = env.irb->stackType(
          offsetFromIRSP(env, i),
          DataTypeGeneric
        );
        if (stackTy <= Type::BoxedCell) {
          return env.irb->stackInnerTypePrediction(
            offsetFromIRSP(env, i)).box();
        }
        return stackTy;
      }
    } break;
    case Location::Local:
      return env.irb->predictedLocalType(loc.offset);
    case Location::Litstr:
      return Type::cns(curUnit(env)->lookupLitstrId(loc.offset));
    case Location::Litint:
      return Type::cns(loc.offset);
    case Location::This:
      // Don't specialize $this for cloned closures which may have been
      // re-bound.
      if (curFunc(env)->hasForeignThis()) return Type::Obj;
      if (auto const cls = curFunc(env)->cls()) {
        return Type::SubObj(cls);
      }
      return Type::Obj;
    case Location::Iter:
    case Location::Invalid:
      break;
  }
  not_reached();
}
void emitNewStructArray(HTS& env, const ImmVector& immVec) {
  auto const numArgs = immVec.size();
  auto const ids = immVec.vec32();

  // The NewStructArray opcode's helper needs array values passed to it
  // via the stack. We use spillStack() to flush the eval stack and
  // obtain a pointer to the topmost item; if over-flushing becomes
  // a problem then we should refactor the NewStructArray opcode to
  // take its values directly as SSA operands.
  spillStack(env);

  NewStructData extra;
  extra.offset  = offsetFromSP(env, 0);
  extra.numKeys = numArgs;
  extra.keys    = new (env.unit.arena()) StringData*[numArgs];
  for (auto i = size_t{0}; i < numArgs; ++i) {
    extra.keys[i] = curUnit(env)->lookupLitstrId(ids[i]);
  }

  discard(env, numArgs);
  push(env, gen(env, NewStructArray, extra, sp(env)));
}
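// The keys array above is carved out of the IR unit's arena, so it lives
// exactly as long as the compilation unit and never needs an individual
// delete. A minimal bump-style allocator with that ownership model might look
// like this; Arena here is a hypothetical sketch, not HHVM's arena.
#include <cstddef>
#include <cstdlib>
#include <vector>

struct Arena {
  std::vector<void*> blocks;
  ~Arena() {
    for (auto* b : blocks) std::free(b);  // everything dies with the arena
  }
  void* alloc(size_t bytes) {
    void* p = std::malloc(bytes);
    blocks.push_back(p);
    return p;
  }
  // Uninitialized storage for n objects; only suitable for trivially
  // destructible T, e.g. an array of raw pointers like StringData*.
  template <class T>
  T* allocArray(size_t n) {
    return static_cast<T*>(alloc(n * sizeof(T)));
  }
};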
void emitAwait(IRGS& env, int32_t numIters) {
  auto const resumeOffset = nextBcOff(env);
  assertx(curFunc(env)->isAsync());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Await-AsyncGenerator);

  auto const exitSlow = makeExitSlow(env);

  if (!topC(env)->isA(TObj)) PUNT(Await-NonObject);

  auto const child = popC(env);
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, child));

  // cns() would ODR-use these
  auto const kSucceeded = c_WaitHandle::STATE_SUCCEEDED;
  auto const kFailed    = c_WaitHandle::STATE_FAILED;

  auto const state = gen(env, LdWHState, child);

  /*
   * HHBBC may have proven something about the inner type of this wait handle.
   *
   * So, we may have an assertion on the type of the top of the stack after
   * this instruction. We know the next bytecode instruction is reachable from
   * fallthrough on the Await, so if it is an AssertRATStk 0, anything coming
   * out of the wait handle must be a subtype of that type, so this is a safe
   * and conservative way to do this optimization (even if our successor
   * bytecode offset is a jump target from things we aren't thinking about
   * here).
   */
  auto const knownTy = [&] {
    auto pc = curUnit(env)->at(resumeOffset);
    if (*reinterpret_cast<const Op*>(pc) != Op::AssertRATStk) return TInitCell;
    ++pc;
    auto const stkLoc = decodeVariableSizeImm(&pc);
    if (stkLoc != 0) return TInitCell;
    auto const rat = decodeRAT(curUnit(env), pc);
    auto const ty = ratToAssertType(env, rat);
    return ty ? *ty : TInitCell;
  }();

  ifThenElse(
    env,
    [&] (Block* taken) {
      auto const succeeded = gen(env, EqInt, state, cns(env, kSucceeded));
      gen(env, JmpNZero, taken, succeeded);
    },
    [&] { // Next: the wait handle is not finished, we need to suspend
      auto const failed = gen(env, EqInt, state, cns(env, kFailed));
      gen(env, JmpNZero, exitSlow, failed);
      if (resumed(env)) {
        implAwaitR(env, child, resumeOffset);
      } else {
        implAwaitE(env, child, resumeOffset, numIters);
      }
    },
    [&] { // Taken: retrieve the result from the wait handle
      auto const res = gen(env, LdWHResult, knownTy, child);
      gen(env, IncRef, res);
      gen(env, DecRef, child);
      push(env, res);
    }
  );
}
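// Self-contained sketch of the "peek at the fall-through bytecode" trick in
// the knownTy lambda above: if the next instruction asserts a type for stack
// slot 0, the value the Await pushes can be typed more precisely. The opcode
// value and the one-byte immediates are invented for illustration; the real
// encoding uses variable-size immediates and repo-authoritative type data.
#include <cstdint>

enum : uint8_t { OP_ASSERT_STK = 0x42 };  // hypothetical opcode byte

struct Assertion {
  bool valid;
  uint8_t typeTag;  // asserted type, as an opaque tag
};

inline Assertion peekStackAssertion(const uint8_t* pc) {
  if (pc[0] != OP_ASSERT_STK) return {false, 0};
  uint8_t const slot = pc[1];        // which stack slot the assertion covers
  if (slot != 0) return {false, 0};  // only slot 0 is the awaited result
  return {true, pc[2]};
}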
void emitFile(HTS& env) { push(env, cns(env, curUnit(env)->filepath())); }
void emitDir(HTS& env) { push(env, cns(env, curUnit(env)->dirpath())); }
std::string show(const IRGS& irgs) {
  std::ostringstream out;
  auto header = [&](const std::string& str) {
    out << folly::format("+{:-^102}+\n", str);
  };

  const int32_t frameCells = resumed(irgs)
    ? 0
    : curFunc(irgs)->numSlotsInFrame();
  auto const stackDepth = irgs.irb->fs().bcSPOff().offset - frameCells;
  assertx(stackDepth >= 0);
  auto spOffset = stackDepth;
  auto elem = [&](const std::string& str) {
    out << folly::format("| {:<100} |\n",
                         folly::format("{:>2}: {}",
                                       stackDepth - spOffset, str));
    assertx(spOffset > 0);
    --spOffset;
  };

  auto fpi = curFunc(irgs)->findFPI(bcOff(irgs));
  auto checkFpi = [&]() {
    if (fpi && spOffset + frameCells == fpi->m_fpOff) {
      auto fpushOff = fpi->m_fpushOff;
      auto after = fpushOff + instrLen(curUnit(irgs)->at(fpushOff));
      std::ostringstream msg;
      msg << "ActRec from ";
      curUnit(irgs)->prettyPrint(
        msg,
        Unit::PrintOpts().range(fpushOff, after)
                         .noLineNumbers()
                         .indent(0)
                         .noFuncs()
      );
      auto msgStr = msg.str();
      assertx(msgStr.back() == '\n');
      msgStr.erase(msgStr.size() - 1);
      for (unsigned i = 0; i < kNumActRecCells; ++i) elem(msgStr);
      fpi = fpi->m_parentIndex != -1
        ? &curFunc(irgs)->fpitab()[fpi->m_parentIndex]
        : nullptr;
      return true;
    }
    return false;
  };

  header(folly::format(" {} stack element(s): ", stackDepth).str());
  assertx(spOffset <= curFunc(irgs)->maxStackCells());

  for (auto i = 0; i < spOffset; ) {
    if (checkFpi()) {
      i += kNumActRecCells;
      continue;
    }

    auto const spRel  = offsetFromIRSP(irgs, BCSPRelOffset{i});
    auto const stkTy  = irgs.irb->stack(spRel, DataTypeGeneric).type;
    auto const stkVal = irgs.irb->stack(spRel, DataTypeGeneric).value;

    std::string elemStr;
    if (stkTy == TStkElem) {
      elemStr = "unknown";
    } else if (stkVal) {
      elemStr = stkVal->inst()->toString();
    } else {
      elemStr = stkTy.toString();
    }

    auto const irSPRel = BCSPRelOffset{i}
      .to<FPInvOffset>(irgs.irb->fs().bcSPOff());
    auto const predicted = predictedType(irgs, Location::Stack { irSPRel });
    if (predicted < stkTy) {
      elemStr += folly::sformat(" (predict: {})", predicted);
    }

    elem(elemStr);
    ++i;
  }
  header("");
  out << "\n";

  header(folly::format(" {} local(s) ", curFunc(irgs)->numLocals()).str());
  for (unsigned i = 0; i < curFunc(irgs)->numLocals(); ++i) {
    auto const localValue = irgs.irb->local(i, DataTypeGeneric).value;
    auto const localTy = localValue
      ? localValue->type()
      : irgs.irb->local(i, DataTypeGeneric).type;
    auto str = localValue
      ? localValue->inst()->toString()
      : localTy.toString();
    auto const predicted = irgs.irb->fs().local(i).predictedType;
    if (predicted < localTy) str += folly::sformat(" (predict: {})", predicted);

    if (localTy <= TBoxedCell) {
      auto const pred = irgs.irb->predictedLocalInnerType(i);
      if (pred != TBottom) {
        str += folly::sformat(" (predict inner: {})", pred.toString());
      }
    }

    out << folly::format("| {:<100} |\n",
                         folly::format("{:>2}: {}", i, str));
  }
  header("");
  return out.str();
}
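// A small standard-library version of the box layout show() builds with
// folly::format: fixed-width rows framed by '|' and a dashed, centered header
// line. This is a sketch that assumes the title fits in the 102 columns; it
// is not the HHVM helper itself.
#include <iomanip>
#include <sstream>
#include <string>

static std::string boxRow(const std::string& body) {
  std::ostringstream out;
  out << "| " << std::left << std::setw(100) << body << " |\n";
  return out.str();
}

static std::string boxHeader(const std::string& title) {
  std::string dashes(102, '-');
  auto const pos = (dashes.size() - title.size()) / 2;  // center the title
  dashes.replace(pos, title.size(), title);
  return "+" + dashes + "+\n";
}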
void endRegion(HTS& env) {
  auto const nextSk = curSrcKey(env).advanced(curUnit(env));
  endRegion(env, nextSk.offset());
}