TransCFG::TransCFG(FuncId funcId,
                   const ProfData* profData,
                   const SrcDB& srcDB,
                   const TcaTransIDMap& jmpToTransID) {
  assertx(profData);

  // Add nodes.
  for (auto tid : profData->funcProfTransIDs(funcId)) {
    assertx(profData->transRegion(tid) != nullptr);
    // This will skip DV Funclets if they were already
    // retranslated w/ the prologues:
    if (!profData->optimized(profData->transSrcKey(tid))) {
      int64_t weight = profData->absTransCounter(tid);
      addNode(tid, weight);
    }
  }

  // Add arcs.
  for (TransID dstId : nodes()) {
    SrcKey dstSK = profData->transSrcKey(dstId);
    RegionDesc::BlockPtr dstBlock = profData->transRegion(dstId)->entry();
    FTRACE(5, "TransCFG: adding incoming arcs in dstId = {}\n", dstId);
    TransIDSet predIDs = findPredTrans(dstId, profData, srcDB, jmpToTransID);
    for (auto predId : predIDs) {
      if (hasNode(predId)) {
        auto predPostConds =
          profData->transRegion(predId)->blocks().back()->postConds();
        SrcKey predSK = profData->transSrcKey(predId);
        if (preCondsAreSatisfied(dstBlock, predPostConds) &&
            predSK.resumed() == dstSK.resumed()) {
          FTRACE(5, "TransCFG: adding arc {} -> {} ({} -> {})\n",
                 predId, dstId, showShort(predSK), showShort(dstSK));
          addArc(predId, dstId, TransCFG::Arc::kUnknownWeight);
        }
      }
    }
  }

  // Infer arc weights.
  bool changed;
  do {
    changed = false;
    for (TransID tid : nodes()) {
      int64_t nodeWeight = weight(tid);
      if (inferredArcWeight(inArcs(tid),  nodeWeight)) changed = true;
      if (inferredArcWeight(outArcs(tid), nodeWeight)) changed = true;
    }
  } while (changed);

  // Guess weight for non-inferred arcs.
  for (TransID tid : nodes()) {
    for (auto arc : outArcs(tid)) {
      if (arc->weight() == Arc::kUnknownWeight) {
        arc->setGuessed();
        int64_t arcWgt = std::min(weight(arc->src()), weight(arc->dst())) / 2;
        arc->setWeight(arcWgt);
      }
    }
  }
}
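// The fixed-point loop above relies on inferredArcWeight() to propagate
// weights: when all but one arc in a node's in- or out-arc set have known
// weights, the remaining arc must carry the difference between the node
// weight and the sum of the known arc weights.  A minimal sketch of that
// helper, assuming ArcPtrVec is a vector of Arc* (names follow the usage
// above; this is an illustration, not the exact implementation):

static bool inferredArcWeight(const TransCFG::ArcPtrVec& arcVec,
                              int64_t nodeWeight) {
  int64_t remaining = nodeWeight;
  TransCFG::Arc* unknownArc = nullptr;
  for (auto arc : arcVec) {
    if (arc->weight() == TransCFG::Arc::kUnknownWeight) {
      if (unknownArc != nullptr) return false;  // more than one unknown arc
      unknownArc = arc;
    } else {
      remaining -= arc->weight();
    }
  }
  if (unknownArc == nullptr) return false;      // nothing left to infer
  if (remaining < 0) remaining = 0;             // clamp noisy profile counters
  unknownArc->setWeight(remaining);
  return true;
}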
TCA emit_bindjcc1st_stub(CodeBlock& cb, FPInvOffset spOff, TCA jcc,
                         SrcKey taken, SrcKey next, ConditionCode cc) {
  always_assert_flog(taken.resumed() == next.resumed(),
                     "bind_jcc_1st was confused about resumables");
  return emit_ephemeral(
    cb,
    mcg->getFreeStub(cb, &mcg->cgFixups()),
    taken.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_BIND_JCC_FIRST,
    jcc,
    taken.toAtomicInt(),
    next.toAtomicInt(),
    cc
  );
}
bool breaksRegion(SrcKey sk) {
  switch (sk.op()) {
    case Op::SSwitch:
    case Op::CreateCont:
    case Op::Yield:
    case Op::YieldK:
    case Op::RetC:
    case Op::RetV:
    case Op::Exit:
    case Op::Fatal:
    case Op::Throw:
    case Op::Unwind:
    case Op::Eval:
    case Op::NativeImpl:
      return true;

    case Op::Await:
      // We break regions at resumed Await instructions, to avoid
      // duplicating the translation of the resumed SrcKey after the
      // Await.
      return sk.resumed();

    default:
      return false;
  }
}
bool InliningDecider::canInlineAt(SrcKey callSK, const Func* callee) const {
  if (m_disabled ||
      !callee ||
      !RuntimeOption::EvalHHIREnableGenTimeInlining ||
      RuntimeOption::EvalJitEnableRenameFunction ||
      callee->attrs() & AttrInterceptable) {
    return false;
  }

  // We can only inline at normal FCalls.
  if (callSK.op() != Op::FCall &&
      callSK.op() != Op::FCallD) {
    return false;
  }

  // Don't inline from resumed functions.  The inlining mechanism doesn't have
  // support for these---it has no way to redefine stack pointers relative to
  // the frame pointer, because in a resumed function the frame pointer points
  // into the heap instead of into the eval stack.
  if (callSK.resumed()) return false;

  // TODO(#4238160): Inlining into pseudomain callsites is still buggy.
  if (callSK.func()->isPseudoMain()) return false;

  if (!isCalleeInlinable(callSK, callee) ||
      !checkNumArgs(callSK, callee)) {
    return false;
  }

  return true;
}
static void recordActRecPush(const SrcKey sk,
                             const StringData* name,
                             const StringData* clsName,
                             bool staticCall) {
  auto unit = sk.unit();
  FTRACE(2, "annotation: recordActRecPush: {}@{} {}{}{} ({}static)\n",
         unit->filepath()->data(), sk.offset(),
         clsName ? clsName->data() : "",
         clsName ? "::" : "",
         name,
         !staticCall ? "non" : "");
  SrcKey next(sk);
  next.advance(unit);
  const FPIEnt* fpi = sk.func()->findFPI(next.offset());
  assert(fpi);
  assert(name->isStatic());
  assert(sk.offset() == fpi->m_fpushOff);
  auto const fcall = SrcKey { sk.func(), fpi->m_fcallOff, sk.resumed() };
  assert(isFCallStar(*reinterpret_cast<const Op*>(unit->at(fcall.offset()))));
  auto const func = lookupDirectFunc(sk, name, clsName, staticCall);
  if (func) {
    recordFunc(fcall, func);
  }
}
bool InliningDecider::canInlineAt(SrcKey callSK, const Func* callee) const {
  if (!callee ||
      !RuntimeOption::EvalHHIREnableGenTimeInlining ||
      RuntimeOption::EvalJitEnableRenameFunction ||
      callee->attrs() & AttrInterceptable) {
    return false;
  }

  if (callee->cls()) {
    if (!classHasPersistentRDS(callee->cls())) {
      // If the callee's class is not persistent, it's still ok to use it
      // if we're jitting into a method of a subclass.
      auto ctx = callSK.func()->cls();
      if (!ctx || !ctx->classof(callee->cls())) {
        return false;
      }
    }
  } else {
    auto const handle = callee->funcHandle();
    if (handle == rds::kInvalidHandle || !rds::isPersistentHandle(handle)) {
      // If the callee isn't persistent, it's still ok to use it if it's
      // defined at the top level in the same unit as the caller.
      if (callee->unit() != callSK.unit() || !callee->top()) {
        return false;
      }
    }
  }

  // If inlining was disabled... don't inline.
  if (m_disabled) return false;

  // TODO(#3331014): We have this hack until more ARM codegen is working.
  if (arch() == Arch::ARM) return false;

  // We can only inline at normal FCalls.
  if (callSK.op() != Op::FCall &&
      callSK.op() != Op::FCallD) {
    return false;
  }

  // Don't inline from resumed functions.  The inlining mechanism doesn't have
  // support for these---it has no way to redefine stack pointers relative to
  // the frame pointer, because in a resumed function the frame pointer points
  // into the heap instead of into the eval stack.
  if (callSK.resumed()) return false;

  // TODO(#4238160): Inlining into pseudomain callsites is still buggy.
  if (callSK.func()->isPseudoMain()) return false;

  if (!isCalleeInlinable(callSK, callee) ||
      !checkNumArgs(callSK, callee)) {
    return false;
  }

  return true;
}
TCA emit_retranslate_opt_stub(CodeBlock& cb, FPInvOffset spOff,
                              SrcKey target, TransID transID) {
  return emit_persistent(
    cb,
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE_OPT,
    target.toAtomicInt(),
    transID
  );
}
TCA emit_retranslate_stub(CodeBlock& cb, FPInvOffset spOff,
                          SrcKey target, TransFlags trflags) {
  return emit_persistent(
    cb,
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE,
    target.offset(),
    trflags.packed
  );
}
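// Note the asymmetry with emit_retranslate_opt_stub above: REQ_RETRANSLATE
// packs only target.offset(), presumably because its handler can recover the
// function and resumed-ness from the live VM frame, while REQ_RETRANSLATE_OPT
// carries the full SrcKey via toAtomicInt().  (This is a reading of the two
// stubs above, not a claim about the request handlers themselves.)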
std::string showShort(SrcKey sk) {
  if (!sk.valid()) return "<invalid SrcKey>";
  return folly::format(
    "{}(id {:#x})@{}{}",
    sk.func()->fullName(),
    sk.funcID(),
    sk.offset(),
    sk.resumed() ? "r" : ""
  ).str();
}
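// For illustration: given a function "Foo::bar" with FuncId 0x2a at bytecode
// offset 17 in a resumed context, the format string above would produce
// something like "Foo::bar(id 0x2a)@17r" (hypothetical values; the trailing
// "r" marks a resumed SrcKey).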
static void recordFunc(const SrcKey sk, const Func* func) {
  FTRACE(2, "annotation: recordFunc: {}@{}{} {}\n",
         sk.unit()->filepath()->data(),
         sk.offset(),
         sk.resumed() ? "r" : "",
         func->fullName()->data());
  s_callDB.insert(std::make_pair(sk, func));
}
TCA emit_bindaddr_stub(CodeBlock& cb, FPInvOffset spOff, TCA* addr,
                       SrcKey target, TransFlags trflags) {
  return emit_ephemeral(
    cb,
    mcg->getFreeStub(cb, &mcg->cgFixups()),
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_BIND_ADDR,
    addr,
    target.toAtomicInt(),
    trflags.packed
  );
}
std::string show(SrcKey sk) {
  auto func = sk.func();
  auto unit = sk.unit();
  const char* filepath = "*anonFile*";
  if (unit->filepath()->data() && unit->filepath()->size()) {
    filepath = unit->filepath()->data();
  }
  // Note: the '#' flag already emits the "0x" prefix, so the format string
  // must not also spell out a literal "0x".
  return folly::format("{}:{} in {}(id {:#x})@{: >6}{}",
                       filepath,
                       unit->getLineNumber(sk.offset()),
                       func->isPseudoMain() ? "pseudoMain"
                                            : func->fullName()->data(),
                       (uint32_t)sk.getFuncId(),
                       sk.offset(),
                       sk.resumed() ? "r" : "").str();
}
void addDbgGuardImpl(SrcKey sk, SrcRec* sr) {
  TCA realCode = sr->getTopTranslation();
  if (!realCode) return;  // No translations, nothing to do.

  auto& cb = mcg->code.main();

  auto const dbgGuard = vwrap(cb, [&] (Vout& v) {
    if (!sk.resumed()) {
      auto const off = sr->nonResumedSPOff();
      v << lea{rvmfp()[-cellsToBytes(off.offset)], rvmsp()};
    }

    auto const tinfo = v.makeReg();
    auto const attached = v.makeReg();
    auto const sf = v.makeReg();
    auto const done = v.makeBlock();

    constexpr size_t dbgOff =
      offsetof(ThreadInfo, m_reqInjectionData) +
      RequestInjectionData::debuggerReadOnlyOffset();

    v << ldimmq{reinterpret_cast<uintptr_t>(sk.pc()), rarg(0)};

    emitTLSLoad(v, tls_datum(ThreadInfo::s_threadInfo), tinfo);
    v << loadb{tinfo[dbgOff], attached};
    v << testbi{static_cast<int8_t>(0xffu), attached, sf};

    v << jcci{CC_NZ, sf, done, mcg->ustubs().interpHelper};

    v = done;
    v << fallthru{};
  }, CodeKind::Helper);

  // Emit a jump to the actual code.
  auto const dbgBranchGuardSrc = emitSmashableJmp(cb, realCode);

  // Add the guard to the SrcRec.
  sr->addDebuggerGuard(dbgGuard, dbgBranchGuardSrc);
}
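// The guard emitted above behaves roughly like the following pseudocode
// (a sketch of the control flow, not literal emitted machine code):
//
//   if (!resumed) vmsp = vmfp - cellsToBytes(spOff);  // rebuild rvmsp
//   arg0 = sk.pc();                                   // for interpHelper
//   if (this thread's debugger-attached flag is set)
//     goto interpHelper;    // debugger attached: interpret this SrcKey
//   goto realCode;          // otherwise jump to the real translation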
bool InliningDecider::canInlineAt(SrcKey callSK, const Func* callee,
                                  const RegionDesc& region) const {
  if (!RuntimeOption::RepoAuthoritative ||
      !RuntimeOption::EvalHHIREnableGenTimeInlining) {
    return false;
  }

  // If inlining was disabled... don't inline.
  if (m_disabled) return false;

  // TODO(#3331014): We have this hack until more ARM codegen is working.
  if (arch() == Arch::ARM) return false;

  // We can only inline at normal FCalls.
  if (callSK.op() != Op::FCall &&
      callSK.op() != Op::FCallD) {
    return false;
  }

  // Don't inline from resumed functions.  The inlining mechanism doesn't have
  // support for these---it has no way to redefine stack pointers relative to
  // the frame pointer, because in a resumed function the frame pointer points
  // into the heap instead of into the eval stack.
  if (callSK.resumed()) return false;

  // TODO(#4238160): Inlining into pseudomain callsites is still buggy.
  if (callSK.func()->isPseudoMain()) return false;

  if (!isCalleeInlinable(callSK, callee) ||
      !checkNumArgs(callSK, callee) ||
      !checkFPIRegion(callSK, callee, region)) {
    return false;
  }

  return true;
}
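// A hypothetical call site for the overload above.  The construction of the
// decider and the surrounding variable names are assumptions made for
// illustration, not taken from the source:
//
//   InliningDecider decider{callerFunc};  // assumed Func*-taking constructor
//   if (decider.canInlineAt(callSK, callee, region)) {
//     // Safe to proceed with building and inlining the callee's region.
//   }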