static TransIDSet findPredTrans(TransID dstID,
                                const ProfData* profData,
                                const SrcDB& srcDB,
                                const TcaTransIDMap& jmpToTransID) {
  SrcKey dstSK = profData->transSrcKey(dstID);
  const SrcRec* dstSR = srcDB.find(dstSK);
  assertx(dstSR);
  TransIDSet predSet;

  for (auto& inBr : dstSR->incomingBranches()) {
    TransID srcID = folly::get_default(jmpToTransID, inBr.toSmash(),
                                       kInvalidTransID);
    FTRACE(5, "findPredTrans: toSmash = {}  srcID = {}\n",
           inBr.toSmash(), srcID);
    if (srcID != kInvalidTransID && profData->isKindProfile(srcID)) {
      auto srcSuccOffsets = profData->transLastSrcKey(srcID).succOffsets();
      if (srcSuccOffsets.count(dstSK.offset())) {
        predSet.insert(srcID);
      } else {
        FTRACE(5, "findPredTrans: WARNING: incoming branch with impossible "
               "control flow between translations: {} -> {} "
               "(probably due to side exit)\n", srcID, dstID);
      }
    }
  }

  return predSet;
}
TransCFG::TransCFG(FuncId funcId,
                   const ProfData* profData,
                   const SrcDB& srcDB,
                   const TcaTransIDMap& jmpToTransID) {
  assertx(profData);

  // Add nodes.
  for (auto tid : profData->funcProfTransIDs(funcId)) {
    assertx(profData->transRegion(tid) != nullptr);
    // This will skip DV Funclets if they were already
    // retranslated w/ the prologues:
    if (!profData->optimized(profData->transSrcKey(tid))) {
      int64_t weight = profData->absTransCounter(tid);
      addNode(tid, weight);
    }
  }

  // Add arcs.
  for (TransID dstId : nodes()) {
    SrcKey dstSK = profData->transSrcKey(dstId);
    RegionDesc::BlockPtr dstBlock = profData->transRegion(dstId)->entry();
    FTRACE(5, "TransCFG: adding incoming arcs in dstId = {}\n", dstId);
    TransIDSet predIDs = findPredTrans(dstId, profData, srcDB, jmpToTransID);
    for (auto predId : predIDs) {
      if (hasNode(predId)) {
        auto predPostConds =
          profData->transRegion(predId)->blocks().back()->postConds();
        SrcKey predSK = profData->transSrcKey(predId);
        if (preCondsAreSatisfied(dstBlock, predPostConds) &&
            predSK.resumed() == dstSK.resumed()) {
          FTRACE(5, "TransCFG: adding arc {} -> {} ({} -> {})\n",
                 predId, dstId, showShort(predSK), showShort(dstSK));
          addArc(predId, dstId, TransCFG::Arc::kUnknownWeight);
        }
      }
    }
  }

  // Infer arc weights.
  bool changed;
  do {
    changed = false;
    for (TransID tid : nodes()) {
      int64_t nodeWeight = weight(tid);
      if (inferredArcWeight(inArcs(tid),  nodeWeight)) changed = true;
      if (inferredArcWeight(outArcs(tid), nodeWeight)) changed = true;
    }
  } while (changed);

  // Guess weight for non-inferred arcs.
  for (TransID tid : nodes()) {
    for (auto arc : outArcs(tid)) {
      if (arc->weight() == Arc::kUnknownWeight) {
        arc->setGuessed();
        int64_t arcWgt = std::min(weight(arc->src()), weight(arc->dst())) / 2;
        arc->setWeight(arcWgt);
      }
    }
  }
}
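// The fixed-point loop above leans on inferredArcWeight(), which can fill in
// at most one unknown arc per node per pass.  A minimal sketch of that
// inference rule, assuming the Arc accessors used above and an ArcPtrVec
// container type (an illustration, not necessarily the exact implementation):
bool TransCFG::inferredArcWeight(const ArcPtrVec& arcVec, int64_t nodeWeight) {
  int64_t arcWeight = nodeWeight;
  Arc* unknownArc = nullptr;
  for (auto arc : arcVec) {
    if (arc->weight() == Arc::kUnknownWeight) {
      // More than one unknown arc: nothing can be inferred for this node yet.
      if (unknownArc != nullptr) return false;
      unknownArc = arc;
    } else {
      arcWeight -= arc->weight();
    }
  }
  if (unknownArc == nullptr) return false;
  // The single unknown arc must carry whatever flow the known arcs don't
  // account for; clamp at zero since profile counters can be inconsistent.
  if (arcWeight < 0) arcWeight = 0;
  unknownArc->setWeight(arcWeight);
  return true;
}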
bool breaksRegion(SrcKey sk) {
  switch (sk.op()) {
    case Op::SSwitch:
    case Op::CreateCont:
    case Op::Yield:
    case Op::YieldK:
    case Op::RetC:
    case Op::RetV:
    case Op::Exit:
    case Op::Fatal:
    case Op::Throw:
    case Op::Unwind:
    case Op::Eval:
    case Op::NativeImpl:
      return true;

    case Op::Await:
      // We break regions at resumed Await instructions, to avoid
      // duplicating the translation of the resumed SrcKey after the
      // Await.
      return sk.resumed();

    default:
      return false;
  }
}
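// Illustrative use of breaksRegion(): a region selector keeps appending
// instructions until one of the opcodes above transfers control out of the
// region.  extendBlock() is a hypothetical helper, not a real API:
void selectInstrs(RegionDesc::Block& block, SrcKey sk) {
  for (;;) {
    extendBlock(block, sk);       // hypothetical: record sk in the block
    if (breaksRegion(sk)) break;  // control leaves the region after sk
    sk.advance(sk.unit());        // step to the next bytecode instruction
  }
}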
bool InliningDecider::canInlineAt(SrcKey callSK, const Func* callee) const {
  if (m_disabled ||
      !callee ||
      !RuntimeOption::EvalHHIREnableGenTimeInlining ||
      RuntimeOption::EvalJitEnableRenameFunction ||
      callee->attrs() & AttrInterceptable) {
    return false;
  }

  // We can only inline at normal FCalls.
  if (callSK.op() != Op::FCall &&
      callSK.op() != Op::FCallD) {
    return false;
  }

  // Don't inline from resumed functions.  The inlining mechanism doesn't have
  // support for these---it has no way to redefine stack pointers relative to
  // the frame pointer, because in a resumed function the frame pointer points
  // into the heap instead of into the eval stack.
  if (callSK.resumed()) return false;

  // TODO(#4238160): Inlining into pseudomain callsites is still buggy.
  if (callSK.func()->isPseudoMain()) return false;

  if (!isCalleeInlinable(callSK, callee) ||
      !checkNumArgs(callSK, callee)) {
    return false;
  }

  return true;
}
void prepareForNextHHBC(IRGS& env,
                        const NormalizedInstruction* ni,
                        SrcKey newSk,
                        bool lastBcInst) {
  FTRACE(1, "------------------- prepareForNextHHBC ------------------\n");
  env.currentNormalizedInstruction = ni;

  always_assert_flog(
    IMPLIES(isInlining(env), !env.lastBcInst),
    "Tried to end trace while inlining."
  );
  always_assert_flog(
    IMPLIES(isInlining(env), !env.firstBcInst),
    "Inlining while still at the first region instruction."
  );

  always_assert(env.bcStateStack.size() >= env.inlineLevel + 1);
  auto pops = env.bcStateStack.size() - 1 - env.inlineLevel;
  while (pops--) env.bcStateStack.pop_back();

  always_assert_flog(env.bcStateStack.back().func() == newSk.func(),
                     "Tried to update current SrcKey with a different func");

  env.bcStateStack.back().setOffset(newSk.offset());
  updateMarker(env);
  env.lastBcInst = lastBcInst;
  env.catchCreator = nullptr;
  env.irb->prepareForNextHHBC();
}
static void recordActRecPush(const SrcKey sk,
                             const StringData* name,
                             const StringData* clsName,
                             bool staticCall) {
  auto unit = sk.unit();
  FTRACE(2, "annotation: recordActRecPush: {}@{} {}{}{} ({}static)\n",
         unit->filepath()->data(),
         sk.offset(),
         clsName ? clsName->data() : "",
         clsName ? "::" : "",
         name,
         !staticCall ? "non" : "");

  SrcKey next(sk);
  next.advance(unit);
  const FPIEnt* fpi = sk.func()->findFPI(next.offset());
  assert(fpi);
  assert(name->isStatic());
  assert(sk.offset() == fpi->m_fpushOff);
  auto const fcall = SrcKey { sk.func(), fpi->m_fcallOff };
  assert(isFCallStar(*reinterpret_cast<const Op*>(unit->at(fcall.offset()))));
  auto const func = lookupDirectFunc(sk, name, clsName, staticCall);
  if (func) {
    recordFunc(fcall, func);
  }
}
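// For orientation, a made-up FPI region as findFPI() sees it; the SrcKey
// passed in sits on the FPush* and fpi->m_fcallOff marks the matching FCall
// (offsets and operands are illustrative only):
//
//   10: FPushFuncD 1 "foo"   <-- sk.offset() == fpi->m_fpushOff
//   15: String "arg"
//   17: FPassC 0
//   19: FCall 1              <-- fpi->m_fcallOff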
void IRTranslator::translateFCallArray(const NormalizedInstruction& i) {
  const Offset pcOffset = i.offset();
  SrcKey next = i.nextSk();
  const Offset after = next.offset();

  HHIR_EMIT(FCallArray, pcOffset, after,
            jit::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
static void recordFunc(const SrcKey sk, const Func* func) {
  FTRACE(2, "annotation: recordFunc: {}@{} {}\n",
         sk.unit()->filepath()->data(),
         sk.offset(),
         func->fullName()->data());
  s_callDB.insert(std::make_pair(sk, func));
}
bool InliningDecider::canInlineAt(SrcKey callSK, const Func* callee) const {
  if (!callee ||
      !RuntimeOption::EvalHHIREnableGenTimeInlining ||
      RuntimeOption::EvalJitEnableRenameFunction ||
      callee->attrs() & AttrInterceptable) {
    return false;
  }

  if (callee->cls()) {
    if (!classHasPersistentRDS(callee->cls())) {
      // If the callee's class is not persistent, it's still ok to use it
      // if we're jitting into a method of a subclass.
      auto ctx = callSK.func()->cls();
      if (!ctx || !ctx->classof(callee->cls())) {
        return false;
      }
    }
  } else {
    auto const handle = callee->funcHandle();
    if (handle == rds::kInvalidHandle || !rds::isPersistentHandle(handle)) {
      // If the callee isn't persistent, it's still ok to use it if it's
      // defined at the top level in the same unit as the caller.
      if (callee->unit() != callSK.unit() || !callee->top()) {
        return false;
      }
    }
  }

  // If inlining was disabled... don't inline.
  if (m_disabled) return false;

  // TODO(#3331014): We have this hack until more ARM codegen is working.
  if (arch() == Arch::ARM) return false;

  // We can only inline at normal FCalls.
  if (callSK.op() != Op::FCall &&
      callSK.op() != Op::FCallD) {
    return false;
  }

  // Don't inline from resumed functions.  The inlining mechanism doesn't have
  // support for these---it has no way to redefine stack pointers relative to
  // the frame pointer, because in a resumed function the frame pointer points
  // into the heap instead of into the eval stack.
  if (callSK.resumed()) return false;

  // TODO(#4238160): Inlining into pseudomain callsites is still buggy.
  if (callSK.func()->isPseudoMain()) return false;

  if (!isCalleeInlinable(callSK, callee) ||
      !checkNumArgs(callSK, callee)) {
    return false;
  }

  return true;
}
std::string showShort(SrcKey sk) {
  if (!sk.valid()) return "<invalid SrcKey>";
  return folly::format(
    "{}(id {:#x})@{}{}",
    sk.func()->fullName(),
    sk.funcID(),
    sk.offset(),
    sk.resumed() ? "r" : ""
  ).str();
}
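// Given the format string above, a resumed SrcKey at offset 150 in function
// Foo::bar with FuncId 0x2a would render as (illustrative values):
//
//   Foo::bar(id 0x2a)@150r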
TCA emit_retranslate_opt_stub(CodeBlock& cb, FPInvOffset spOff,
                              SrcKey target, TransID transID) {
  return emit_persistent(
    cb,
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE_OPT,
    target.toAtomicInt(),
    transID
  );
}
TCA emit_retranslate_stub(CodeBlock& cb, FPInvOffset spOff,
                          SrcKey target, TransFlags trflags) {
  return emit_persistent(
    cb,
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE,
    target.offset(),
    trflags.packed
  );
}
void sktrace(SrcKey sk, const char* fmt, ...) {
  if (!Trace::enabled) return;

  auto inst = instrToString((Op*)sk.unit()->at(sk.offset()));
  Trace::trace("%s: %20s ", show(sk).c_str(), inst.c_str());
  va_list a;
  va_start(a, fmt);
  Trace::vtrace(fmt, a);
  va_end(a);
}
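// The SKTRACE macro used elsewhere in this section (e.g. in
// reachedTranslationLimit) is, roughly, a level-gated wrapper around
// sktrace() -- a sketch, assuming the usual ONTRACE gating macro:
#define SKTRACE(level, sk, ...) \
  ONTRACE(level, sktrace(sk, __VA_ARGS__))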
SrcKey RegionDesc::lastSrcKey() const {
  assertx(!empty());
  FuncId startFuncId = start().funcID();
  for (int i = m_blocks.size() - 1; i >= 0; i--) {
    SrcKey sk = m_blocks[i]->last();
    if (sk.funcID() == startFuncId) {
      return sk;
    }
  }
  always_assert(0);
}
TCA emit_bindaddr_stub(CodeBlock& cb, FPInvOffset spOff, TCA* addr,
                       SrcKey target, TransFlags trflags) {
  return emit_ephemeral(
    cb,
    mcg->getFreeStub(cb, &mcg->cgFixups()),
    target.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_BIND_ADDR,
    addr,
    target.toAtomicInt(),
    trflags.packed
  );
}
/*
 * Returns the last BC offset in the region that corresponds to the
 * function where the region starts.  This will normally be the offset
 * of the last instruction in the last block, except if the function
 * ends with an inlined call.  In this case, the offset of the
 * corresponding FCall* in the function that starts the region is
 * returned.
 */
static Offset findLastBcOffset(const RegionDescPtr region) {
  assert(region->blocks.size() > 0);
  auto& blocks = region->blocks;
  FuncId startFuncId = blocks[0]->start().getFuncId();
  for (int i = blocks.size() - 1; i >= 0; i--) {
    SrcKey sk = blocks[i]->last();
    if (sk.getFuncId() == startFuncId) {
      return sk.offset();
    }
  }
  not_reached();
}
std::string show(SrcKey sk) {
  auto func = sk.func();
  auto unit = sk.unit();
  const char* filepath = "*anonFile*";
  if (unit->filepath()->data() && unit->filepath()->size()) {
    filepath = unit->filepath()->data();
  }
  // Note: the '#' flag already prints the "0x" prefix for the FuncId.
  return folly::format("{}:{} in {}(id {:#x})@{: >6}",
                       filepath, unit->getLineNumber(sk.offset()),
                       func->isPseudoMain() ? "pseudoMain"
                                            : func->fullName()->data(),
                       (unsigned long long)sk.getFuncId(),
                       sk.offset()).str();
}
TCA emit_bindjcc1st_stub(CodeBlock& cb, FPInvOffset spOff, TCA jcc,
                         SrcKey taken, SrcKey next, ConditionCode cc) {
  always_assert_flog(taken.resumed() == next.resumed(),
                     "bind_jcc_1st was confused about resumables");
  return emit_ephemeral(
    cb,
    mcg->getFreeStub(cb, &mcg->cgFixups()),
    taken.resumed() ? folly::none : folly::make_optional(spOff),
    REQ_BIND_JCC_FIRST,
    jcc,
    taken.toAtomicInt(),
    next.toAtomicInt(),
    cc
  );
}
/*
 * Returns true iff `block' ends the IR unit after finishing execution
 * of the bytecode instruction at `sk'.
 */
static bool endsUnitAtSrcKey(const Block* block, SrcKey sk) {
  if (!block->isExitNoThrow()) return false;

  const auto& inst = block->back();
  const auto instSk = inst.marker().sk();

  switch (inst.op()) {
    // These instructions end a unit after executing the bytecode
    // instruction they correspond to.
    case InterpOneCF:
    case JmpSSwitchDest:
    case JmpSwitchDest:
    case RaiseError:
      return instSk == sk;

    // RetCtrl generally ends a bytecode instruction; the exception is in an
    // Await bytecode instruction, where we consider the end of the bytecode
    // instruction to be the non-suspending path.
    case RetCtrl:
    case AsyncRetCtrl:
      return inst.marker().sk().op() != Op::Await;

    // A ReqBindJmp ends a unit and jumps to the next instruction to execute.
    case ReqBindJmp: {
      auto destOffset = inst.extra<ReqBindJmp>()->dest.offset();
      return sk.succOffsets().count(destOffset);
    }

    default:
      return false;
  }
}
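// Why succOffsets() is the right check for ReqBindJmp: for a conditional
// branch it contains both bytecode successors, so a ReqBindJmp to either one
// ends the unit at `sk'.  Illustrative offsets only:
//
//   25: JmpZ -> 40    <-- sk: succOffsets() == { 30, 40 }
//   30: ...               (fallthrough successor)
//   40: ...               (taken successor)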
void recordGdbTranslation(SrcKey sk, const Func* srcFunc, const CodeBlock& cb,
                          const TCA start, const TCA end, bool exit,
                          bool inPrologue) {
  assertx(cb.contains(start) && cb.contains(end));
  if (start != end) {
    assertOwnsCodeLock();
    if (!RuntimeOption::EvalJitNoGdb) {
      Debug::DebugInfo::Get()->recordTracelet(
        Debug::TCRange(start, end, &cb == &code().cold()),
        srcFunc,
        srcFunc->unit() ? srcFunc->unit()->at(sk.offset()) : nullptr,
        exit,
        inPrologue
      );
    }
    if (RuntimeOption::EvalPerfPidMap) {
      Debug::DebugInfo::Get()->recordPerfMap(
        Debug::TCRange(start, end, &cb == &code().cold()),
        sk,
        srcFunc,
        exit,
        inPrologue
      );
    }
  }
}
void addDbgGuardImpl(SrcKey sk) {
  vixl::MacroAssembler a { tx64->mainCode };

  vixl::Label after;
  vixl::Label interpReqAddr;

  // Get the debugger-attached flag from thread-local storage.  Don't bother
  // saving caller-saved regs around the host call; this is between blocks.
  emitTLSLoad<ThreadInfo>(a, ThreadInfo::s_threadInfo, rAsm);

  // Is the debugger attached?
  a.   Ldr  (rAsm.W(), rAsm[dbgOff]);
  a.   Tst  (rAsm, 0xff);
  // Skip the jump to the stubs if no debugger is attached.
  a.   B    (&after, vixl::eq);
  a.   Ldr  (rAsm, &interpReqAddr);
  a.   Br   (rAsm);
  if (!a.isFrontierAligned(8)) {
    a. Nop  ();
    assert(a.isFrontierAligned(8));
  }
  a.   bind (&interpReqAddr);
  TCA interpReq = emitServiceReq(tx64->stubsCode, REQ_INTERPRET,
                                 sk.offset(), 0);
  a.   dc64 (interpReq);
  a.   bind (&after);
}
void emitBindJ(CodeBlock& cb, CodeBlock& frozen,
               ConditionCode cc, SrcKey dest) {
  TCA toSmash = cb.frontier();
  if (cb.base() == frozen.base()) {
    // This is just to reserve space.  We'll overwrite with the real dest
    // later.
    mcg->backEnd().emitSmashableJump(cb, toSmash, cc);
  }

  mcg->setJmpTransID(toSmash);

  TCA sr = emitEphemeralServiceReq(
    frozen,
    mcg->getFreeStub(frozen, &mcg->cgFixups()),
    folly::none,
    REQ_BIND_JMP,
    toSmash,
    dest.toAtomicInt(),
    TransFlags{}.packed
  );

  MacroAssembler a { cb };
  if (cb.base() == frozen.base()) {
    UndoMarker um {cb};
    cb.setFrontier(toSmash);
    mcg->backEnd().emitSmashableJump(cb, sr, cc);
    um.undo();
  } else {
    mcg->backEnd().emitSmashableJump(cb, sr, cc);
  }
}
static void recordActRecPush(const SrcKey& sk,
                             const Unit* unit,
                             const FPIEnt* fpi,
                             const StringData* name,
                             const StringData* clsName,
                             bool staticCall) {
  // sk is the address of a FPush* of the function whose static name
  // is name.  The boundaries of FPI regions are such that we can't quite
  // find the FCall that matches this FPush* without decoding forward to
  // the end; this is not ideal, but is hopefully affordable at translation
  // time.
  ASSERT(name->isStatic());
  ASSERT(sk.offset() == fpi->m_fpushOff);
  SrcKey fcall;
  SrcKey next(sk);
  next.advance(unit);
  do {
    if (*unit->at(next.offset()) == OpFCall) {
      // Remember the last FCall in the region; the region might end
      // with UnboxR, e.g.
      fcall = next;
    }
    next.advance(unit);
  } while (next.offset() <= fpi->m_fcallOff);
  ASSERT(*unit->at(fcall.offset()) == OpFCall);

  if (clsName) {
    const Class* cls = Unit::lookupClass(clsName);
    bool magic = false;
    const Func* func = lookupImmutableMethod(cls, name, magic, staticCall);
    if (func) {
      recordFunc(fcall, func);
    }
    return;
  }
  const Func* func = Unit::lookupFunc(name);
  if (func && func->isNameBindingImmutable(unit)) {
    // This will never go into a call cache, so we don't need to encode
    // the args.  It will be used in OpFCall below to set i->funcd.
    recordFunc(fcall, func);
  } else {
    // It's not enough to remember the function name; we also need to encode
    // the number of arguments and current flag disposition.
    int numArgs = getImm(unit->at(sk.offset()), 0).u_IVA;
    recordNameAndArgs(fcall, name, numArgs);
  }
}
TransRec::TransRec(SrcKey _src,
                   TransID transID,
                   TransKind _kind,
                   TCA _aStart,
                   uint32_t _aLen,
                   TCA _acoldStart,
                   uint32_t _acoldLen,
                   TCA _afrozenStart,
                   uint32_t _afrozenLen,
                   RegionDescPtr region,
                   std::vector<TransBCMapping> _bcMapping,
                   Annotations&& _annotations,
                   bool _hasLoop)
  : bcMapping(_bcMapping)
  , annotations(std::move(_annotations))
  , funcName(_src.func()->fullName()->data())
  , src(_src)
  , md5(_src.func()->unit()->md5())
  , aStart(_aStart)
  , acoldStart(_acoldStart)
  , afrozenStart(_afrozenStart)
  , aLen(_aLen)
  , acoldLen(_acoldLen)
  , afrozenLen(_afrozenLen)
  , bcStart(_src.offset())
  , id(transID)
  , kind(_kind)
  , hasLoop(_hasLoop)
{
  if (funcName.empty()) funcName = "Pseudo-main";

  if (!region) return;

  assertx(!region->empty());
  for (auto& block : region->blocks()) {
    auto sk = block->start();
    blocks.emplace_back(Block{sk.unit()->md5(), sk.offset(),
                              block->last().advanced().offset()});
  }

  auto& firstBlock = *region->blocks().front();
  for (auto const& pred : firstBlock.typePreConditions()) {
    guards.emplace_back(show(pred));
  }
}
static void recordActRecPush(NormalizedInstruction& i,
                             const Unit* unit,
                             const StringData* name,
                             const StringData* clsName,
                             bool staticCall) {
  const SrcKey& sk = i.source;
  FTRACE(2, "annotation: recordActRecPush: {}@{} {}{}{} ({}static)\n",
         unit->filepath()->data(),
         sk.offset(),
         clsName ? clsName->data() : "",
         clsName ? "::" : "",
         name,
         !staticCall ? "non" : "");

  SrcKey next(sk);
  next.advance(unit);
  const FPIEnt* fpi = curFunc()->findFPI(next.offset());
  assert(fpi);
  assert(name->isStatic());
  assert(sk.offset() == fpi->m_fpushOff);
  SrcKey fcall = sk;
  fcall.m_offset = fpi->m_fcallOff;
  assert(isFCallStar(*unit->at(fcall.offset())));

  if (clsName) {
    const Class* cls = Unit::lookupUniqueClass(clsName);
    bool magic = false;
    const Func* func = lookupImmutableMethod(cls, name, magic, staticCall);
    if (func) {
      recordFunc(i, fcall, func);
    }
    return;
  }
  const Func* func = Unit::lookupFunc(name);
  if (func && func->isNameBindingImmutable(unit)) {
    // This will never go into a call cache, so we don't need to encode
    // the args.  It will be used in OpFCall below to set i->funcd.
    recordFunc(i, fcall, func);
  } else {
    // It's not enough to remember the function name; we also need to encode
    // the number of arguments and current flag disposition.
    int numArgs = getImm(unit->at(sk.offset()), 0).u_IVA;
    recordNameAndArgs(fcall, name, numArgs);
  }
}
bool reachedTranslationLimit(TransKind kind, SrcKey sk, const SrcRec& srcRec) {
  const auto numTrans = srcRec.translations().size();

  // Optimized translations perform this check at relocation time to avoid
  // invalidating all of their SrcKeys early.
  if (kind == TransKind::Optimize) return false;

  if ((kind == TransKind::Profile &&
       numTrans != RuntimeOption::EvalJitMaxProfileTranslations) ||
      (kind != TransKind::Profile &&
       numTrans != RuntimeOption::EvalJitMaxTranslations)) {
    return false;
  }
  INC_TPC(max_trans);

  if (debug && Trace::moduleEnabled(Trace::mcg, 2)) {
    const auto& tns = srcRec.translations();
    TRACE(1, "Too many (%zd) translations: %s, BC offset %d\n",
          tns.size(), sk.unit()->filepath()->data(), sk.offset());
    SKTRACE(2, sk, "{\n");
    TCA topTrans = srcRec.getTopTranslation();
    for (size_t i = 0; i < tns.size(); ++i) {
      auto const rec = transdb::getTransRec(tns[i].mainStart());
      assertx(rec);
      SKTRACE(2, sk, "%zd %p\n", i, tns[i].mainStart());
      if (tns[i].mainStart() == topTrans) {
        SKTRACE(2, sk, "%zd: *Top*\n", i);
      }
      if (rec->kind == TransKind::Anchor) {
        SKTRACE(2, sk, "%zd: Anchor\n", i);
      } else {
        SKTRACE(2, sk, "%zd: guards {\n", i);
        for (unsigned j = 0; j < rec->guards.size(); ++j) {
          FTRACE(2, "{}\n", rec->guards[j]);
        }
        SKTRACE(2, sk, "%zd } guards\n", i);
      }
    }
    SKTRACE(2, sk, "} /* Too many translations */\n");
  }
  return true;
}
TransRec::TransRec(SrcKey _src,
                   TransKind _kind,
                   TCA _aStart,
                   uint32_t _aLen,
                   TCA _acoldStart,
                   uint32_t _acoldLen,
                   TCA _afrozenStart,
                   uint32_t _afrozenLen,
                   RegionDescPtr region,
                   std::vector<TransBCMapping> _bcMapping,
                   bool _isLLVM)
  : bcMapping(_bcMapping)
  , funcName(_src.func()->fullName()->data())
  , src(_src)
  , md5(_src.func()->unit()->md5())
  , aStart(_aStart)
  , acoldStart(_acoldStart)
  , afrozenStart(_afrozenStart)
  , aLen(_aLen)
  , acoldLen(_acoldLen)
  , afrozenLen(_afrozenLen)
  , bcStart(_src.offset())
  , id(0)
  , kind(_kind)
  , isLLVM(_isLLVM)
{
  if (funcName.empty()) funcName = "Pseudo-main";

  if (!region) return;

  assertx(!region->empty());
  for (auto& block : region->blocks()) {
    auto sk = block->start();
    blocks.emplace_back(Block{sk.unit()->md5(), sk.offset(),
                              block->last().advanced().offset()});
  }

  auto& firstBlock = *region->blocks().front();
  auto guardRange = firstBlock.typePreds().equal_range(firstBlock.start());
  for (; guardRange.first != guardRange.second; ++guardRange.first) {
    guards.emplace_back(show(guardRange.first->second));
  }
}
TransID ProfData::addTransPrologue(TransKind kind, const SrcKey& sk,
                                   int nArgs) {
  assert(kind == TransPrologue || kind == TransProflogue);
  TransID transId = m_numTrans++;
  m_transRecs.emplace_back(new ProfTransRec(transId, kind, sk, nArgs));
  if (kind == TransProflogue) {
    // Only Proflogue translations need an entry in the m_prologueDB.
    m_prologueDB.add(sk.getFuncId(), nArgs, transId);
  }
  return transId;
}
void addDbgGuardImpl(SrcKey sk, SrcRec* sr) {
  TCA realCode = sr->getTopTranslation();
  if (!realCode) return;  // No translations, nothing to do.

  auto& cb = mcg->code.main();

  auto const dbgGuard = vwrap(cb, [&] (Vout& v) {
    if (!sk.resumed()) {
      auto const off = sr->nonResumedSPOff();
      v << lea{rvmfp()[-cellsToBytes(off.offset)], rvmsp()};
    }

    auto const tinfo = v.makeReg();
    auto const attached = v.makeReg();
    auto const sf = v.makeReg();

    auto const done = v.makeBlock();

    constexpr size_t dbgOff =
      offsetof(ThreadInfo, m_reqInjectionData) +
      RequestInjectionData::debuggerReadOnlyOffset();

    v << ldimmq{reinterpret_cast<uintptr_t>(sk.pc()), rarg(0)};

    emitTLSLoad(v, tls_datum(ThreadInfo::s_threadInfo), tinfo);
    v << loadb{tinfo[dbgOff], attached};
    v << testbi{static_cast<int8_t>(0xffu), attached, sf};

    v << jcci{CC_NZ, sf, done, mcg->ustubs().interpHelper};

    v = done;
    v << fallthru{};
  }, CodeKind::Helper);

  // Emit a jump to the actual code.
  auto const dbgBranchGuardSrc = emitSmashableJmp(cb, realCode);

  // Add the guard to the SrcRec.
  sr->addDebuggerGuard(dbgGuard, dbgBranchGuardSrc);
}
bool Translator::isSrcKeyInBL(SrcKey sk) {
  auto unit = sk.unit();
  if (unit->isInterpretOnly()) return true;

  Lock l(m_dbgBlacklistLock);
  if (m_dbgBLSrcKey.find(sk) != m_dbgBLSrcKey.end()) {
    return true;
  }

  // Loop until the end of the basic block inclusively.  This is useful for
  // function exit breakpoints, which are implemented by blacklisting the
  // RetC opcodes.
  PC pc = nullptr;
  do {
    pc = (pc == nullptr) ? unit->at(sk.offset()) : pc + instrLen(pc);
    if (m_dbgBLPC.checkPC(pc)) {
      m_dbgBLSrcKey.insert(sk);
      return true;
    }
  } while (!opcodeBreaksBB(peek_op(pc)));
  return false;
}