/*
 * Emit machine code for every block of `unit' into `mainCode' (hot path)
 * and `stubsCode' (cold/stub path), recording per-block code ranges in
 * `asmInfo' when it is non-null and bytecode mappings in `bcMap'.
 *
 * Blocks are laid out by layoutBlocks(); blocks before `astubsIt' go to
 * the main area, the rest to the stubs area.
 */
void genCodeImpl(CodeBlock& mainCode,
                 CodeBlock& stubsCode,
                 IRUnit& unit,
                 std::vector<TransBCMapping>* bcMap,
                 JIT::MCGenerator* mcg,
                 const RegAllocInfo& regs,
                 AsmInfo* asmInfo) {
  LiveRegs live_regs = computeLiveRegs(unit, regs);
  CodegenState state(unit, regs, live_regs, asmInfo);

  // Returns: whether a block has already been emitted.
  DEBUG_ONLY auto isEmitted = [&](Block* block) {
    return state.addresses[block];
  };

  /*
   * Emit the given block on the supplied assembler.  The `nextLinear'
   * is the next block that will be emitted on this assembler.  If it is
   * not the next block in control flow order, then emit a patchable jump
   * to the next flow block.
   */
  auto emitBlock = [&](CodeBlock& cb, Block* block, Block* nextLinear) {
    assert(!isEmitted(block));

    FTRACE(6, "genBlock {} on {}\n", block->id(),
           cb.base() == stubsCode.base() ? "astubs" : "a");

    // Capture frontiers before emission so we can compute the ranges of
    // code this block contributed to each area afterwards.
    auto const aStart      = cb.frontier();
    auto const astubsStart = stubsCode.frontier();
    // Backpatch any forward jumps from already-emitted blocks that target
    // this block, now that its start address is known.
    mcg->backEnd().patchJumps(cb, state, block);
    state.addresses[block] = aStart;

    // If the block ends with a Jmp and the next block is going to be
    // its target, we don't need to actually emit it.
    IRInstruction* last = &block->back();
    state.noTerminalJmp = last->op() == Jmp && nextLinear == last->taken();

    if (state.asmInfo) {
      state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier());
    }

    genBlock(unit, cb, stubsCode, mcg, state, block, bcMap);
    // If control flow falls through to a block that won't be emitted
    // next in this area, bridge the gap with a forward jump.
    auto nextFlow = block->next();
    if (nextFlow && nextFlow != nextLinear) {
      mcg->backEnd().emitFwdJmp(cb, nextFlow, state);
    }

    if (state.asmInfo) {
      // Re-record now that emission has advanced the frontier(s).
      state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier());
      if (cb.base() != stubsCode.base()) {
        state.asmInfo->astubRanges[block] = TcaRange(astubsStart,
                                                     stubsCode.frontier());
      }
    }
  };

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    mcg->backEnd().emitTraceCall(mainCode, unit.bcOff());
  }

  auto const linfo = layoutBlocks(unit);

  // Main-area blocks: [blocks.begin(), astubsIt).
  for (auto it = linfo.blocks.begin(); it != linfo.astubsIt; ++it) {
    Block* nextLinear = boost::next(it) != linfo.astubsIt
      ? *boost::next(it) : nullptr;
    emitBlock(mainCode, *it, nextLinear);
  }
  // Stub-area blocks: [astubsIt, blocks.end()).
  for (auto it = linfo.astubsIt; it != linfo.blocks.end(); ++it) {
    Block* nextLinear = boost::next(it) != linfo.blocks.end()
      ? *boost::next(it) : nullptr;
    emitBlock(stubsCode, *it, nextLinear);
  }

  if (debug) {
    // Sanity: every laid-out block must have been emitted exactly once.
    for (Block* UNUSED block : linfo.blocks) {
      assert(isEmitted(block));
    }
  }
}
/*
 * Pretty-print `block' to `os': its label, predecessor list, bytecode
 * markers, phi pseudo-instructions for DefLabels, each instruction (with
 * register info from `regs' and constraints from `guards'), interleaved
 * disassembly when `asmInfo' is provided, and the fallthrough target.
 *
 * `markerPtr', when non-null, carries the current BCMarker across
 * consecutive blocks so unchanged markers aren't re-printed.
 */
void print(std::ostream& os, const Block* block,
           const RegAllocInfo* regs, const AsmInfo* asmInfo,
           const GuardConstraints* guards, BCMarker* markerPtr) {
  BCMarker dummy;
  BCMarker& curMarker = markerPtr ? *markerPtr : dummy;

  // Machine-code range still to be attributed; shrinks from the front as
  // each instruction's range is printed.
  TcaRange blockRange = asmInfo ? asmInfo->asmRanges[block]
                                : TcaRange(nullptr, nullptr);

  os << '\n' << std::string(kIndent - 3, ' ');
  printLabel(os, block);
  os << punc(":");
  auto& preds = block->preds();
  if (!preds.empty()) {
    os << " (preds";
    for (auto const& edge : preds) {
      os << " B" << edge.from()->id();
    }
    os << ')';
  }
  os << "\n";

  if (block->empty()) {
    os << std::string(kIndent, ' ') << "empty block\n";
    return;
  }

  const char* markerEndl = "";
  for (auto it = block->begin(); it != block->end();) {
    auto& inst = *it; ++it;

    // Print bytecode-marker context only when it changes between
    // instructions.
    if (inst.marker() != curMarker) {
      std::ostringstream mStr;
      auto const& newMarker = inst.marker();
      if (!newMarker.hasFunc()) {
        os << color(ANSI_COLOR_BLUE)
           << std::string(kIndent, ' ')
           << "--- invalid marker"
           << color(ANSI_COLOR_END)
           << '\n';
      } else {
        auto func = newMarker.func();
        // Only dump the full function header when the function itself
        // changed, not just the bytecode offset.
        if (!curMarker.hasFunc() || func != curMarker.func()) {
          func->prettyPrint(mStr, Func::PrintOpts().noFpi());
        }
        mStr << std::string(kIndent, ' ')
             << newMarker.show()
             << '\n';

        auto bcOffset = newMarker.bcOff();
        func->unit()->prettyPrint(
          mStr, Unit::PrintOpts()
          .range(bcOffset, bcOffset+1)
          .noLineNumbers()
          .noFuncs()
          .indent(0));
        std::vector<std::string> vec;
        folly::split('\n', mStr.str(), vec);
        os << markerEndl;
        markerEndl = "\n";
        // Colorize each non-empty marker line.
        for (auto& s : vec) {
          if (s.empty()) continue;
          os << color(ANSI_COLOR_BLUE) << s << color(ANSI_COLOR_END) << '\n';
        }
      }

      curMarker = newMarker;
    }

    if (inst.op() == DefLabel) {
      // print phi pseudo-instructions
      for (unsigned i = 0, n = inst.numDsts(); i < n; ++i) {
        os << std::string(kIndent +
                          folly::format("({}) ", inst.id()).str().size(),
                          ' ');
        auto dst = inst.dst(i);
        jit::print(os, dst, dstLoc(regs, &inst, i));
        os << punc(" = ") << color(ANSI_COLOR_CYAN) << "phi "
           << color(ANSI_COLOR_END);
        bool first = true;
        // One "src@pred" entry per incoming jump edge.
        inst.block()->forEachSrc(i, [&](IRInstruction* jmp, SSATmp*) {
          if (!first) os << punc(", ");
          first = false;
          printSrc(os, jmp, i, regs);
          os << punc("@");
          printLabel(os, jmp->block());
        });
        os << '\n';
      }
    }

    os << std::string(kIndent, ' ');
    jit::print(os, &inst, regs, guards);
    os << '\n';

    if (asmInfo) {
      TcaRange instRange = asmInfo->instRanges[inst];
      if (!instRange.empty()) {
        disasmRange(os, instRange.begin(), instRange.end());
        os << '\n';
        // Instruction ranges must be nested within the block's range, in
        // order; consume the printed prefix.
        assert(instRange.end() >= blockRange.start() &&
               instRange.end() <= blockRange.end());
        blockRange = TcaRange(instRange.end(), blockRange.end());
      }
    }
  }

  if (asmInfo) {
    // print code associated with this block that isn't tied to any
    // instruction.  This includes code after the last instruction (e.g.
    // jmp to next block), and ACold or AFrozen code.
    if (!blockRange.empty()) {
      os << std::string(kIndent, ' ') << punc("A:") << "\n";
      disasmRange(os, blockRange.start(), blockRange.end());
    }
    auto acoldRange = asmInfo->acoldRanges[block];
    if (!acoldRange.empty()) {
      os << std::string(kIndent, ' ') << punc("ACold:") << "\n";
      disasmRange(os, acoldRange.start(), acoldRange.end());
    }
    auto afrozenRange = asmInfo->afrozenRanges[block];
    if (!afrozenRange.empty()) {
      os << std::string(kIndent, ' ') << punc("AFrozen:") << "\n";
      disasmRange(os, afrozenRange.start(), afrozenRange.end());
    }
    if (!blockRange.empty() || !acoldRange.empty() || !afrozenRange.empty()) {
      os << '\n';
    }
  }

  os << std::string(kIndent - 2, ' ');
  auto next = block->empty() ? nullptr : block->next();
  if (next) {
    os << punc("-> ");
    printLabel(os, next);
    os << '\n';
  } else {
    os << "no fallthrough\n";
  }
}
static void genCodeImpl(IRUnit& unit, AsmInfo* asmInfo) { auto regs = allocateRegs(unit); assert(checkRegisters(unit, regs)); // calls checkCfg internally. Timer _t(Timer::codeGen); LiveRegs live_regs = computeLiveRegs(unit, regs); CodegenState state(unit, regs, live_regs, asmInfo); // Returns: whether a block has already been emitted. DEBUG_ONLY auto isEmitted = [&](Block* block) { return state.addresses[block]; }; CodeBlock& mainCodeIn = mcg->code.main(); CodeBlock& coldCodeIn = mcg->code.cold(); CodeBlock* frozenCode = &mcg->code.frozen(); CodeBlock mainCode; CodeBlock coldCode; bool relocate = false; if (RuntimeOption::EvalJitRelocationSize && mcg->backEnd().supportsRelocation() && coldCodeIn.canEmit(RuntimeOption::EvalJitRelocationSize * 3)) { /* * This is mainly to exercise the relocator, and ensure that its * not broken by new non-relocatable code. Later, it will be * used to do some peephole optimizations, such as reducing branch * sizes. * Allocate enough space that the relocated cold code doesn't * overlap the emitted cold code. */ static unsigned seed = 42; auto off = rand_r(&seed) & (mcg->backEnd().cacheLineSize() - 1); coldCode.init(coldCodeIn.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocCold"); mainCode.init(coldCode.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocMain"); relocate = true; } else { /* * Use separate code blocks, so that attempts to use the mcg's * code blocks directly will fail (eg by overwriting the same * memory being written through these locals). 
*/ coldCode.init(coldCodeIn.frontier(), coldCodeIn.available(), coldCodeIn.name().c_str()); mainCode.init(mainCodeIn.frontier(), mainCodeIn.available(), mainCodeIn.name().c_str()); } if (frozenCode == &coldCodeIn) { frozenCode = &coldCode; } auto frozenStart = frozenCode->frontier(); auto coldStart DEBUG_ONLY = coldCodeIn.frontier(); auto mainStart DEBUG_ONLY = mainCodeIn.frontier(); auto bcMap = &mcg->cgFixups().m_bcMap; { mcg->code.lock(); mcg->cgFixups().setBlocks(&mainCode, &coldCode, frozenCode); SCOPE_EXIT { mcg->cgFixups().setBlocks(nullptr, nullptr, nullptr); mcg->code.unlock(); }; /* * Emit the given block on the supplied assembler. The `nextLinear' * is the next block that will be emitted on this assembler. If is * not the next block in control flow order, then emit a patchable jump * to the next flow block. */ auto emitBlock = [&](CodeBlock& cb, Block* block, Block* nextLinear) { assert(!isEmitted(block)); FTRACE(6, "genBlock {} on {}\n", block->id(), cb.base() == coldCode.base() ? "acold" : "a"); auto const aStart = cb.frontier(); auto const acoldStart = coldCode.frontier(); auto const afrozenStart = frozenCode->frontier(); mcg->backEnd().patchJumps(cb, state, block); state.addresses[block] = aStart; // If the block ends with a Jmp and the next block is going to be // its target, we don't need to actually emit it. 
IRInstruction* last = &block->back(); state.noTerminalJmp = last->op() == Jmp && nextLinear == last->taken(); if (state.asmInfo) { state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier()); } genBlock(unit, cb, coldCode, *frozenCode, state, block, bcMap); auto nextFlow = block->next(); if (nextFlow && nextFlow != nextLinear) { mcg->backEnd().emitFwdJmp(cb, nextFlow, state); } if (state.asmInfo) { state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier()); if (cb.base() != coldCode.base() && frozenCode != &coldCode) { state.asmInfo->acoldRanges[block] = TcaRange(acoldStart, coldCode.frontier()); } if (cb.base() != frozenCode->base()) { state.asmInfo->afrozenRanges[block] = TcaRange(afrozenStart, frozenCode->frontier()); } } }; if (RuntimeOption::EvalHHIRGenerateAsserts) { mcg->backEnd().emitTraceCall(mainCode, unit.bcOff()); } auto const linfo = layoutBlocks(unit); for (auto it = linfo.blocks.begin(); it != linfo.acoldIt; ++it) { Block* nextLinear = boost::next(it) != linfo.acoldIt ? *boost::next(it) : nullptr; emitBlock(mainCode, *it, nextLinear); } for (auto it = linfo.acoldIt; it != linfo.afrozenIt; ++it) { Block* nextLinear = boost::next(it) != linfo.afrozenIt ? *boost::next(it) : nullptr; emitBlock(coldCode, *it, nextLinear); } for (auto it = linfo.afrozenIt; it != linfo.blocks.end(); ++it) { Block* nextLinear = boost::next(it) != linfo.blocks.end() ? 
*boost::next(it) : nullptr; emitBlock(*frozenCode, *it, nextLinear); } if (debug) { for (Block* UNUSED block : linfo.blocks) { assert(isEmitted(block)); } } } assert(coldCodeIn.frontier() == coldStart); assert(mainCodeIn.frontier() == mainStart); if (relocate) { if (asmInfo) { printUnit(kRelocationLevel, unit, " before relocation ", ®s, asmInfo); } auto& be = mcg->backEnd(); RelocationInfo rel; be.relocate(rel, mainCodeIn, mainCode.base(), mainCode.frontier(), mcg->cgFixups()); be.relocate(rel, coldCodeIn, coldCode.base(), coldCode.frontier(), mcg->cgFixups()); if (frozenCode != &coldCode) { rel.recordRange(frozenStart, frozenCode->frontier(), frozenStart, frozenCode->frontier()); } be.adjustForRelocation(rel, mcg->cgFixups()); be.adjustForRelocation(rel, asmInfo, mcg->cgFixups()); if (asmInfo) { static int64_t mainDeltaTot = 0, coldDeltaTot = 0; int64_t mainDelta = (mainCodeIn.frontier() - mainStart) - (mainCode.frontier() - mainCode.base()); int64_t coldDelta = (coldCodeIn.frontier() - coldStart) - (coldCode.frontier() - coldCode.base()); mainDeltaTot += mainDelta; HPHP::Trace::traceRelease("main delta after relocation: %" PRId64 " (%" PRId64 ")\n", mainDelta, mainDeltaTot); coldDeltaTot += coldDelta; HPHP::Trace::traceRelease("cold delta after relocation: %" PRId64 " (%" PRId64 ")\n", coldDelta, coldDeltaTot); } #ifndef NDEBUG auto& ip = mcg->cgFixups().m_inProgressTailJumps; for (size_t i = 0; i < ip.size(); ++i) { const auto& ib = ip[i]; assert(!mainCode.contains(ib.toSmash())); assert(!coldCode.contains(ib.toSmash())); } memset(mainCode.base(), 0xcc, mainCode.frontier() - mainCode.base()); memset(coldCode.base(), 0xcc, coldCode.frontier() - coldCode.base()); #endif } else { coldCodeIn.skip(coldCode.frontier() - coldCodeIn.frontier()); mainCodeIn.skip(mainCode.frontier() - mainCodeIn.frontier()); } if (asmInfo) { printUnit(kCodeGenLevel, unit, " after code gen ", ®s, asmInfo); } }
/*
 * Pretty-print `block' (which lives in area `area') to `os': label,
 * predecessors, bytecode markers, phi pseudo-instructions, each
 * instruction with guard constraints, per-area disassembly when `asmInfo'
 * is provided, and the fallthrough target.
 *
 * `markerPtr', when non-null, carries the current BCMarker across
 * consecutive blocks so unchanged markers aren't re-printed.
 */
void print(std::ostream& os, const Block* block, AreaIndex area,
           const AsmInfo* asmInfo, const GuardConstraints* guards,
           BCMarker* markerPtr) {
  BCMarker dummy;
  BCMarker& curMarker = markerPtr ? *markerPtr : dummy;

  // Machine-code range in this block's own area that is still to be
  // attributed; shrinks from the front as instruction ranges are printed.
  TcaRange blockRange = asmInfo
    ? asmInfo->blockRangesForArea(area)[block]
    : TcaRange { nullptr, nullptr };

  os << '\n' << std::string(kIndent - 3, ' ');
  printLabel(os, block);
  os << punc(":");
  auto& preds = block->preds();
  if (!preds.empty()) {
    os << " (preds";
    for (auto const& edge : preds) {
      os << " B" << edge.from()->id();
    }
    os << ')';
  }
  os << "\n";

  if (block->empty()) {
    os << std::string(kIndent, ' ') << "empty block\n";
    return;
  }

  const char* markerEndl = "";
  for (auto it = block->begin(); it != block->end();) {
    auto& inst = *it; ++it;

    // Print bytecode-marker context only when it changes between
    // instructions.
    if (inst.marker() != curMarker) {
      std::ostringstream mStr;
      auto const& newMarker = inst.marker();
      if (!newMarker.hasFunc()) {
        os << color(ANSI_COLOR_BLUE)
           << std::string(kIndent, ' ')
           << "--- invalid marker"
           << color(ANSI_COLOR_END)
           << '\n';
      } else {
        auto func = newMarker.func();
        // Full function header only when the function itself changed.
        if (!curMarker.hasFunc() || func != curMarker.func()) {
          func->prettyPrint(mStr, Func::PrintOpts().noFpi());
        }
        mStr << std::string(kIndent, ' ')
             << newMarker.show()
             << '\n';

        auto bcOffset = newMarker.bcOff();
        func->unit()->prettyPrint(
          mStr, Unit::PrintOpts()
          .range(bcOffset, bcOffset+1)
          .noLineNumbers()
          .noFuncs()
          .indent(0));
        std::vector<std::string> vec;
        folly::split('\n', mStr.str(), vec);
        os << markerEndl;
        markerEndl = "\n";
        // Colorize each non-empty marker line.
        for (auto& s : vec) {
          if (s.empty()) continue;
          os << color(ANSI_COLOR_BLUE) << s << color(ANSI_COLOR_END) << '\n';
        }
      }

      curMarker = newMarker;
    }

    if (inst.op() == DefLabel) {
      // print phi pseudo-instructions
      for (unsigned i = 0, n = inst.numDsts(); i < n; ++i) {
        os << std::string(kIndent +
                          folly::format("({}) ", inst.id()).str().size(),
                          ' ');
        auto dst = inst.dst(i);
        jit::print(os, dst);
        os << punc(" = ") << color(ANSI_COLOR_CYAN) << "phi "
           << color(ANSI_COLOR_END);
        bool first = true;
        // One "src@pred" entry per incoming jump edge.
        inst.block()->forEachSrc(i, [&](IRInstruction* jmp, SSATmp*) {
          if (!first) os << punc(", ");
          first = false;
          printSrc(os, jmp, i);
          os << punc("@");
          printLabel(os, jmp->block());
        });
        os << '\n';
      }
    }

    os << std::string(kIndent, ' ');
    jit::print(os, &inst, guards);
    os << '\n';

    if (asmInfo) {
      // There can be asm ranges in areas other than the one this blocks claims
      // to be in so we have to iterate all the areas to be sure to get
      // everything.
      for (auto i = 0; i < kNumAreas; ++i) {
        AreaIndex currentArea = static_cast<AreaIndex>(i);
        TcaRange instRange = asmInfo->instRangesForArea(currentArea)[inst];
        if (!instRange.empty()) {
          os << std::string(kIndent + 4, ' ') << areaAsString(currentArea);
          os << ":\n";
          disasmRange(os, instRange.begin(), instRange.end());
          os << '\n';
          // Only the block's own area participates in the running
          // blockRange bookkeeping.
          if (currentArea == area) {
            // FIXME: this used to be an assertion
            auto things_are_ok =
              instRange.end() >= blockRange.start() &&
              instRange.end() <= blockRange.end();
            if (things_are_ok) {
              blockRange = TcaRange(instRange.end(), blockRange.end());
            } else {
              // Don't crash; do something broken instead.
              os << "<note: print range is probably incorrect right now>\n";
            }
          }
        }
      }
    }
  }

  if (asmInfo) {
    // Print code associated with the block that isn't tied to any instruction.
    if (!blockRange.empty()) {
      os << std::string(kIndent, ' ') << punc("A:") << "\n";
      disasmRange(os, blockRange.start(), blockRange.end());
      os << '\n';
    }
  }

  os << std::string(kIndent - 2, ' ');
  auto next = block->empty() ? nullptr : block->next();
  if (next) {
    os << punc("-> ");
    printLabel(os, next);
    os << '\n';
  } else {
    os << "no fallthrough\n";
  }
}