/*
 * Register a freshly-emitted translation's [start, end) range with the
 * debugger (gdb) and perf-pid-map facilities so native tools can symbolize
 * JITed code. Does nothing for an empty range; requires the code lock.
 */
void recordGdbTranslation(SrcKey sk, const Func* srcFunc, const CodeBlock& cb,
                          const TCA start, const TCA end, bool exit,
                          bool inPrologue) {
  assertx(cb.contains(start) && cb.contains(end));
  if (start == end) return;

  assertOwnsCodeLock();

  if (!RuntimeOption::EvalJitNoGdb) {
    // The TCRange flags whether this range lives in the cold section.
    Debug::DebugInfo::Get()->recordTracelet(
      Debug::TCRange(start, end, &cb == &code().cold()),
      srcFunc,
      srcFunc->unit() ? srcFunc->unit()->at(sk.offset()) : nullptr,
      exit,
      inPrologue
    );
  }

  if (RuntimeOption::EvalPerfPidMap) {
    Debug::DebugInfo::Get()->recordPerfMap(
      Debug::TCRange(start, end, &cb == &code().cold()),
      sk,
      srcFunc,
      exit,
      inPrologue
    );
  }
}
/* * Reclaim all translations associated with a SrcRec including the anchor * translation. */ void reclaimSrcRec(const SrcRec* rec) { assertOwnsCodeLock(); assertOwnsMetadataLock(); ITRACE(1, "Reclaiming SrcRec addr={} anchor={}\n", (void*)rec, rec->getFallbackTranslation()); Trace::Indent _i; auto anchor = rec->getFallbackTranslation(); code().blockFor(anchor).free(anchor, svcreq::stub_size()); for (auto& loc : rec->translations()) { reclaimTranslation(loc); } }
void relocate(std::vector<TransRelocInfo>& relocs, CodeBlock& dest, CGMeta& fixups) { assertOwnsCodeLock(); assert(!Func::s_treadmill); auto newRelocMapName = Debug::DebugInfo::Get()->getRelocMapName() + ".tmp"; auto newRelocMap = fopen(newRelocMapName.c_str(), "w+"); if (!newRelocMap) return; SCOPE_EXIT { if (newRelocMap) fclose(newRelocMap); }; Func::s_treadmill = true; SCOPE_EXIT { Func::s_treadmill = false; }; auto ignoreEntry = [](const SrcKey& sk) { // We can have entries such as UniqueStubs with no SrcKey // These are ok to process. if (!sk.valid()) return false; // But if the func has been removed from the AtomicHashMap, // we don't want to process it. return !Func::isFuncIdValid(sk.funcID()); }; RelocationInfo rel; size_t num = 0; assert(fixups.alignments.empty()); for (size_t sz = relocs.size(); num < sz; num++) { auto& reloc = relocs[num]; if (ignoreEntry(reloc.sk)) continue; auto start DEBUG_ONLY = dest.frontier(); try { x64::relocate(rel, dest, reloc.start, reloc.end, reloc.fixups, nullptr); } catch (const DataBlockFull& dbf) { break; } if (Trace::moduleEnabledRelease(Trace::mcg, 1)) { Trace::traceRelease( folly::sformat("Relocate: 0x{:08x}+0x{:04x} => 0x{:08x}+0x{:04x}\n", (uintptr_t)reloc.start, reloc.end - reloc.start, (uintptr_t)start, dest.frontier() - start)); } } swap_trick(fixups.alignments); assert(fixups.empty()); x64::adjustForRelocation(rel); for (size_t i = 0; i < num; i++) { if (!ignoreEntry(relocs[i].sk)) { x64::adjustMetaDataForRelocation(rel, nullptr, relocs[i].fixups); } } for (size_t i = 0; i < num; i++) { if (!ignoreEntry(relocs[i].sk)) { relocs[i].fixups.process_only(nullptr); } } // At this point, all the relocated code should be correct, and runable. // But eg if it has unlikely paths into cold code that has not been relocated, // then the cold code will still point back to the original, not the relocated // versions. Similarly reusable stubs will still point to the old code. 
// Since we can now execute the relocated code, its ok to start fixing these // things now. for (auto& it : srcDB()) { it.second->relocate(rel); } std::unordered_set<Func*> visitedFuncs; CodeSmasher s; for (size_t i = 0; i < num; i++) { auto& reloc = relocs[i]; if (ignoreEntry(reloc.sk)) continue; for (auto& ib : reloc.incomingBranches) { ib.relocate(rel); } if (!reloc.sk.valid()) continue; auto f = const_cast<Func*>(reloc.sk.func()); x64::adjustCodeForRelocation(rel, reloc.fixups); reloc.fixups.clear(); // fixup code references in the corresponding cold block to point // to the new code x64::adjustForRelocation(rel, reloc.coldStart, reloc.coldEnd); if (visitedFuncs.insert(f).second) { if (auto adjusted = rel.adjustedAddressAfter(f->getFuncBody())) { f->setFuncBody(adjusted); } int num = Func::getMaxNumPrologues(f->numParams()); if (num < kNumFixedPrologues) num = kNumFixedPrologues; while (num--) { auto addr = f->getPrologue(num); if (auto adjusted = rel.adjustedAddressAfter(addr)) { f->setPrologue(num, adjusted); } } } if (reloc.end != reloc.start) { s.entries.emplace_back(reloc.start, reloc.end); } } auto relocMap = Debug::DebugInfo::Get()->getRelocMap(); always_assert(relocMap); fseek(relocMap, 0, SEEK_SET); auto deadStubs = getFreeTCStubs(); auto param = PostProcessParam { rel, deadStubs, newRelocMap }; std::set<TCA> liveStubs; readRelocations(relocMap, &liveStubs, postProcess, ¶m); // ensure that any reusable stubs are updated for the relocated code for (auto stub : liveStubs) { FTRACE(1, "Stub: 0x{:08x}\n", (uintptr_t)stub); fixups.reusedStubs.emplace_back(stub); always_assert(!rel.adjustedAddressAfter(stub)); fprintf(newRelocMap, "%" PRIxPTR " 0 %s\n", uintptr_t(stub), "NewStub"); } x64::adjustCodeForRelocation(rel, fixups); unlink(Debug::DebugInfo::Get()->getRelocMapName().c_str()); rename(newRelocMapName.c_str(), Debug::DebugInfo::Get()->getRelocMapName().c_str()); fclose(newRelocMap); newRelocMap = nullptr; 
freopen(Debug::DebugInfo::Get()->getRelocMapName().c_str(), "r+", relocMap); fseek(relocMap, 0, SEEK_END); okToRelocate = false; Treadmill::enqueue(std::move(s)); }