Example #1
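// Record the main and cold code ranges of this translation so it can later be
// relocated for perf analysis; a no-op unless RuntimeOption::EvalPerfRelocate
// is set.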
void recordRelocationMetaData(SrcKey sk, SrcRec& srcRec, const TransLoc& loc,
                              CGMeta& fixups) {
  if (!RuntimeOption::EvalPerfRelocate) return;

  recordPerfRelocMap(loc.mainStart(), loc.mainEnd(),
                     loc.coldCodeStart(), loc.coldEnd(),
                     sk, -1,
                     srcRec.tailFallbackJumps(),
                     fixups);
}
Example #2
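// Move any reused service-request stubs that live inside this translation to
// fresh stubs at the frozen frontier, record the untouched frozen ranges as
// unmoved, and then adjust code and metadata for the relocation.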
void relocateStubs(TransLoc& loc, TCA frozenStart, TCA frozenEnd,
                   RelocationInfo& rel, CodeCache::View cache,
                   CGMeta& fixups) {
  auto const stubSize = svcreq::stub_size();

  for (auto addr : fixups.reusedStubs) {
    if (!loc.contains(addr)) continue;
    always_assert(frozenStart <= addr);

    CodeBlock dest;
    dest.init(cache.frozen().frontier(), stubSize, "New Stub");
    x64::relocate(rel, dest, addr, addr + stubSize, fixups, nullptr);
    cache.frozen().skip(stubSize);
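    // Code between the end of the previous stub (or the original frozenStart)
    // and this stub stays where it is; record it as mapping to itself.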
    if (addr != frozenStart) {
      rel.recordRange(frozenStart, addr, frozenStart, addr);
    }
    frozenStart = addr + stubSize;
  }
  if (frozenStart != frozenEnd) {
    rel.recordRange(frozenStart, frozenEnd, frozenStart, frozenEnd);
  }

  x64::adjustForRelocation(rel);
  x64::adjustMetaDataForRelocation(rel, nullptr, fixups);
  x64::adjustCodeForRelocation(rel, fixups);
}
Example #3
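// Remember the TC location of a prologue emitted for this function; requires
// the write lease to be held.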
void recordFuncPrologue(const Func* func, TransLoc loc) {
  assertx(Translator::WriteLease().amOwner());

  FTRACE(1, "Recording Prologue for func {} (id = {}) main={}\n",
         func->fullName()->data(), func->getFuncId(), loc.mainStart());
  s_funcTCData[func].prologues.emplace_back(loc);
}
Example #4
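// Build a TransLoc from this TransRange. The cold and frozen start pointers
// are backed up by sizeof(uint32_t) so they include the size word stored in
// front of each of those ranges (see the assertions below).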
TransLoc TransRange::loc() const {
  TransLoc loc;
  loc.setMainStart(main.begin());
  loc.setColdStart(cold.begin() - sizeof(uint32_t));
  loc.setFrozenStart(frozen.begin() - sizeof(uint32_t));
  loc.setMainSize(main.size());

  assertx(loc.coldCodeSize() == cold.size());
  assertx(loc.frozenCodeSize() == frozen.size());
  return loc;
}
Example #5
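// Register a freshly emitted translation with this SrcRec: publish it as the
// top translation if it is the first one, redirect the previous tail
// translation's fallback jumps to it, and adopt its fallback jumps as the new
// tail's.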
void SrcRec::newTranslation(TransLoc loc,
                            GrowableVector<IncomingBranch>& tailBranches) {
  // When a translation punts due to hitting the translation limit, we will
  // generate one more translation that calls the interpreter.
  assertx(m_translations.size() <=
          std::max(RuntimeOption::EvalJitMaxProfileTranslations,
                   RuntimeOption::EvalJitMaxTranslations));

  TRACE(1, "SrcRec(%p)::newTranslation @%p, ", this, loc.mainStart());

  m_translations.push_back(loc);
  if (!m_topTranslation.get()) {
    m_topTranslation = loc.mainStart();
    patchIncomingBranches(loc.mainStart());
  }

  /*
   * Link all the jumps from the current tail translation to this new
   * translation.
   *
   * It's (mostly) ok if someone is running in this code while we do
   * this: we hold the write lease, so they'll jump to the anchor
   * instead, do REQ_RETRANSLATE, and, failing to get the write lease,
   * fall back to the interpreter.  FIXME: Unfortunately, right now, in
   * an unlikely race another thread could create another translation
   * with the same type specialization that we just created.  (If we
   * happen to release the write lease after they jump but before they
   * get into REQ_RETRANSLATE, they'll acquire it and generate a
   * translation, possibly for this same situation.)
   */
  for (auto& br : m_tailFallbackJumps) {
    br.patch(loc.mainStart());
  }

  // This is the new tail translation, so store the fallback jump list
  // in case we translate this again.
  m_tailFallbackJumps.swap(tailBranches);
}
Example #6
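// Return a translation's main, cold, and frozen regions to the code cache,
// drop any smashed-branch and TC metadata that refers to them, and (in debug
// builds) overwrite the freed code with trapping instructions.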
void reclaimTranslation(TransLoc loc) {
  BlockingLeaseHolder writer(Translator::WriteLease());

  ITRACE(1, "Reclaiming translation M[{}, {}] C[{}, {}] F[{}, {}]\n",
         loc.mainStart(), loc.mainEnd(), loc.coldStart(), loc.coldEnd(),
         loc.frozenStart(), loc.frozenEnd());

  Trace::Indent _i;

  auto& cache = mcg->code();
  cache.blockFor(loc.mainStart()).free(loc.mainStart(), loc.mainSize());
  cache.blockFor(loc.coldStart()).free(loc.coldStart(), loc.coldSize());
  if (loc.coldStart() != loc.frozenStart()) {
    cache.blockFor(loc.frozenStart()).free(loc.frozenStart(), loc.frozenSize());
  }

  for (auto it = s_smashedBranches.begin(); it != s_smashedBranches.end();) {
    auto br = it++;
    if (loc.contains(br->first)) {
      ITRACE(1, "Erasing smashed branch @ {} from SrcRec addr={}\n",
             br->first, (void*)br->second);
      br->second->removeIncomingBranch(br->first);
      s_smashedBranches.erase(br);
    }
  }

  // Erase meta-data about these regions of the TC
  {
    ITRACE(1, "Clearing translation meta-data\n");
    Trace::Indent _i;
    clearTCMaps(loc.mainStart(), loc.mainEnd());
    clearTCMaps(loc.coldCodeStart(), loc.coldEnd());
    clearTCMaps(loc.frozenCodeStart(), loc.frozenEnd());
  }

  if (debug) {
    // Ensure no one calls into the function
    ITRACE(1, "Overwriting function\n");
    auto clearBlock = [] (CodeBlock& cb) {
      X64Assembler a {cb};
      while (cb.available() >= 2) a.ud2();
      if (cb.available() > 0) a.int3();
      always_assert(!cb.available());
    };

    CodeBlock main, cold, frozen;
    main.init(loc.mainStart(), loc.mainSize(), "Dead Main");
    cold.init(loc.coldStart(), loc.coldSize(), "Dead Cold");
    frozen.init(loc.frozenStart(), loc.frozenSize(), "Dead Frozen");

    clearBlock(main);
    clearBlock(cold);
    clearBlock(frozen);
  }
}