Example #1
void SrcRec::patch(Asm* a, IncomingBranch branch, TCA dest) {
  if (branch.m_type == IncomingBranch::ADDR) {
    // Note that this effectively ignores the Asm argument (a).
    atomic_release_store(branch.m_addr, dest);
    return;
  }

  // modifying reachable code
  switch (branch.m_type) {
    case IncomingBranch::JMP: {
      CodeCursor cg(*a, branch.m_src);
      TranslatorX64::smashJmp(*a, branch.m_src, dest);
      break;
    }
    case IncomingBranch::JCC: {
      // patch destination, but preserve the condition code
      int32_t delta = safe_cast<int32_t>((dest - branch.m_src) -
                                         TranslatorX64::kJmpccLen);
      int32_t* addr = (int32_t*)(branch.m_src + TranslatorX64::kJmpccLen - 4);
      atomic_release_store(addr, delta);
      break;
    }
    default:
      not_implemented();
  }
}
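
The JCC case above is worth unpacking: an x64 jcc rel32 displacement is
relative to the end of the instruction, and the 4-byte immediate sits in
the instruction's last four bytes. A minimal sketch of that math, assuming
kJmpccLen is the 6-byte "0F 8x rel32" encoding (patchJccTarget is a name
invented here, and the real code uses an atomic release store rather than
memcpy so concurrent readers see a consistent immediate):

#include <cstdint>
#include <cstring>

constexpr int kJmpccLen = 6;  // assumed "0F 8x rel32" jcc encoding

void patchJccTarget(uint8_t* src, uint8_t* dest) {
  // rel32 is relative to the *next* instruction, i.e. src + kJmpccLen,
  // which gives (dest - src) - kJmpccLen.
  int32_t delta = static_cast<int32_t>(dest - (src + kJmpccLen));
  // The immediate occupies the last four bytes of the instruction.
  std::memcpy(src + kJmpccLen - 4, &delta, sizeof delta);
}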
Example #2
void SrcRec::replaceOldTranslations() {
  // Everyone needs to give up on old translations; send them to the anchor,
  // which is a REQ_RETRANSLATE.
  m_translations.clear();
  m_tailFallbackJumps.clear();
  atomic_release_store(&m_topTranslation, static_cast<TCA>(0));

  /*
   * It may seem a little weird that we're about to point every
   * incoming branch at the anchor, since that's going to just
   * unconditionally retranslate this SrcKey and never patch the
   * incoming branch to do something else.
   *
   * The reason this is ok is that this mechanism is only used in
   * non-RepoAuthoritative mode, and the granularity of code
   * invalidation there is such that incoming branches like this will
   * essentially only come from within the same file, since we don't
   * do whole-program analysis.
   *
   * This means all these incoming branches are about to go away
   * anyway ...
   *
   * If we ever change that, we'll have to change this to patch the
   * branches to some sort of rebind request instead.
   */
  assert(!RuntimeOption::RepoAuthoritative || RuntimeOption::EvalJitPGO);
  patchIncomingBranches(m_anchorTranslation);
}
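
patchIncomingBranches itself does not appear among these examples; from
its call sites, a plausible sketch (assumed, not necessarily HHVM's
actual implementation) is a loop that redirects every recorded incoming
branch at the new destination:

void SrcRec::patchIncomingBranches(TCA newDest) {
  // Smash every branch that jumps into this SrcKey's translations.
  for (auto& branch : m_incomingBranches) {
    patch(branch, newDest);
  }
}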
Example #3
void SrcRec::replaceOldTranslations(Asm& a, Asm& astubs) {
  // Everyone needs to give up on old translations; send them to the anchor,
  // which is a REQ_RETRANSLATE
  m_translations.clear();
  m_tailFallbackJumps.clear();
  atomic_release_store(&m_topTranslation, static_cast<TCA>(0));
  patchIncomingBranches(a, astubs, m_anchorTranslation);
}
Example #4
void SrcRec::patch(IncomingBranch branch, TCA dest) {
  switch (branch.type()) {
  case IncomingBranch::Tag::JMP: {
    auto& a = tx64->getAsmFor(branch.toSmash());
    CodeCursor cg(a, branch.toSmash());
    TranslatorX64::smashJmp(a, branch.toSmash(), dest);
    break;
  }

  case IncomingBranch::Tag::JCC: {
    // patch destination, but preserve the condition code
    int32_t delta = safe_cast<int32_t>((dest - branch.toSmash()) - kJmpccLen);
    int32_t* addr = (int32_t*)(branch.toSmash() + kJmpccLen - 4);
    atomic_release_store(addr, delta);
    break;
  }

  case IncomingBranch::Tag::ADDR:
    // An ADDR branch is a pointer-sized data slot, not an instruction;
    // just store the new target address directly.
    atomic_release_store(reinterpret_cast<TCA*>(branch.toSmash()), dest);
  }
}
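
atomic_release_store is an HHVM utility not shown on this page. A minimal
stand-in with the assumed semantics (a release-ordered word store, so a
racing reader that observes the new value also observes every write made
before it) could use the GCC/Clang builtin:

// Assumed semantics of HHVM's atomic_release_store helper.
template <typename T>
void atomic_release_store(T* ptr, T val) {
  __atomic_store_n(ptr, val, __ATOMIC_RELEASE);
}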
Example #5
void SrcRec::addDebuggerGuard(TCA dbgGuard, TCA dbgBranchGuardSrc) {
  assert(!m_dbgBranchGuardSrc);

  TRACE(1, "SrcRec(%p)::addDebuggerGuard @%p, "
        "%zd incoming branches to rechain\n",
        this, dbgGuard, m_incomingBranches.size());

  patchIncomingBranches(dbgGuard);

  // Set m_dbgBranchGuardSrc after patching, so we don't try to patch
  // the debug guard.
  m_dbgBranchGuardSrc = dbgBranchGuardSrc;
  atomic_release_store(&m_topTranslation, dbgGuard);
}
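
The release store into m_topTranslation only establishes ordering if
readers pair it with an acquire load. A sketch of the assumed consumer
side (HHVM exposes an accessor along these lines, but the exact name and
helper used here are assumptions):

TCA SrcRec::getTopTranslation() const {
  // An acquire load guarantees that a thread seeing the new top
  // translation also sees the fully patched debug guard.
  return __atomic_load_n(&m_topTranslation, __ATOMIC_ACQUIRE);
}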
Example #6
void SrcRec::patch(IncomingBranch branch, TCA dest) {
  switch (branch.type()) {
  case IncomingBranch::Tag::JMP: {
    JIT::smashJmp(branch.toSmash(), dest);
    break;
  }

  case IncomingBranch::Tag::JCC: {
    JIT::smashJcc(branch.toSmash(), dest);
    break;
  }

  case IncomingBranch::Tag::ADDR:
    // An ADDR branch is a pointer-sized data slot, not an instruction;
    // just store the new target address directly.
    atomic_release_store(reinterpret_cast<TCA*>(branch.toSmash()), dest);
  }
}
Example #7
void SrcRec::patch(Asm* a, IncomingBranch branch, TCA dest) {
  if (branch.m_type == IncomingBranch::ADDR) {
    // Note that this effectively ignores the Asm argument (a).
    atomic_release_store(branch.m_addr, dest);
    return;
  }

  CodeCursor cg(*a, branch.m_src);

  // modifying reachable code
  switch (branch.m_type) {
  case IncomingBranch::JMP:
    TranslatorX64::smash(*a, branch.m_src, dest);
    break;
  case IncomingBranch::JCC:
    ASSERT(TranslatorX64::isSmashable(*a,
                                      TranslatorX64::kJmpccLen));
    a->jcc(branch.m_cc, dest);
    break;
  default:
    not_implemented();
  }
}
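
The ASSERT relies on the jcc being smashable in place: the usual x64
requirement is that the rewritten bytes not cross a cache-line boundary,
so a concurrent instruction fetch sees either the old or the new encoding
in full. A sketch of that check (the 64-byte line size and the helper
name are assumptions, not HHVM's exact signature):

#include <cstddef>
#include <cstdint>

bool isSmashableAt(uintptr_t frontier, size_t nBytes,
                   size_t lineSize = 64) {
  // The patched bytes must fit within a single cache line for the
  // rewrite to be atomic with respect to instruction fetch.
  return frontier / lineSize == (frontier + nBytes - 1) / lineSize;
}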
Example #8
void SrcRec::newTranslation(Asm& a, Asm& astubs, TCA newStart) {
  // When translation punts due to hitting the translation limit, we will
  // generate one more translation that will call the interpreter.
  ASSERT(m_translations.size() <= kMaxTranslations);

  TRACE(1, "SrcRec(%p)::newTranslation @%p, ", this, newStart);

  m_translations.push_back(newStart);
  if (!m_topTranslation) {
    atomic_release_store(&m_topTranslation, newStart);
    patchIncomingBranches(a, astubs, newStart);
  }

  /*
   * Link all the jumps from the current tail translation to this new
   * guy.
   *
   * It's (mostly) ok if someone is running in this code while we do
   * this: since we hold the write lease, they'll jump to the anchor
   * instead, issue REQ_RETRANSLATE, fail to acquire the write lease,
   * and fall back to the interpreter.  FIXME: Unfortunately, right
   * now, in an unlikely race another thread could create another
   * translation with the same type specialization that we just
   * created in this case.  (If we happen to release the write lease
   * after they jump but before they get into REQ_RETRANSLATE, they'll
   * acquire it and generate a translation, possibly for this same
   * situation.)
   */
  for (size_t i = 0; i < m_tailFallbackJumps.size(); ++i) {
    Asm& as = Asm::Choose(a, astubs, m_tailFallbackJumps[i].m_src);
    patch(&as, m_tailFallbackJumps[i], newStart);
  }

  // This is the new tail translation, so store the fallback jump list
  // in case we translate this again.
  m_tailFallbackJumps.swap(m_inProgressTailJumps);
  m_inProgressTailJumps.clear();
}
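
To make the tail-jump chaining concrete, here is a hypothetical sequence
of two calls (tr1 and tr2 are invented names for successive translation
start addresses):

// newTranslation(a, astubs, tr1);
//   m_tailFallbackJumps is empty on the first call, so the patch loop
//   is a no-op; the final swap installs tr1's in-progress jumps as the
//   tail set.
// newTranslation(a, astubs, tr2);
//   the loop smashes tr1's fallback jumps to target tr2; the swap then
//   installs tr2's jumps as the new tail set, and clear() drops the
//   now-dead record of tr1's jumps.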