Example #1
void emitFuncGuard(const Func* func, CodeBlock& cb) {
  using namespace reg;
  X64Assembler a { cb };

  assertx(x64::abi(CodeKind::CrossTrace).gpUnreserved.contains(rax));

  TCA start DEBUG_ONLY = a.frontier();

  auto const funcImm = Immed64(func);

  if (funcImm.fits(sz::dword)) {
    emitSmashableCmpq(a.code(), funcImm.l(), rVmFp,
                      safe_cast<int8_t>(AROFF(m_func)));
  } else {
    // Although func doesn't fit in a signed 32-bit immediate, it may still fit
    // in an unsigned one.  Rather than deal with yet another case (which only
    // happens when we disable jemalloc), just emit a smashable mov followed by
    // a register cmp.
    emitSmashableMovq(a.code(), uint64_t(func), rax);
    a.  cmpq   (rax, rVmFp[AROFF(m_func)]);
  }
  a.    jnz    (mcg->tx().uniqueStubs.funcPrologueRedispatch);

  assertx(funcPrologueToGuard(a.frontier(), func) == start);
  assertx(funcPrologueHasGuard(a.frontier(), func));
}
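The two emitSmashable* calls above emit instructions whose immediates can later be rewritten ("smashed") in place, for example when the guard is retargeted. Below is a minimal standalone sketch of that idea for the movq case; the helper names and the plain store standing in for the atomic patch are assumptions for illustration, not HHVM's implementation.

#include <cstdint>
#include <cstring>

// Emit `movabs $imm, %rax` (bytes: 48 b8 imm64) into buf and return the
// address of the embedded 8-byte immediate so it can be patched later.
inline uint64_t* emitPatchableMovqRax(uint8_t* buf, uint64_t imm) {
  buf[0] = 0x48;                          // REX.W prefix
  buf[1] = 0xb8;                          // MOV rax, imm64
  std::memcpy(buf + 2, &imm, sizeof(imm));
  return reinterpret_cast<uint64_t*>(buf + 2);
}

// "Smash" the immediate. The real emitter aligns the immediate so this
// overwrite is a single atomic store on x86-64, which is what makes it
// safe to patch code that other threads may be executing.
inline void smashImmediate(uint64_t* immAddr, uint64_t newImm) {
  *immAddr = newImm;
}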
Example #2
void emitFuncGuard(const Func* func, CodeBlock& cb) {
  using namespace reg;
  X64Assembler a { cb };

  assertx(cross_trace_abi.gpUnreserved.contains(rax));

  auto const funcImm = Immed64(func);
  int nbytes, offset;

  if (!funcImm.fits(sz::dword)) {
    nbytes = kFuncGuardSmash;
    offset = kFuncGuardImm;
  } else {
    nbytes = kFuncGuardShortSmash;
    offset = kFuncGuardShortImm;
  }
  mcg->backEnd().prepareForSmash(a.code(), nbytes, offset);

  TCA start DEBUG_ONLY = a.frontier();

  if (!funcImm.fits(sz::dword)) {
    // Although func doesn't fit in a signed 32-bit immediate, it may still fit
    // in an unsigned one.  Rather than deal with yet another case (which only
    // happens when we disable jemalloc), just force it to be an 8-byte
    // immediate, and patch it up afterwards.
    a.  movq   (0xdeadbeeffeedface, rax);

    auto immptr = reinterpret_cast<uintptr_t*>(a.frontier()) - 1;
    assertx(*immptr == 0xdeadbeeffeedface);
    *immptr = uintptr_t(func);

    a.  cmpq   (rax, rVmFp[AROFF(m_func)]);
  } else {
    a.  cmpq   (funcImm.l(), rVmFp[AROFF(m_func)]);
  }
  a.    jnz    (mcg->tx().uniqueStubs.funcPrologueRedispatch);

  assertx(funcPrologueToGuard(a.frontier(), func) == start);
  assertx(funcPrologueHasGuard(a.frontier(), func));
}
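The prepareForSmash call is what makes this guard patchable in place: it pads the code block so that the immediate, `offset` bytes into the `nbytes`-long instruction sequence, lands where a later overwrite can be a single atomic store. A hypothetical standalone version of that padding, assuming an 8-byte alignment requirement on the immediate (the helper name and the exact constraint are illustrative, not HHVM's):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Pad with single-byte nops until the immediate that will sit at
// frontier + immOffset is 8-byte aligned, so patching it later is one
// atomic store. Hypothetical helper; the real logic lives in the backend.
inline uint8_t* padForSmashableImm(uint8_t* frontier, size_t immOffset) {
  auto const addr = reinterpret_cast<uintptr_t>(frontier) + immOffset;
  auto const pad = (8 - addr % 8) % 8;
  std::memset(frontier, 0x90, pad);       // 0x90 = nop
  return frontier + pad;
}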
Example #3
StoreImmPatcher::StoreImmPatcher(X64Assembler& as, uint64_t initial,
                                 RegNumber reg,
                                 int32_t offset, RegNumber base) {
  m_is32 = deltaFits(initial, sz::dword);
  if (m_is32) {
    as.store_imm64_disp_reg64(initial, offset, base);
  } else {
    as.mov_imm64_reg(initial, reg);
    as.store_reg64_disp_reg64(reg, offset, base);
  }
  m_addr = as.frontier() - (m_is32 ? 4 : 8);
  assert((m_is32 ?  (uint64_t)*(int32_t*)m_addr : *(uint64_t*)m_addr)
         == initial);
}
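The constructor above emits a store of `initial` to `offset(base)` and records in m_addr where the immediate bytes begin (4 bytes back from the frontier for a 32-bit store, 8 for the movabs form), so the stored value can be rewritten later. A sketch of the patching half, mirroring the constructor's width handling; the actual member function in the HHVM source may differ:

void StoreImmPatcher::patch(uint64_t actual) {
  if (m_is32) {
    // The new value must still fit the 32-bit slot that was emitted.
    assert(deltaFits(actual, sz::dword));
    *reinterpret_cast<int32_t*>(m_addr) = static_cast<int32_t>(actual);
  } else {
    *reinterpret_cast<uint64_t*>(m_addr) = actual;
  }
}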
Example #4
TCA emitCallToExit(CodeBlock& cb) {
  X64Assembler a { cb };

  // Emit a byte of padding. This is a kind of hacky way to avoid
  // hitting an assert in recordGdbStub when we call it with stub - 1
  // as the start address.
  a.emitNop(1);
  auto const start = a.frontier();
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    Label ok;
    a.emitImmReg(uintptr_t(enterTCExit), reg::rax);
    a.cmpq(reg::rax, *rsp());
    a.je8 (ok);
    a.ud2();
  asm_label(a, ok);
  }

  // Emulate a ret to enterTCExit without actually doing one to avoid
  // unbalancing the return stack buffer. The call from enterTCHelper() that
  // got us into the TC was popped off the RSB by the ret that got us to this
  // stub.
  a.addq(8, rsp());
  if (a.jmpDeltaFits(TCA(enterTCExit))) {
    a.jmp(TCA(enterTCExit));
  } else {
    // We can't do a near jmp, and a rip-relative load/jmp would require
    // threading extra state through to allocate a literal, so use an
    // indirect jump through a register instead.
    a.emitImmReg(uintptr_t(enterTCExit), reg::rax);
    a.jmp(reg::rax);
  }

  // On a backtrace, gdb tries to locate the calling frame at address
  // returnRIP-1. However, for the first VM frame, there is no code at
  // returnRIP-1, since the AR was set up manually. For this frame,
  // record the tracelet address as starting from this callToExit-1,
  // so gdb does not barf.
  return start;
}
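The jmpDeltaFits branch exists because a 5-byte `jmp rel32` can only reach targets within a signed 32-bit displacement of the instruction's end; otherwise the stub falls back to the indirect jump through rax. A hypothetical standalone version of that range check:

#include <cstdint>
#include <limits>

// True if `target` is reachable from a 5-byte near jmp emitted at
// `frontier`; the rel32 displacement is relative to the next instruction.
inline bool nearJmpReaches(uintptr_t frontier, uintptr_t target) {
  auto const delta =
      static_cast<int64_t>(target) - static_cast<int64_t>(frontier + 5);
  return delta >= std::numeric_limits<int32_t>::min() &&
         delta <= std::numeric_limits<int32_t>::max();
}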
Example #5
void emitFuncGuard(const Func* func, CodeBlock& cb, CGMeta& fixups) {
  using namespace reg;
  X64Assembler a { cb };

  assertx(x64::abi(CodeKind::CrossTrace).gpUnreserved.contains(rax));

  auto const funcImm = Immed64(func);

  if (funcImm.fits(sz::dword)) {
    emitSmashableCmpq(a.code(), fixups, funcImm.l(), rvmfp(),
                      safe_cast<int8_t>(AROFF(m_func)));
  } else {
    // Although func doesn't fit in a signed 32-bit immediate, it may still fit
    // in an unsigned one.  Rather than deal with yet another case (which only
    // happens when we disable jemalloc), just emit a smashable mov followed by
    // a register cmp.
    emitSmashableMovq(a.code(), fixups, uint64_t(func), rax);
    a.  cmpq   (rax, rvmfp()[AROFF(m_func)]);
  }
  a.    jnz    (tc::ustubs().funcPrologueRedispatch);

  DEBUG_ONLY auto guard = funcGuardFromPrologue(a.frontier(), func);
  assertx(funcGuardMatches(guard, func));
}
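This newer variant of Example #1 threads a CGMeta through, so the smashable locations are recorded in `fixups` for relocation to find later. On the fast path the guard boils down to a single `cmpq $imm32, disp8(%rbp)` (rvmfp() is rbp in this ABI). A standalone sketch of that instruction's byte layout, which is what keeps the Func* immediate at a fixed, patchable offset; the helper is illustrative, not HHVM's emitter:

#include <cstdint>
#include <cstring>

// Emit `cmpq $imm32, disp8(%rbp)` (REX.W 81 /7): bytes 48 81 7d disp8
// imm32, leaving the 4-byte immediate at offset 4 from the start of the
// instruction.
inline uint8_t* emitCmpqImm32Disp8Rbp(uint8_t* buf, int8_t disp, int32_t imm) {
  *buf++ = 0x48;                          // REX.W: 64-bit operand size
  *buf++ = 0x81;                          // CMP r/m64, imm32
  *buf++ = 0x7d;                          // ModRM: mod=01 (disp8), /7, rm=rbp
  *buf++ = static_cast<uint8_t>(disp);
  std::memcpy(buf, &imm, sizeof(imm));
  return buf + sizeof(imm);
}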