void emitFuncGuard(const Func* func, CodeBlock& cb) { using namespace reg; X64Assembler a { cb }; assertx(x64::abi(CodeKind::CrossTrace).gpUnreserved.contains(rax)); TCA start DEBUG_ONLY = a.frontier(); auto const funcImm = Immed64(func); if (funcImm.fits(sz::dword)) { emitSmashableCmpq(a.code(), funcImm.l(), rVmFp, safe_cast<int8_t>(AROFF(m_func))); } else { // Although func doesn't fit in a signed 32-bit immediate, it may still fit // in an unsigned one. Rather than deal with yet another case (which only // happens when we disable jemalloc), just emit a smashable mov followed by // a register cmp. emitSmashableMovq(a.code(), uint64_t(func), rax); a. cmpq (rax, rVmFp[AROFF(m_func)]); } a. jnz (mcg->tx().uniqueStubs.funcPrologueRedispatch); assertx(funcPrologueToGuard(a.frontier(), func) == start); assertx(funcPrologueHasGuard(a.frontier(), func)); }
// Emit the function guard for a prologue: compare the Func* stored in the
// current ActRec (rvmfp()->m_func) against `func`, and jump to the
// redispatch stub on mismatch. Both instructions are emitted in smashable
// form; their smash metadata is recorded in `fixups`.
void emitFuncGuard(const Func* func, CodeBlock& cb, CGMeta& fixups) {
  using namespace reg;
  X64Assembler a { cb };

  // rax is used as scratch in the wide-immediate path below; it must be
  // unreserved in the cross-trace ABI.
  assertx(x64::abi(CodeKind::CrossTrace).gpUnreserved.contains(rax));

  auto const funcImm = Immed64(func);

  if (funcImm.fits(sz::dword)) {
    // Common case: the Func* fits a signed 32-bit immediate, so a single
    // smashable cmpq against the ActRec's m_func slot suffices.
    emitSmashableCmpq(a.code(), fixups, funcImm.l(), rvmfp(),
                      safe_cast<int8_t>(AROFF(m_func)));
  } else {
    // Although func doesn't fit in a signed 32-bit immediate, it may still fit
    // in an unsigned one. Rather than deal with yet another case (which only
    // happens when we disable jemalloc), just emit a smashable mov followed by
    // a register cmp.
    emitSmashableMovq(a.code(), fixups, uint64_t(func), rax);
    a.  cmpq   (rax, rvmfp()[AROFF(m_func)]);
  }
  // Guard failed: this prologue belongs to a different Func; re-dispatch.
  a.  jnz    (tc::ustubs().funcPrologueRedispatch);

  // Sanity check: the guard must be recoverable from the prologue address
  // and must match `func`.
  DEBUG_ONLY auto guard = funcGuardFromPrologue(a.frontier(), func);
  assertx(funcGuardMatches(guard, func));
}
// Emit the function guard for a prologue (variant that pre-sizes the region
// with prepareForSmash): compare the ActRec's m_func against `func` and jump
// to the redispatch stub on mismatch.
void emitFuncGuard(const Func* func, CodeBlock& cb) {
  using namespace reg;
  X64Assembler a { cb };

  // rax is used as scratch in the wide-immediate path below.
  assertx(cross_trace_abi.gpUnreserved.contains(rax));

  auto const funcImm = Immed64(func);
  // Choose the smash geometry up front: the total byte length of the guard
  // sequence and the offset of the immediate within it, for the wide
  // (movq+cmp) and short (cmpq-with-imm) forms respectively.
  int nbytes, offset;
  if (!funcImm.fits(sz::dword)) {
    nbytes = kFuncGuardSmash;
    offset = kFuncGuardImm;
  } else {
    nbytes = kFuncGuardShortSmash;
    offset = kFuncGuardShortImm;
  }
  // Must happen before `start` is captured: prepareForSmash may move the
  // frontier (presumably to keep the smashable immediate suitably placed —
  // confirm against the backend's prepareForSmash implementation).
  mcg->backEnd().prepareForSmash(a.code(), nbytes, offset);

  TCA start DEBUG_ONLY = a.frontier();

  if (!funcImm.fits(sz::dword)) {
    // Although func doesn't fit in a signed 32-bit immediate, it may still fit
    // in an unsigned one. Rather than deal with yet another case (which only
    // happens when we disable jemalloc), just force it to be an 8-byte
    // immediate, and patch it up afterwards.
    a.  movq   (0xdeadbeeffeedface, rax);
    // Find the just-emitted 8-byte placeholder (the last 8 bytes before the
    // frontier) and overwrite it with the real Func*.
    auto immptr = reinterpret_cast<uintptr_t*>(a.frontier()) - 1;
    assertx(*immptr == 0xdeadbeeffeedface);
    *immptr = uintptr_t(func);
    a.  cmpq   (rax, rVmFp[AROFF(m_func)]);
  } else {
    // Short form: compare the 32-bit immediate directly against m_func.
    a.  cmpq   (funcImm.l(), rVmFp[AROFF(m_func)]);
  }
  // Guard failed: re-dispatch through the unique stub.
  a.  jnz    (mcg->tx().uniqueStubs.funcPrologueRedispatch);

  // The guard must be recoverable from (and present at) the prologue address.
  assertx(funcPrologueToGuard(a.frontier(), func) == start);
  assertx(funcPrologueHasGuard(a.frontier(), func));
}
// Bridge from the x64-assembler interface to the vasm implementation: wrap
// the assembler's code block in a Vauto unit and delegate to the Vout-based
// overload of emitPops.
void PhysRegSaverParity::emitPops(X64Assembler& as, RegSet regs) {
  Vauto vasm { as.code() };
  emitPops(vasm.main(), regs);
}
// Construct via the x64-assembler interface: delegate to the Vout-based
// constructor over a temporary Vauto unit, then switch the stored emission
// target from the (now-dead) Vout to the plain assembler.
PhysRegSaverParity::PhysRegSaverParity(int parity, X64Assembler& as,
                                       RegSet regs)
  : PhysRegSaverParity{parity, Vauto(as.code()).main(), regs}
{
  // NOTE(review): the delegated ctor presumably stored the temporary Vout in
  // m_v; null it and record `as` so subsequent emission goes through the raw
  // assembler — confirm against the delegated constructor's definition.
  m_v = nullptr;
  m_as = &as;
}