// Overwrite the ActRec cells at the given frame offset with a trash
// pattern, for debug builds.
void cgDbgTrashFrame(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<DbgTrashFrame>()->offset.offset);
  for (auto i = 0; i < kNumActRecCells; ++i) {
    trashTV(vmain(env), fp, off + cellsToBytes(i), kTVTrashJITFrame);
  }
}
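// For reference, the cell-to-byte conversion used throughout these routines
// is assumed to be a plain multiply by the size of one VM stack slot; a
// minimal sketch (the real definition lives in the runtime headers):
//
//   constexpr int64_t cellsToBytes(int64_t nCells) {
//     return nCells * sizeof(Cell);  // one TypedValue slot, 16 bytes on x64
//   }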
int32_t emitBindCall(CodeBlock& mainCode, CodeBlock& stubsCode,
                     SrcKey srcKey, const Func* funcd, int numArgs) {
  // If this is a call to a builtin and we don't need any argument munging,
  // we can skip the prologue system and do it inline.
  if (isNativeImplCall(funcd, numArgs)) {
    StoreImmPatcher patchIP(mainCode, (uint64_t)mainCode.frontier(), reg::rax,
                            cellsToBytes(numArgs) + AROFF(m_savedRip), rVmSp);
    assert(funcd->numLocals() == funcd->numParams());
    assert(funcd->numIterators() == 0);

    Asm a { mainCode };
    emitLea(a, rVmSp[cellsToBytes(numArgs)], rVmFp);
    emitCheckSurpriseFlagsEnter(mainCode, stubsCode, true, mcg->fixupMap(),
                                Fixup(0, numArgs));
    // rVmSp is already correctly adjusted, because there are no locals
    // other than the arguments passed.
    auto retval = emitNativeImpl(mainCode, funcd);
    patchIP.patch(uint64_t(mainCode.frontier()));
    return retval;
  }

  Asm a { mainCode };
  if (debug) {
    auto off = cellsToBytes(numArgs) + AROFF(m_savedRip);
    emitImmStoreq(a, kUninitializedRIP, rVmSp[off]);
  }
  // Stash the callee's frame pointer in rStashedAR for the callee's
  // prologue.
  emitLea(a, rVmSp[cellsToBytes(numArgs)], rStashedAR);
  emitBindCallHelper(mainCode, stubsCode, srcKey, funcd, numArgs);
  return 0;
}
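// AROFF(field) is used above to address ActRec fields relative to a stack or
// frame pointer; a minimal sketch, assuming the usual offsetof wrapper:
//
//   #define AROFF(f) offsetof(ActRec, f)
//
// so e.g. rVmSp[cellsToBytes(numArgs) + AROFF(m_savedRip)] addresses the
// saved return address of the ActRec sitting numArgs cells from rVmSp.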
void cgInlineReturnNoFrame(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Trash the cells of the dead inlined frame so stale reads fail loudly.
    auto const extra = inst->extra<InlineReturnNoFrame>();
    auto const offset = cellsToBytes(extra->offset.offset);
    for (auto i = 0; i < kNumActRecCells; ++i) {
      trashTV(v, rvmfp(), offset - cellsToBytes(i), kTVTrashJITFrame);
    }
  }

  v << inlineend{};
}
void CodeGenerator::cgSideExitGuardStk(IRInstruction* inst) {
  auto const sp = x2a(curOpd(inst->src(0)).reg());
  auto const extra = inst->extra<SideExitGuardStk>();

  emitTypeTest(
    inst->typeParam(),
    sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_type)],
    sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const sk = SrcKey(curFunc(), extra->taken);
      emitBindSideExit(this->m_mainCode, this->m_stubsCode, sk, ccNegate(cc));
    }
  );
}
// Restore the caller's frame pointer when returning from an inlined callee.
void cgInlineReturn(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const callerFPOff = inst->extra<InlineReturn>()->offset;
  v << lea{fp[cellsToBytes(callerFPOff.offset)], rvmfp()};
  v << inlineend{};
}
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);
  auto const next = v.makeBlock();

  // Link the generator's frame into the frame chain and make it live.
  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};
  v << copy{genFP, fp};

  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  v = next;

  auto const dst = dstLoc(env, inst, 0);
  auto const type = inst->dst()->type();
  if (!type.admitsSingleVal()) {
    v << defvmretdata{dst.reg(0)};
  }
  if (type.needsReg()) {
    v << defvmrettype{dst.reg(1)};
  }
}
void cgCallArray(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallArray>();
  auto const sp = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const syncSP = v.makeReg();
  v << lea{sp[cellsToBytes(extra->spOffset.offset)], syncSP};
  v << syncvmsp{syncSP};

  auto const target = extra->numParams == 0
    ? mcg->ustubs().fcallArrayHelper
    : mcg->ustubs().fcallUnpackHelper;
  auto const pc = v.cns(extra->pc);
  auto const after = v.cns(extra->after);
  auto const args = extra->numParams == 0
    ? v.makeTuple({pc, after})
    : v.makeTuple({pc, after, v.cns(extra->numParams)});

  auto const done = v.makeBlock();
  v << vcallarray{target, fcall_array_regs(), args,
                  {done, label(env, inst->taken())}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
void cgStOutValue(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(
    inst->extra<StOutValue>()->index + kNumActRecCells
  );
  storeTV(vmain(env), fp[off], srcLoc(env, inst, 1), inst->src(1));
}
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);
  auto const next = v.makeBlock();

  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};
  v << copy{genFP, fp};

  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = next;
}
void cgCoerceStk(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CoerceStk>();
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const offset = cellsToBytes(extra->offset.offset);

  implCoerce(env, inst, sp, offset, extra->callee, extra->argNum);
}
void cgCheckStk(IRLS& env, const IRInstruction* inst) {
  auto const baseOff = cellsToBytes(inst->extra<CheckStk>()->offset.offset);
  auto const base = srcLoc(env, inst, 0).reg()[baseOff];

  emitTypeCheck(vmain(env), env, inst->typeParam(),
                base + TVOFF(m_type), base + TVOFF(m_data), inst->taken());
}
void CodeGenerator::cgInterpOne(IRInstruction* inst) {
  cgInterpOneCommon(inst);

  auto const& extra = *inst->extra<InterpOne>();
  auto newSpReg = x2a(curOpd(inst->dst()).reg());
  auto spAdjustBytes = cellsToBytes(extra.cellsPopped - extra.cellsPushed);
  emitRegGetsRegPlusImm(m_as, newSpReg, newSpReg, spAdjustBytes);
}
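// Worked example of the adjustment above: the VM stack grows toward lower
// addresses, so an instruction that pops two cells and pushes one leaves the
// stack one cell shallower, and the new sp moves up by one slot:
//
//   cellsToBytes(2 - 1) == 16   // newSp = oldSp + 16 on x64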
void cgSyncReturnBC(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<SyncReturnBC>();
  auto const spOffset = cellsToBytes(extra->spOffset.offset);
  auto const bcOffset = extra->bcOffset;
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();

  auto& v = vmain(env);
  // Record the return bytecode offset and the saved frame pointer in the
  // ActRec being materialized on the stack.
  v << storeli{safe_cast<int32_t>(bcOffset), sp[spOffset + AROFF(m_soff)]};
  v << store{fp, sp[spOffset + AROFF(m_sfp)]};
}
void cgDbgTraceCall(IRLS& env, const IRInstruction* inst) {
  auto const spOff = inst->extra<DbgTraceCall>()->offset;

  auto const args = argGroup(env, inst)
    .ssa(0)
    .addr(srcLoc(env, inst, 1).reg(), cellsToBytes(spOff.offset))
    .imm(inst->marker().bcOff());

  cgCallHelper(vmain(env), env, CallSpec::direct(traceCallback),
               callDest(env, inst), SyncOptions::None, args);
}
void cgProfileMethod(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileMethodData>();
  auto const sp = srcLoc(env, inst, 0).reg();

  auto const args = argGroup(env, inst)
    .addr(rvmtl(), safe_cast<int32_t>(extra->handle))
    .addr(sp, cellsToBytes(extra->bcSPOff.offset))
    .ssa(1);

  cgCallHelper(vmain(env), env, CallSpec::method(&MethProfile::reportMeth),
               kVoidDest, SyncOptions::None, args);
}
void cgDbgAssertARFunc(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const func = srcLoc(env, inst, 1).reg(0);
  auto const off = cellsToBytes(inst->extra<DbgAssertARFunc>()->offset.offset);

  auto& v = vmain(env);
  auto const sf = v.makeReg();
  v << cmpqm{func, sp[off + AROFF(m_func)], sf};

  // Trap if the Func stored in the ActRec doesn't match the expected one.
  ifThen(v, CC_NE, sf, [&](Vout& v) { v << ud2{}; });
}
void CodeGenerator::cgGuardStk(IRInstruction* inst) {
  auto const rSP = x2a(curOpd(inst->src(0)).reg());
  auto const baseOff = cellsToBytes(inst->extra<GuardStk>()->offset);

  emitTypeTest(
    inst->typeParam(),
    rSP[baseOff + TVOFF(m_type)],
    rSP[baseOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
      auto const destSR = m_tx64->getSrcRec(destSK);
      destSR->emitFallbackJump(this->m_mainCode, ccNegate(cc));
    });
}
void cgInterpOne(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<InterpOne>();
  auto const sp = srcLoc(env, inst, 0).reg();

  auto const helper = interpOneEntryPoints[size_t(extra->opcode)];
  auto const args = argGroup(env, inst)
    .ssa(1)
    .addr(sp, cellsToBytes(extra->spOffset.offset))
    .imm(extra->bcOff);

  // Call the interpOne##Op() routine, which syncs VM regs manually.
  cgCallHelper(vmain(env), env, CallSpec::direct(helper), kVoidDest,
               SyncOptions::None, args);
}
void cgInterpOneCF(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<InterpOneCF>();
  auto const sp = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(extra->spOffset.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  assertx(tc::ustubs().interpOneCFHelpers.count(extra->opcode));

  // We pass the Offset in the third argument register.
  v << ldimml{extra->bcOff, rarg(2)};
  v << jmpi{tc::ustubs().interpOneCFHelpers.at(extra->opcode),
            interp_one_cf_regs()};
}
void cgInitObjProps(IRLS& env, const IRInstruction* inst) {
  auto const cls = inst->extra<InitObjProps>()->cls;
  auto const obj = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  // Set the attributes, if any.
  auto const odAttrs = cls->getODAttrs();
  if (odAttrs) {
    static_assert(sizeof(ObjectData::Attribute) == 2,
                  "Codegen expects 2-byte ObjectData attributes");
    assertx(!(odAttrs & 0xffff0000));
    v << orwim{odAttrs, obj[ObjectData::attributeOff()], v.makeReg()};
  }

  // Initialize the properties.
  auto const nprops = cls->numDeclProperties();
  if (nprops > 0) {
    if (cls->pinitVec().size() == 0) {
      // If the Class has no 86pinit property-initializer functions, we can
      // just copy the initial values from a data member on the Class.
      implInitObjPropsFast(v, env, inst, obj, cls, nprops);
    } else {
      // Load the Class's propInitVec from the target cache.  We know it's
      // already been initialized as a pre-condition on this op.
      auto const propHandle = cls->propHandle();
      assertx(rds::isNormalHandle(propHandle));

      auto const propInitVec = v.makeReg();
      auto const propData = v.makeReg();
      v << load{Vreg(rvmtl())[propHandle], propInitVec};
      v << load{propInitVec[Class::PropInitVec::dataOff()], propData};

      auto const propsOff = sizeof(ObjectData) + cls->builtinODTailSize();
      auto args = argGroup(env, inst)
        .addr(obj, safe_cast<int32_t>(propsOff))
        .reg(propData);

      if (!cls->hasDeepInitProps()) {
        cgCallHelper(v, env, CallSpec::direct(memcpy), kVoidDest,
                     SyncOptions::None, args.imm(cellsToBytes(nprops)));
      } else {
        cgCallHelper(v, env, CallSpec::direct(deepInitHelper), kVoidDest,
                     SyncOptions::None, args.imm(nprops));
      }
    }
  }
}
void cgDefInlineFP(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<DefInlineFP>();
  auto const callerSP = srcLoc(env, inst, 0).reg();
  auto const callerFP = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  auto const ar = callerSP[cellsToBytes(extra->spOffset.offset)];

  // Do roughly the same work as an HHIR Call.
  v << store{callerFP, ar + AROFF(m_sfp)};
  emitImmStoreq(v, uintptr_t(tc::ustubs().retInlHelper),
                ar + AROFF(m_savedRip));
  v << storeli{extra->retBCOff, ar + AROFF(m_soff)};
  if (extra->target->attrs() & AttrMayUseVV) {
    v << storeqi{0, ar + AROFF(m_invName)};
  }
  v << lea{ar, dstLoc(env, inst, 0).reg()};
}
void cgLookupClsMethod(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<LookupClsMethod>();
  auto const sp = srcLoc(env, inst, 2).reg();

  auto const args = argGroup(env, inst)
    .ssa(0)
    .ssa(1)
    .addr(sp, cellsToBytes(extra->calleeAROffset.offset))
    .ssa(3);

  // The two helper instantiations differ only in whether the calling
  // context is forwarded.
  auto const helper = extra->forward
    ? CallSpec::direct(lookupClsMethodHelper<true>)
    : CallSpec::direct(lookupClsMethodHelper<false>);

  cgCallHelper(vmain(env), env, helper, callDest(env, inst),
               SyncOptions::Sync, args);
}
void addDbgGuardImpl(SrcKey sk, SrcRec* sr) {
  TCA realCode = sr->getTopTranslation();
  if (!realCode) return;  // No translations, nothing to do.

  auto& cb = mcg->code.main();

  auto const dbgGuard = vwrap(cb, [&] (Vout& v) {
    if (!sk.resumed()) {
      auto const off = sr->nonResumedSPOff();
      v << lea{rvmfp()[-cellsToBytes(off.offset)], rvmsp()};
    }

    auto const tinfo = v.makeReg();
    auto const attached = v.makeReg();
    auto const sf = v.makeReg();
    auto const done = v.makeBlock();

    constexpr size_t dbgOff =
      offsetof(ThreadInfo, m_reqInjectionData) +
      RequestInjectionData::debuggerReadOnlyOffset();

    v << ldimmq{reinterpret_cast<uintptr_t>(sk.pc()), rarg(0)};

    emitTLSLoad(v, tls_datum(ThreadInfo::s_threadInfo), tinfo);
    v << loadb{tinfo[dbgOff], attached};
    v << testbi{static_cast<int8_t>(0xffu), attached, sf};

    v << jcci{CC_NZ, sf, done, mcg->ustubs().interpHelper};
    v = done;
    v << fallthru{};
  }, CodeKind::Helper);

  // Emit a jump to the actual code.
  auto const dbgBranchGuardSrc = emitSmashableJmp(cb, realCode);

  // Add the guard to the SrcRec.
  sr->addDebuggerGuard(dbgGuard, dbgBranchGuardSrc);
}
void cgCheckSurpriseAndStack(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const extra = inst->extra<CheckSurpriseAndStack>();
  auto const func = extra->func;

  auto const off = func->getEntryForNumArgs(extra->argc) - func->base();
  auto const fixup = Fixup(off, func->numSlotsInFrame());

  auto& v = vmain(env);
  auto const sf = v.makeReg();
  auto const needed_top = v.makeReg();
  v << lea{fp[-cellsToBytes(func->maxStackCells())], needed_top};
  v << cmpqm{needed_top, rvmtl()[rds::kSurpriseFlagsOff], sf};

  unlikelyIfThen(v, vcold(env), CC_AE, sf, [&] (Vout& v) {
    auto const stub = tc::ustubs().functionSurprisedOrStackOverflow;
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::stub(stub), v.makeVcallArgs({}), v.makeTuple({}),
                 {done, label(env, inst->taken())}, fixup};
    v = done;
  });
}
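// A note on why a single compare covers both checks (assumed mechanism): the
// RDS word at rds::kSurpriseFlagsOff doubles as the stack limit, with
// surprise flags packed into its high bits.  Raising any flag makes the word
// compare above every legal frame address, so the one unsigned comparison
// above trips on either a pending surprise or an imminent overflow:
//
//   needed_top = fp - cellsToBytes(func->maxStackCells());
//   if (rds[kSurpriseFlagsOff] >= needed_top) {  // CC_AE on the cmpqm above
//     // slow path: functionSurprisedOrStackOverflow
//   }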
void cgStStk(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<StStk>()->offset.offset);
  storeTV(vmain(env), sp[off], srcLoc(env, inst, 1), inst->src(1));
}
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never
    // be any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument
    // locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);

      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so
    // make sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation.  This will free the locals for us in
    // the normal case.  In the case where an exception is thrown, the VM
    // unwinder will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr),
                 v.makeVcallArgs({{rvmfp()}}), v.makeTuple({}),
                 {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;

    // The native implementation already put the return value on the stack
    // for us, and handled cleaning up the arguments.  We have to update the
    // frame pointer and the stack pointer, and load the return value into
    // the return register so the trace we are returning to has it where it
    // expects.
    // TODO(#1273094): We should probably modify the actual builtins to
    // return values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub.  The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
void cgCallBuiltin(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallBuiltin>();
  auto const callee = extra->callee;
  auto const returnType = inst->typeParam();
  auto const funcReturnType = callee->returnType();
  auto const returnByValue = callee->isReturnByValue();

  auto const dstData = dstLoc(env, inst, 0).reg(0);
  auto const dstType = dstLoc(env, inst, 0).reg(1);

  auto& v = vmain(env);

  // Whether `t' is passed in/out of C++ as String&/Array&/Object&.
  auto const isReqPtrRef = [] (MaybeDataType t) {
    return isStringType(t) || isArrayLikeType(t) ||
           t == KindOfObject || t == KindOfResource;
  };

  if (FixupMap::eagerRecord(callee)) {
    auto const sp = srcLoc(env, inst, 1).reg();
    auto const spOffset = cellsToBytes(extra->spOffset.offset);
    auto const& marker = inst->marker();
    auto const pc = marker.fixupSk().unit()->entry() + marker.fixupBcOff();

    auto const synced_sp = v.makeReg();
    v << lea{sp[spOffset], synced_sp};
    emitEagerSyncPoint(v, pc, rvmtl(), srcLoc(env, inst, 0).reg(), synced_sp);
  }

  int returnOffset = rds::kVmMInstrStateOff +
                     offsetof(MInstrState, tvBuiltinReturn);
  auto args = argGroup(env, inst);

  if (!returnByValue) {
    if (isBuiltinByRef(funcReturnType)) {
      if (isReqPtrRef(funcReturnType)) {
        returnOffset += TVOFF(m_data);
      }
      // Pass the address of tvBuiltinReturn to the native function as the
      // location where it can construct the return Array, String, Object,
      // or Variant.
      args.addr(rvmtl(), returnOffset);
      args.indirect();
    }
  }

  // The srcs past the first two (sp and fp) are the arguments to the callee.
  auto srcNum = uint32_t{2};

  // Add the this_ or self_ argument for HNI builtins.
  if (callee->isMethod()) {
    if (callee->isStatic()) {
      args.ssa(srcNum);
      ++srcNum;
    } else {
      // Note that we don't support objects with vtables here (if they may
      // need a $this pointer adjustment).  This should be filtered out
      // during irgen or before.
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the func_num_args() value if needed.
  if (callee->attrs() & AttrNumArgs) {
    // If `numNonDefault' is negative, this is passed as an src.
    if (extra->numNonDefault >= 0) {
      args.imm((int64_t)extra->numNonDefault);
    } else {
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the positional arguments.
  for (uint32_t i = 0; i < callee->numParams(); ++i, ++srcNum) {
    auto const& pi = callee->params()[i];

    // Non-pointer and NativeArg args are passed by value.  String, Array,
    // Object, and Variant are passed by const&, i.e. a pointer to stack
    // memory holding the value, so we expect PtrToT types for these.
    // Pointers to req::ptr types (String, Array, Object) need adjusting to
    // point to &ptr->m_data.
    if (TVOFF(m_data) && !pi.nativeArg && isReqPtrRef(pi.builtinType)) {
      assertx(inst->src(srcNum)->type() <= TPtrToGen);
      args.addr(srcLoc(env, inst, srcNum).reg(), TVOFF(m_data));
    } else if (pi.nativeArg && !pi.builtinType && !callee->byRef(i)) {
      // This condition indicates a MixedTV (i.e., TypedValue-by-value) arg.
      args.typedValue(srcNum);
    } else {
      args.ssa(srcNum, pi.builtinType == KindOfDouble);
    }
  }

  auto dest = [&] () -> CallDest {
    if (isBuiltinByRef(funcReturnType)) {
      if (!returnByValue) return kVoidDest;  // indirect return
      return funcReturnType
        ? callDest(dstData)            // String, Array, or Object
        : callDest(dstData, dstType);  // Variant
    }
    return funcReturnType == KindOfDouble
      ? callDestDbl(env, inst)
      : callDest(env, inst);
  }();

  cgCallHelper(v, env, CallSpec::direct(callee->nativeFuncPtr()), dest,
               SyncOptions::Sync, args);

  // For primitive return types (int, bool, double) and returnByValue, the
  // return value is already in dstData/dstType.
  if (returnType.isSimpleType() || returnByValue) return;

  // For return by reference (String, Object, Array, Variant), the builtin
  // writes the return value into MInstrState::tvBuiltinReturn, from where
  // it has to be tested and copied.

  if (returnType.isReferenceType()) {
    // The return type is String, Array, or Object; fold nullptr to
    // KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && isReqPtrRef(funcReturnType));

    v << load{rvmtl()[returnOffset], dstData};

    if (dstType.isValid()) {
      auto const sf = v.makeReg();
      auto const rtype = v.cns(returnType.toDataType());
      auto const nulltype = v.cns(KindOfNull);
      v << testq{dstData, dstData, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  if (returnType <= TCell || returnType <= TBoxedCell) {
    // The return type is Variant; fold KindOfUninit to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && !isReqPtrRef(funcReturnType));
    static_assert(KindOfUninit == 0, "KindOfUninit must be 0 for test");

    v << load{rvmtl()[returnOffset + TVOFF(m_data)], dstData};

    if (dstType.isValid()) {
      auto const rtype = v.makeReg();
      v << loadb{rvmtl()[returnOffset + TVOFF(m_type)], rtype};

      auto const sf = v.makeReg();
      auto const nulltype = v.cns(KindOfNull);
      v << testb{rtype, rtype, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  not_reached();
}
void cgDbgTrashStk(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<DbgTrashStk>()->offset.offset);
  trashTV(vmain(env), sp, off, kTVTrashJITStk);
}
/*
 * Service request stub emitter.
 *
 * Emit a service request stub of type `sr' at `start' in `cb'.
 */
void emit_svcreq(CodeBlock& cb,
                 TCA start,
                 bool persist,
                 folly::Optional<FPInvOffset> spOff,
                 ServiceRequest sr,
                 const ArgVec& argv) {
  FTRACE(2, "svcreq @{} {}(", start, to_name(sr));

  auto const is_reused = start != cb.frontier();

  CodeBlock stub;
  stub.init(start, stub_size(), "svcreq_stub");

  {
    Vauto vasm{stub};
    auto& v = vasm.main();

    // If we have an spOff, materialize rvmsp() so that handleSRHelper() can
    // do a VM reg sync.  (When we don't have an spOff, the caller of the
    // service request was responsible for making sure rvmsp already
    // contained the top of the stack.)
    if (spOff) {
      v << lea{rvmfp()[-cellsToBytes(spOff->offset)], rvmsp()};
    }

    auto live_out = leave_trace_regs();

    assert(argv.size() <= kMaxArgs);

    // Pick up CondCode arguments first---vasm may optimize immediate loads
    // into operations which clobber status flags.
    for (auto i = 0; i < argv.size(); ++i) {
      auto const& arg = argv[i];
      if (arg.kind != Arg::Kind::CondCode) continue;

      FTRACE(2, "c({}), ", cc_names[arg.cc]);
      v << setcc{arg.cc, r_svcreq_sf(), rbyte(r_svcreq_arg(i))};
    }

    for (auto i = 0; i < argv.size(); ++i) {
      auto const& arg = argv[i];
      auto const r = r_svcreq_arg(i);

      switch (arg.kind) {
        case Arg::Kind::Immed:
          FTRACE(2, "{}, ", arg.imm);
          v << copy{v.cns(arg.imm), r};
          break;
        case Arg::Kind::Address:
          FTRACE(2, "{}(%rip), ", arg.imm);
          v << leap{reg::rip[arg.imm], r};
          break;
        case Arg::Kind::CondCode:
          break;
      }
      live_out |= r;
    }
    FTRACE(2, ") : stub@");

    if (persist) {
      FTRACE(2, "<none>");
      v << copy{v.cns(0), r_svcreq_stub()};
    } else {
      FTRACE(2, "{}", stub.base());
      v << leap{reg::rip[int64_t(stub.base())], r_svcreq_stub()};
    }
    v << copy{v.cns(sr), r_svcreq_req()};

    live_out |= r_svcreq_stub();
    live_out |= r_svcreq_req();

    v << jmpi{TCA(handleSRHelper), live_out};

    // We pad ephemeral stubs unconditionally.  This is required for
    // correctness by the x64 code relocator.
    vasm.unit().padding = !persist;
  }

  if (!is_reused) cb.skip(stub.used());
}
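// A hypothetical call site, to make the knobs concrete (the request kind and
// argument vector here are illustrative, not taken from this section):
//
//   emit_svcreq(cb, stub_start, /* persist = */ false,
//               folly::Optional<FPInvOffset>{spOff},
//               REQ_BIND_JMP, argv);
//
// Note the persist distinction above: a persistent stub loads 0 into
// r_svcreq_stub(), signalling handleSRHelper() that there is no ephemeral
// stub to reclaim; otherwise the stub's own address is passed so it can be
// recycled once the request has been serviced.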
void CodeGenerator::cgLdStackAddr(IRInstruction* inst) {
  auto const dstReg = x2a(curOpd(inst->dst()).reg());
  auto const baseReg = x2a(curOpd(inst->src(0)).reg());
  auto const offset = cellsToBytes(inst->extra<LdStackAddr>()->offset);
  emitRegGetsRegPlusImm(m_as, dstReg, baseReg, offset);
}