void cgLdElem(IRLS& env, const IRInstruction* inst) {
  auto const base = srcLoc(env, inst, 0).reg();
  auto const key = inst->src(1);
  auto& v = vmain(env);

  // If the index is a compile-time constant that fits in a 32-bit
  // displacement, fold it straight into the addressing mode; otherwise
  // index with the register holding the runtime value.
  if (!key->hasConstVal() || !deltaFits(key->intVal(), sz::dword)) {
    auto const keyReg = srcLoc(env, inst, 1).reg();
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), base[keyReg]);
    return;
  }
  loadTV(v, inst->dst(), dstLoc(env, inst, 0), base[key->intVal()]);
}
void cgLdMem(IRLS& env, const IRInstruction* inst) {
  auto const src = inst->src(0);
  auto const srcTmpLoc = tmpLoc(env, src);
  auto const outLoc = tmpLoc(env, inst->dst());

  // Load the TypedValue through separate type/value pointers derived
  // from the memory operand.
  loadTV(vmain(env), inst->dst()->type(), outLoc,
         memTVTypePtr(src, srcTmpLoc),
         memTVValPtr(src, srcTmpLoc));
}
void cgLdLocPseudoMain(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);
  auto const base = localOffset(inst->extra<LdLocPseudoMain>()->locId);

  // Pseudo-main locals can be mutated behind our back, so guard the
  // local's type before loading it; mismatches branch to taken().
  irlower::emitTypeCheck(
    v, env, inst->typeParam(),
    fp[base + TVOFF(m_type)],
    fp[base + TVOFF(m_data)],
    inst->taken()
  );
  loadTV(v, inst->dst(), dstLoc(env, inst, 0), fp[base]);
}
void cgLdWHResult(IRLS& env, const IRInstruction* inst) {
  // Load the wait-handle's result TypedValue from its fixed offset.
  auto const wh = srcLoc(env, inst, 0).reg();
  auto const resultPtr = wh[WH::resultOff()];
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), resultPtr);
}
void cgLdContArKey(IRLS& env, const IRInstruction* inst) {
  auto const contAR = srcLoc(env, inst, 0).reg();
  // The generator's key is addressed relative to the ActRec embedded in
  // the Generator object.
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0),
         contAR[GENDATAOFF(m_key) - Generator::arOff()]);
}
void cgLdContField(IRLS& env, const IRInstruction* inst) {
  // src(0) is the base register; src(1) is a constant byte offset into
  // the continuation.
  auto const base = srcLoc(env, inst, 0).reg();
  auto const off = inst->src(1)->intVal();
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), base[off]);
}
/*
 * Lower a Call instruction: finish setting up the callee ActRec (saved
 * frame pointer, return offset, and optionally the FCallAwait flag), then
 * either invoke a NativeImpl builtin directly via the C++ ABI or emit a
 * smashable PHP call through a bind-call service-request stub.
 */
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;      // may be null if not known statically
  auto const argc = extra->numParams;
  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  // The callee's evaluation stack starts at spOffset; its ActRec sits
  // immediately above the argc pushed arguments.
  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  // Record the caller frame and the bytecode offset to return to.
  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  // Fast path: a builtin whose body is a single NativeImpl op and which
  // received exactly its declared parameter count can be called directly.
  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);
      // Skip any leading AssertRATL ops to find the NativeImpl, and check
      // that it is the function's final bytecode.
      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) == callee->unit()->entry() + callee->past());
    }

    // Return through the ret helper stub rather than a real call site.
    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      // Zero m_invName so VV-using builtins see no invoke name.
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    // Enter the callee frame, then check surprise flags before running it.
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation. This will free the locals for us in the
    // normal case. In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr),
                 v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}),
                 {done, catchBlock},
                 Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;

    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments. We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  // General case: set vmfp to the callee ActRec and emit a bind-call.
  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Poison vmsp and the saved RIP so stale values are caught quickly.
    v << syncvmsp{v.cns(0x42)};
    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub. The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee ? mcg->ustubs().immutableBindCallStub
                             : mcg->ustubs().bindCallStub;
  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  // The return value is delivered in the VM return registers.
  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
void cgLdRef(IRLS& env, const IRInstruction* inst) {
  // Load the TypedValue held inside the RefData.
  auto const ref = srcLoc(env, inst, 0).reg();
  auto const inner = ref[RefData::tvOffset()];
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), inner);
}
void cgLdStk(IRLS& env, const IRInstruction* inst) {
  // Load a TypedValue from the eval stack at the instruction's cell offset.
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const offset = inst->extra<LdStk>()->offset.offset;
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0),
         sp[cellsToBytes(offset)]);
}
/*
 * Lower LdCns: load a named global constant out of RDS.
 *
 * For a normal RDS handle, guard on the handle's initialized bit and load
 * the TypedValue, jumping to taken() when the constant is not yet defined.
 * For a persistent handle, either the value is statically known and can be
 * materialized as immediates, or it is Uninit-at-bind-time and must be
 * loaded and type-checked at runtime.
 */
void cgLdCns(IRLS& env, const IRInstruction* inst) {
  auto const cnsName = inst->src(0)->strVal();
  auto const ch = makeCnsHandle(cnsName, false);
  auto const dst = dstLoc(env, inst, 0);
  auto& v = vmain(env);
  assertx(inst->taken());

  if (rds::isNormalHandle(ch)) {
    // Branch to taken() if the handle hasn't been initialized yet.
    auto const sf = checkRDSHandleInitialized(v, ch);
    fwdJcc(v, env, CC_NE, sf, inst->taken());
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    return;
  }
  assertx(rds::isPersistentHandle(ch));

  auto const& cns = rds::handleToRef<TypedValue>(ch);

  if (cns.m_type == KindOfUninit) {
    // The slot may be written later; load it now and branch to taken()
    // if it is still Uninit at runtime.
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    auto const sf = v.makeReg();
    irlower::emitTypeTest(
      v, env, TUninit, dst.reg(1), dst.reg(0), sf,
      [&] (ConditionCode cc, Vreg sf) {
        fwdJcc(v, env, cc, sf, inst->taken());
      }
    );
  } else {
    // Statically known constant.
    assertx(!dst.isFullSIMD());
    switch (cns.m_type) {
      case KindOfNull:
        v << copy{v.cns(nullptr), dst.reg(0)};
        break;
      case KindOfBoolean:
        v << copy{v.cns(!!cns.m_data.num), dst.reg(0)};
        break;
      // All remaining value representations fit in m_data.num's bits.
      case KindOfInt64:
      case KindOfPersistentString:
      case KindOfPersistentVec:
      case KindOfPersistentDict:
      case KindOfPersistentKeyset:
      case KindOfPersistentArray:
      case KindOfString:
      case KindOfVec:
      case KindOfDict:
      case KindOfKeyset:
      case KindOfArray:
      case KindOfObject:
      case KindOfResource:
      case KindOfRef:
        v << copy{v.cns(cns.m_data.num), dst.reg(0)};
        break;
      case KindOfDouble:
        v << copy{v.cns(cns.m_data.dbl), dst.reg(0)};
        break;
      case KindOfUninit:
      case KindOfClass:
        not_reached();
    }
    // Materialize the statically known type tag.
    v << copy{v.cns(cns.m_type), dst.reg(1)};
  }
}
void cgLdUnwinderValue(IRLS& env, const IRInstruction* inst) {
  // Load the TypedValue stashed by the unwinder in thread-local storage.
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0),
         rvmtl()[unwinderTVOff()]);
}