void emitCreateCont(IRGS& env) {
  auto const resumeOffset = nextBcOff(env);
  assertx(!resumed(env));
  assertx(curFunc(env)->isGenerator());

  if (curFunc(env)->isAsyncGenerator()) PUNT(CreateCont-AsyncGenerator);

  // Allocate the Generator object.  The CreateCont instruction copies the
  // frame's local variables and iterators into it.
  auto const genFunc = curFunc(env);
  auto const suspendSk = SrcKey(genFunc, resumeOffset, true);
  auto const addrData = LdBindAddrData { suspendSk, invSPOff(env) + 1 };
  auto const bindAddr = gen(env, LdBindAddr, addrData);
  auto const generator = gen(
    env,
    CreateCont,
    fp(env),
    cns(env, genFunc->numSlotsInFrame()),
    bindAddr,
    cns(env, resumeOffset)
  );

  // If the suspend hook throws, it takes care of decref'ing the freshly
  // created generator.
  auto const genAR = gen(
    env,
    LdContActRec,
    IsAsyncData(curFunc(env)->isAsync()),
    generator
  );
  suspendHookE(env, fp(env), genAR, generator);

  // Store the generator as the return value, free the ActRec, and hand
  // control back to the caller.
  gen(env, StRetVal, fp(env), generator);
  auto const retData = RetCtrlData { offsetToReturnSlot(env), false };
  gen(env, RetCtrl, retData, sp(env), fp(env));
}
SSATmp* implInstanceOfD(IRGS& env, SSATmp* src, const StringData* className) {
  /*
   * InstanceOfD on a non-object is always false, except for the primitive
   * types that can implement special interfaces.
   *
   * We're prepared to generate translations for known non-object types, but
   * if it's Gen/Cell we're going to PUNT because it's natural to translate
   * that case with control flow TODO(#2020251)
   */
  if (TObj < src->type()) {
    PUNT(InstanceOfD_MaybeObj);
  }

  if (!src->isA(TObj)) {
    // Primitive operand: true only when the primitive's type supports the
    // target interface.
    auto const passes =
      (src->isA(TArr) && interface_supports_array(className)) ||
      (src->isA(TStr) && interface_supports_string(className)) ||
      (src->isA(TInt) && interface_supports_int(className)) ||
      (src->isA(TDbl) && interface_supports_double(className));
    return cns(env, passes);
  }

  // Object operand: try to resolve the check statically first.
  auto const checkCls = ldClassSafe(env, className);
  if (auto const known = implInstanceCheck(env, src, className, checkCls)) {
    return known;
  }

  // Fall back to a runtime class-hierarchy check.
  auto const objCls = gen(env, LdObjClass, src);
  return gen(env, InstanceOf, objCls, checkCls);
}
/*======================================================================
  IPFZT(r, A) -> t

  Integral polynomial finite zero test for QEPCAD.  Given r (the number
  of variables) and a list A of r-variate integral polynomials, returns
  1 if the test concludes the system has finitely many common zeros and
  0 otherwise; returns 0 immediately when the PCIPFZT option is off.

  NOTE(review): PUNT here is the SACLIB/QEPCAD projection-based test
  (returning a small status code), not the JIT translator's PUNT macro
  used elsewhere in this repository — confirm against the QEPCAD
  sources.  List primitives (COMP, ADV, INV, NIL) are SACLIB's.
======================================================================*/
Word QepcadCls::IPFZT(Word r, Word A) {
       Word A1,Ap,As,P,Ths,i,s,t;
       /* hide Ap,Ths,i,s,t; */

Step1: /* Initialize. */
       /*Int*/ if (PCIPFZT == 'n') return(0);
       /*Int*/ Ths = ACLOCK();  /* timing instrumentation only */
       t = 1;
       /* P is the identity permutation (r, r-1, ..., 1 consed up). */
       P = NIL;
       for (i = r; i >= 1; i--) P = COMP(i,P);
       i = 1;

Step2: /* Test for finitely many distinct x sub i - coordinates among
          the common zeros.  Each iteration permutes the variables (for
          i > 1) and applies the per-polynomial test PUNT. */
       Ap = A;
       As = NIL;
       do {
         ADV(Ap,&A1,&Ap);
         if (i > 1) A1 = PPERMV(r,A1,P);
         s = PUNT(r,A1);
         /* s == 2: decisive for this variable set, overall answer stays
            t = 1; s == 1: inconclusive here, try the next variable. */
         if (s == 2) goto Step4;
         else if (s == 1) goto Step3;
         As = COMP(A1,As);
       } while (!(Ap == NIL));
       As = INV(As);  /* restore original polynomial order */
       if (!IPFZT1(r,As)) { t = 0; goto Step4; }

Step3: /* Check for completion: advance to the next variable, cycling
          the permutation; if all r variables failed, fall through with
          the current t. */
       i = i + 1;
       if (i > r) goto Step4;
       P = PERMCY(P);
       goto Step2;

Step4: /* Return. */
       /*Int*/ Ths = ACLOCK() - Ths;
       /*Int*/ TMIPFZT[r] = TMIPFZT[r] + Ths;  /* accumulate time stats */
       goto Return;

Return: /* Prepare for return. */
       return(t);
}
void emitCreateCont(HTS& env) {
  auto const resumeOffset = nextBcOff(env);
  assert(!resumed(env));
  assert(curFunc(env)->isGenerator());

  if (curFunc(env)->isAsyncGenerator()) PUNT(CreateCont-AsyncGenerator);

  // Allocate the Generator object.  CreateCont copies the frame's local
  // variables and iterators into it.
  auto const genFunc = curFunc(env);
  auto const suspendSk = SrcKey(genFunc, resumeOffset, true);
  auto const bindAddr = gen(env, LdBindAddr, LdBindAddrData(suspendSk));
  auto const generator = gen(
    env,
    CreateCont,
    fp(env),
    cns(env, genFunc->numSlotsInFrame()),
    bindAddr,
    cns(env, resumeOffset)
  );

  // A throwing suspend hook decrefs the newly created generator for us.
  auto const genAR = gen(env, LdContActRec, generator);
  suspendHookE(env, fp(env), genAR);

  // Grab the caller info out of the ActRec, store the return value, free
  // the ActRec, and return control to the caller.
  gen(env, StRetVal, fp(env), generator);
  auto const retAddr = gen(env, LdRetAddr, fp(env));
  auto const adjustedSp = gen(env, RetAdjustStack, fp(env));
  auto const freedFp = gen(env, FreeActRec, fp(env));
  gen(env, RetCtrl, RetCtrlData(false), adjustedSp, freedFp, retAddr);
}
void emitStaticLocInit(HTS& env, int32_t locId, const StringData* name) {
  if (curFunc(env)->isPseudoMain()) PUNT(StaticLocInit);

  auto const pmExit = makePseudoMainExit(env);
  auto const initVal = popC(env);

  // Closures (and generators created from closures) break the "one static
  // per source location" assumption that the inline fast path relies on,
  // so they go through a dedicated init helper.
  auto const boxed = [&]{
    if (curFunc(env)->isClosureBody()) {
      return gen(env, ClosureStaticLocInit, cns(env, name), fp(env), initVal);
    }
    auto const cached = gen(
      env, LdStaticLocCached, StaticLocName { curFunc(env), name }
    );
    ifThen(
      env,
      [&] (Block* taken) {
        gen(env, CheckStaticLocInit, taken, cached);
      },
      [&] { // The static has not been initialized yet: cold path.
        hint(env, Block::Hint::Unlikely);
        gen(env, StaticLocInitCached, cached, initVal);
      }
    );
    return cached;
  }();

  // Swap the box into the local, releasing whatever the local held before.
  gen(env, IncRef, boxed);
  auto const prev = ldLoc(env, locId, pmExit, DataTypeSpecific);
  stLocRaw(env, locId, fp(env), boxed);
  gen(env, DecRef, prev);
  // No DecRef of initVal needed: it's a bytecode invariant that the
  // incoming Cell was not ref-counted.
}
void emitEmptyS(IRGS& env) {
  auto const propName = topC(env, BCSPOffset{1});
  if (!propName->isA(TStr)) PUNT(EmptyS-PropNameNotString);

  auto const cls = popA(env);
  auto const result = cond(
    env,
    [&] (Block* taken) {
      auto const addr = ldClsPropAddr(env, cls, propName, false);
      return gen(env, CheckNonNull, taken, addr);
    },
    [&] (SSATmp* ptr) {
      // Next: the property is defined; empty() is the negation of its
      // boolean conversion.
      auto const cell = gen(env, UnboxPtr, ptr);
      auto const val = gen(env, LdMem, cell->type().deref(), cell);
      auto const asBool = gen(env, ConvCellToBool, val);
      return gen(env, XorBool, asBool, cns(env, true));
    },
    [&] {
      // Taken: ldClsPropAddr produced nullptr, i.e. the property isn't
      // defined, so empty() is true.
      return cns(env, true);
    });

  destroyName(env, propName);
  push(env, result);
}
void emitIssetS(HTS& env) {
  auto const propName = topC(env, BCSPOffset{1});
  if (!propName->isA(Type::Str)) PUNT(IssetS-PropNameNotString);

  auto const cls = popA(env);
  auto const result = cond(
    env,
    0,
    [&] (Block* taken) {
      auto const addr = ldClsPropAddr(env, cls, propName, false);
      return gen(env, CheckNonNull, taken, addr);
    },
    [&] (SSATmp* ptr) {
      // Next: the property exists; isset() is "not null".
      return gen(env, IsNTypeMem, Type::Null, gen(env, UnboxPtr, ptr));
    },
    [&] {
      // Taken: ldClsPropAddr produced nullptr because the property isn't
      // defined, so isset() is false.
      return cns(env, false);
    }
  );

  destroyName(env, propName);
  push(env, result);
}
void emitBindG(IRGS& env) {
  auto const gblName = topC(env, BCSPOffset{1});
  if (!gblName->isA(TStr)) PUNT(BindG-NameNotStr);

  // Bind the boxed value into the (possibly freshly defined) global slot.
  auto const boxedVal = popV(env);
  auto const slot = gen(env, LdGblAddrDef, gblName);
  destroyName(env, gblName);
  bindMem(env, slot, boxedVal);
}
void emitSetG(IRGS& env) {
  auto const gblName = topC(env, BCSPOffset{1});
  if (!gblName->isA(TStr)) PUNT(SetG-NameNotStr);

  // Define the global if necessary, then store the popped value through
  // the unboxed slot.
  auto const newVal = popC(env, DataTypeCountness);
  auto const slot = gen(env, LdGblAddrDef, gblName);
  auto const cell = gen(env, UnboxPtr, slot);
  destroyName(env, gblName);
  bindMem(env, cell, newVal);
}
// Top-level driver for linear-scan register allocation over a trace:
// optionally coalesce, number instructions, allocate registers, optionally
// rematerialize, then lay out and (if needed) pre-allocate spill slots.
// The pass ordering below is load-bearing: several passes consume the
// instruction numbering or native-call info computed by an earlier pass.
void LinearScan::allocRegs(Trace* trace) {
  if (RuntimeOption::EvalHHIREnableCoalescing) {
    // <coalesce> doesn't need instruction numbering.
    coalesce(trace);
  }
  numberInstructions(trace);
  collectNatives(trace);
  computePreColoringHint();
  initFreeList();
  allocRegsToTraceAux(trace);
  // Renumber instructions, because we added spills and reloads.
  numberInstructions(trace);
  if (RuntimeOption::EvalHHIREnableRematerialization && m_slots.size() > 0) {
    // Don't bother rematerializing the trace if it has no Spill/Reload.
    if (RuntimeOption::EvalDumpIR > 5) {
      std::cout << "--------- HHIR before rematerialization ---------\n";
      trace->print(std::cout, false);
      std::cout << "-------------------------------------------------\n";
    }
    rematerialize(trace);
  }
  // assignSpillLoc needs next natives in order to decide whether we
  // can use MMX registers.
  collectNatives(trace);
  // Make sure rsp is 16-aligned by rounding the spill-slot count up to an
  // even number.
  uint32 numSpillLocs = assignSpillLoc(trace);
  if (numSpillLocs % 2) {
    ++numSpillLocs;
  }
  assert(NumPreAllocatedSpillLocs % 2 == 0);
  if (numSpillLocs > 0) {
    preAllocSpillLoc(trace, numSpillLocs);
    if (numSpillLocs > (uint32)NumPreAllocatedSpillLocs) {
      /*
       * We only insert AllocSpill and FreeSpill when the pre-allocated
       * spill locations are not enough.
       *
       * AllocSpill and FreeSpill take the number of extra spill locations
       * besides the pre-allocated ones.
       *
       * TODO(#2044051) AllocSpill/FreeSpill are currently disabled
       * due to bugs.
       */
      // NOTE(review): the PUNT below aborts this translation, so the
      // insertAllocFreeSpill call after it appears unreachable — this
      // looks intentional while AllocSpill is disabled (see TODO above),
      // but confirm before re-enabling.
      PUNT(LinearScan_AllocSpill);
      insertAllocFreeSpill(trace, numSpillLocs - NumPreAllocatedSpillLocs);
    }
  }
  // Renumber once more to account for the spill-location code added above.
  numberInstructions(trace);
  // record the live out register set at each instruction
  LinearScan::computeLiveOutRegs(trace);
}
void emitVGetG(IRGS& env) {
  auto const gblName = topC(env);
  if (!gblName->isA(TStr)) PUNT(VGetG-NonStrName);

  // Define the global if needed, box it in place, and push a new
  // reference to the box.
  auto const slot = gen(env, LdGblAddrDef, gblName);
  destroyName(env, gblName);
  auto const boxed = gen(env, BoxPtr, slot);
  pushIncRef(env, gen(env, LdMem, TBoxedInitCell, boxed));
}
void emitAGetL(HTS& env, int32_t id) {
  auto const ldrefExit = makeExit(env);
  auto const pmExit = makePseudoMainExit(env);

  // Only a class name (string) or an object can be turned into a class;
  // anything else punts.
  auto const loc = ldLocInner(env, id, ldrefExit, pmExit, DataTypeSpecific);
  if (!loc->type().subtypeOfAny(Type::Obj, Type::Str)) PUNT(AGetL);
  implAGet(env, loc);
}
void emitRetC(IRGS& env) {
  if (curFunc(env)->isAsyncGenerator()) PUNT(RetC-AsyncGenerator);

  // A return inside an inlined callee unwinds the inlined frame instead
  // of emitting a real return sequence.
  if (!isInlining(env)) {
    implRet(env);
    return;
  }
  assertx(!resumed(env));
  retFromInlined(env);
}
void emitCGetG(IRGS& env) {
  auto const slowExit = makeExitSlow(env);
  auto const gblName = topC(env);
  if (!gblName->isA(TStr)) PUNT(CGetG-NonStrName);

  // LdGblAddr branches to the slow exit when the global is undefined.
  auto const slot = gen(env, LdGblAddr, slowExit, gblName);
  destroyName(env, gblName);
  auto const cell = gen(env, UnboxPtr, slot);
  pushIncRef(env, gen(env, LdMem, TCell, cell));
}
// Await: suspend the current async function on the wait handle at the top
// of the stack, or skip suspension entirely when the handle has already
// succeeded.  Failed handles and non-WaitHandle objects go to the slow
// path; non-objects and async generators punt.
void emitAwait(HTS& env, int32_t numIters) {
  auto const resumeOffset = nextBcOff(env);
  assert(curFunc(env)->isAsync());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Await-AsyncGenerator);

  auto const exitSlow = makeExitSlow(env);

  if (!topC(env)->isA(Type::Obj)) PUNT(Await-NonObject);

  auto const child = popC(env);
  // Only WaitHandle objects can be awaited on the fast path.
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, child));

  // cns() would ODR-use these
  auto const kSucceeded = c_WaitHandle::STATE_SUCCEEDED;
  auto const kFailed    = c_WaitHandle::STATE_FAILED;

  auto const state = gen(env, LdWHState, child);
  // A failed handle has to rethrow its exception; let the slow path do it.
  auto const failed = gen(env, EqInt, state, cns(env, kFailed));
  gen(env, JmpNZero, exitSlow, failed);

  env.irb->ifThenElse(
    [&] (Block* taken) {
      auto const succeeded = gen(env, EqInt, state, cns(env, kSucceeded));
      gen(env, JmpNZero, taken, succeeded);
    },
    [&] { // Next: the wait handle is not finished, we need to suspend.
      // The resumed/eager split matters: a resumed frame already lives on
      // the heap, an eager one must be moved there (hence numIters, used
      // only on the eager path).
      if (resumed(env)) {
        implAwaitR(env, child, resumeOffset);
      } else {
        implAwaitE(env, child, resumeOffset, numIters);
      }
    },
    [&] { // Taken: retrieve the result from the wait handle.
      auto const res = gen(env, LdWHResult, child);
      gen(env, IncRef, res);
      gen(env, DecRef, child);
      push(env, res);
    }
  );
}
void inlSingletonSProp(IRGS& env,
                       const Func* func,
                       const Op* clsOp,
                       const Op* propOp) {
  assertx(*clsOp == Op::String);
  assertx(*propOp == Op::String);

  TransFlags trflags;
  trflags.noinlineSingleton = true;
  auto const sideExit = makeExit(env, trflags);

  // Decode the class and property names from the singleton's bytecode.
  auto const unit = func->unit();
  auto const clsName = unit->lookupLitstrId(getImmPtr(clsOp, 0)->u_SA);
  auto const propName = unit->lookupLitstrId(getImmPtr(propOp, 0)->u_SA);

  // The class must be persistent for this fast path to be sound.
  auto const cls = Unit::lookupClass(clsName);
  if (UNLIKELY(!classHasPersistentRDS(cls))) {
    PUNT(SingletonSProp-Persistent);
  }

  // The sprop must exist and be accessible from the singleton method's
  // context.
  auto const lookup = cls->findSProp(func->cls(), propName);
  if (UNLIKELY(lookup.prop == kInvalidSlot || !lookup.accessible)) {
    PUNT(SingletonSProp-Accessibility);
  }

  // Load the static property's current value.
  auto const addr = ldClsPropAddrKnown(env, cls, propName);
  auto const cell = gen(env, UnboxPtr, addr);
  auto const val = gen(env, LdMem, cell->type().deref(), cell);

  // Not initialized yet: side exit to the non-inlined translation.
  auto const isNull = gen(env, IsType, TNull, val);
  gen(env, JmpNZero, sideExit, isNull);

  // Initialized: the stored value is the singleton; return it.
  pushIncRef(env, val);
}
void emitBindS(IRGS& env) {
  auto const propName = topC(env, BCSPOffset{2});
  if (!propName->isA(TStr)) PUNT(BindS-PropNameNotString);

  // Bind the boxed value directly into the static property's slot.
  auto const boxedVal = popV(env);
  auto const cls = popA(env);
  auto const addr = ldClsPropAddr(env, cls, propName, true);
  destroyName(env, propName);
  bindMem(env, addr, boxedVal);
}
void emitCGetS(IRGS& env) {
  auto const propName = topC(env, BCSPOffset{1});
  if (!propName->isA(TStr)) PUNT(CGetS-PropNameNotString);

  // Read the static property's current value and push a new reference.
  auto const cls = popA(env);
  auto const addr = ldClsPropAddr(env, cls, propName, true);
  auto const cell = gen(env, UnboxPtr, addr);
  auto const val = gen(env, LdMem, cell->type().deref(), cell);
  destroyName(env, propName);
  pushIncRef(env, val);
}
void emitSetS(IRGS& env) {
  auto const propName = topC(env, BCSPOffset{2});
  if (!propName->isA(TStr)) PUNT(SetS-PropNameNotString);

  // Store the popped value through the static property's unboxed slot.
  auto const newVal = popC(env, DataTypeCountness);
  auto const cls = popA(env);
  auto const addr = ldClsPropAddr(env, cls, propName, true);
  auto const cell = gen(env, UnboxPtr, addr);
  destroyName(env, propName);
  bindMem(env, cell, newVal);
}
// Spill ivl from its start until its first register use. If there // is no use, spill the entire interval. Otherwise split the // interval just before the use, and enqueue the second part. void Vxls::spill(Interval* ivl) { unsigned first_use = ivl->firstUse(); if (first_use <= ivl->end()) { auto split_pos = nearestSplitBefore(first_use); if (split_pos <= ivl->start()) { // this only can happen if we need more than the available registers // at a single position. I can happen in phijmp or callargs. TRACE(1, "vxls-punt RegSpill\n"); PUNT(RegSpill); // cannot split before first_use } pending.push(ivl->split(split_pos)); } assert(ivl->uses.empty()); ivl->reg = InvalidReg; if (!ivl->cns) assignSpill(ivl); }
void emitIterInit(IRGS& env,
                  int32_t iterId,
                  Offset relOffset,
                  int32_t valLocalId) {
  auto const targetOffset = iterBranchTarget(*env.currentNormalizedInstruction);

  // The iterator fast path only handles arrays and objects.
  auto const base = popC(env);
  if (!base->type().subtypeOfAny(TArr, TObj)) PUNT(IterInit);

  // IterInit returns whether the iterator has at least one element; the
  // conditional jump below branches to the loop-done target accordingly.
  auto const hasElems = gen(
    env,
    IterInit,
    TBool,
    IterData(iterId, -1, valLocalId),
    base,
    fp(env)
  );
  implCondJmp(env, targetOffset, true, hasElems);
}
void emitIssetG(IRGS& env) {
  auto const gblName = topC(env, BCSPOffset{0});
  if (!gblName->isA(TStr)) PUNT(IssetG-NameNotStr);

  auto const result = cond(
    env,
    [&] (Block* taken) {
      return gen(env, LdGblAddr, taken, gblName);
    },
    [&] (SSATmp* ptr) {
      // Next: the global is defined; isset() is "not null".
      return gen(env, IsNTypeMem, TNull, gen(env, UnboxPtr, ptr));
    },
    [&] {
      // Taken: no global with this name exists.
      return cns(env, false);
    }
  );
  destroyName(env, gblName);
  push(env, result);
}
void emitVGetS(IRGS& env) {
  auto const propName = topC(env, BCSPOffset{1});
  if (!propName->isA(TStr)) PUNT(VGetS-PropNameNotString);

  auto const cls = popA(env);
  auto const addr = ldClsPropAddr(env, cls, propName, true);
  destroyName(env, propName);

  // Box the property in place if needed, then push a new reference to it.
  auto const boxed = gen(env, BoxPtr, addr);
  pushIncRef(env, gen(env, LdMem, TBoxedInitCell, boxed));
}
// Assign the next available spill slot to interval void Vxls::assignSpill(Interval* ivl) { assert(!ivl->fixed() && ivl->parent && ivl->uses.empty()); auto leader = ivl->parent; if (leader->slot < 0) { if (leader->reg.isGP()) { leader->slot = m_nextSlot++; } else { // todo: t4764214 not all XMMs are really wide. if (!PhysLoc::isAligned(m_nextSlot)) m_nextSlot++; leader->slot = m_nextSlot; m_nextSlot += 2; } if (m_nextSlot > NumPreAllocatedSpillLocs) { // ran out of spill slots TRACE(1, "vxls-punt TooManySpills\n"); PUNT(LinearScan_TooManySpills); } } ivl->slot = leader->slot; }
void emitEmptyG(IRGS& env) {
  auto const gblName = topC(env);
  if (!gblName->isA(TStr)) PUNT(EmptyG-NameNotStr);

  auto const result = cond(
    env,
    [&] (Block* taken) {
      return gen(env, LdGblAddr, taken, gblName);
    },
    [&] (SSATmp* ptr) {
      // Next: the global exists; empty() is the negation of its boolean
      // conversion.
      auto const cell = gen(env, UnboxPtr, ptr);
      auto const val = gen(env, LdMem, TCell, cell);
      auto const asBool = gen(env, ConvCellToBool, val);
      return gen(env, XorBool, asBool, cns(env, true));
    },
    [&] {
      // Taken: an undefined global is always empty.
      return cns(env, true);
    });
  destroyName(env, gblName);
  push(env, result);
}
void emitCGetQuietG(IRGS& env) {
  auto const gblName = topC(env);
  if (!gblName->isA(TStr)) PUNT(CGetQuietG-NonStrName);

  auto const result = cond(
    env,
    [&] (Block* taken) {
      return gen(env, LdGblAddr, taken, gblName);
    },
    [&] (SSATmp* ptr) {
      // Next: the global exists; load it and take a reference.
      auto const val = gen(env, LdMem, TCell, gen(env, UnboxPtr, ptr));
      gen(env, IncRef, val);
      return val;
    },
    [&] {
      // Taken: LdGblAddr branched here because no global variable exists
      // with that name; quietly produce null (no notice).
      return cns(env, TInitNull);
    }
  );
  destroyName(env, gblName);
  push(env, result);
}
void emitYieldK(IRGS& env) {
  auto const resumeOffset = nextBcOff(env);
  assertx(resumed(env));
  assertx(curFunc(env)->isGenerator());

  if (curFunc(env)->isAsyncGenerator()) PUNT(YieldK-AsyncGenerator);

  yieldImpl(env, resumeOffset);

  // Swap the generator's stored key for the one provided by yield k => v,
  // releasing the old key.
  auto const newKey = popC(env);
  auto const prevKey = gen(env, LdContArKey, TCell, fp(env));
  gen(env, StContArKey, fp(env), newKey);
  gen(env, DecRef, prevKey);

  // Integer keys also bump the generator's auto-increment index so later
  // implicit keys stay consistent.
  if (newKey->type() <= TInt) {
    gen(env, ContArUpdateIdx, fp(env), newKey);
  }

  yieldReturnControl(env);
}
void emitNewPackedArray(HTS& env, int32_t numArgs) {
  if (numArgs > kPackedCapCodeThreshold) {
    PUNT(NewPackedArray-UnrealisticallyHuge);
  }

  auto const arr = gen(
    env,
    AllocPackedArray,
    PackedArrayData { static_cast<uint32_t>(numArgs) }
  );

  // Past this many elements, emit a single init loop instead of unrolled
  // per-element stores.
  static constexpr auto kMaxUnrolledInitArray = 8;
  if (numArgs > kMaxUnrolledInitArray) {
    spillStack(env);
    gen(
      env,
      InitPackedArrayLoop,
      InitPackedArrayLoopData {
        offsetFromSP(env, 0),
        static_cast<uint32_t>(numArgs)
      },
      arr,
      sp(env)
    );
    discard(env, numArgs);
    push(env, arr);
    return;
  }

  // Small arrays: pop each value and store it at its final index (the
  // top of the stack is the last element).
  for (int idx = 0; idx < numArgs; ++idx) {
    gen(
      env,
      InitPackedArray,
      IndexData { static_cast<uint32_t>(numArgs - idx - 1) },
      arr,
      popC(env)
    );
  }
  push(env, arr);
}
// StaticLoc: bind local `locId` to the function-static variable `name`,
// initializing the static to null on first use, and push a bool telling
// whether it was already initialized.
void emitStaticLoc(HTS& env, int32_t locId, const StringData* name) {
  // Statics in pseudo-mains have different semantics; punt.
  if (curFunc(env)->isPseudoMain()) PUNT(StaticLoc);

  auto const ldPMExit = makePseudoMainExit(env);

  // Closures don't satisfy the one-static-per-source-location assumption,
  // so they use a dedicated init helper instead of the cached lookup.
  auto const box = curFunc(env)->isClosureBody() ?
    gen(env, ClosureStaticLocInit, cns(env, name), fp(env),
        cns(env, Type::Uninit)) :
    gen(env, LdStaticLocCached, StaticLocName { curFunc(env), name });
  auto const res = cond(
    env,
    0,
    [&] (Block* taken) {
      gen(env, CheckStaticLocInit, taken, box);
    },
    [&] { // Next: the static local is already initialized
      return cns(env, true);
    },
    [&] { // Taken: need to initialize the static local
      /*
       * Even though this path is "cold", we're not marking it
       * unlikely because the size of the instructions this will
       * generate is about 10 bytes, which is not much larger than the
       * 5 byte jump to acold would be.
       *
       * One note about StaticLoc: we're literally always going to
       * generate a fallthrough trace here that is cold (the code that
       * initializes the static local).  TODO(#2894612).
       */
      gen(env, StaticLocInitCached, box, cns(env, Type::InitNull));
      return cns(env, false);
    });
  // Swap the box into the local, releasing the local's previous value.
  gen(env, IncRef, box);
  auto const oldValue = ldLoc(env, locId, ldPMExit, DataTypeGeneric);
  stLocRaw(env, locId, fp(env), box);
  gen(env, DecRef, oldValue);
  push(env, res);
}
void emitYield(IRGS& env) {
  auto const resumeOffset = nextBcOff(env);
  assertx(resumed(env));
  assertx(curFunc(env)->isGenerator());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Yield-AsyncGenerator);

  yieldImpl(env, resumeOffset);

  if (curFunc(env)->isPairGenerator()) {
    // This generator also uses yield k => v somewhere: bump the implicit
    // index and install it as the new key, releasing the old key.
    auto const nextIdx = gen(env, ContArIncIdx, fp(env));
    auto const prevKey = gen(env, LdContArKey, TCell, fp(env));
    gen(env, StContArKey, fp(env), nextIdx);
    gen(env, DecRef, prevKey);
  } else {
    // Fast path: no yield k => v in this generator, so the key is
    // guaranteed to be an int and can be bumped in place.
    gen(env, ContArIncKey, fp(env));
  }

  yieldReturnControl(env);
}