// Append a value to a collection object (ColAddNewElemC bytecode).
// Stack in: [collection, value]; stack out: [collection].
void emitColAddNewElemC(HTS& env) {
  // Bail to the interpreter unless the cell under the value is an object.
  if (!topC(env, 1)->isA(Type::Obj)) {
    return interpOne(env, Type::Obj, 2);
  }
  auto const elem = popC(env);
  auto const collection = popC(env);
  // The AddNewElem helper decrefs its args, so don't decref pop'ed values.
  push(env, gen(env, ColAddNewElemC, collection, elem));
}
// Append a value to an array (AddNewElemC bytecode).
// Stack in: [array, value]; stack out: [array].
void emitAddNewElemC(HTS& env) {
  // Bail to the interpreter unless the cell under the value is an array.
  if (!topC(env, 1)->isA(Type::Arr)) {
    return interpOne(env, Type::Arr, 2);
  }
  auto const elem = popC(env);
  auto const array = popC(env);
  // The AddNewElem helper decrefs its args, so don't decref pop'ed values.
  push(env, gen(env, AddNewElem, array, elem));
}
// Initialize a class property named `propName` with the Cell on top of the
// stack, either in static-property storage (sinit) or in the class's
// per-instance property-init data (pinit).
void emitInitProp(IRGS& env, const StringData* propName, InitPropOp op) {
  auto const initVal = popC(env);
  auto const ctx = curClass(env);

  SSATmp* base = nullptr;
  Slot idx = 0;

  if (op == InitPropOp::Static) {
    // For sinit, the context class is always the same as the late-bound
    // class, so we can just use curClass().
    auto const handle = ctx->sPropHandle(ctx->lookupSProp(propName));
    base = gen(env, LdRDSAddr, RDSHandleData { handle }, TPtrToSPropCell);
  } else {
    // The above is not the case for pinit, so we need to load the
    // late-bound class explicitly.
    auto const cctx = gen(env, LdCctx, fp(env));
    auto const cls = gen(env, LdClsCtx, cctx);
    base = gen(env, LdClsInitData, cls);
    idx = ctx->lookupDeclProp(propName);
  }

  gen(env, StElem, base, cns(env, idx * sizeof(TypedValue)), initVal);
}
// Emit StaticLocInit: bind local `locId` to the function's static variable
// `name`, initializing the static's box with the Cell on top of the stack
// the first time through.
void emitStaticLocInit(HTS& env, int32_t locId, const StringData* name) {
  if (curFunc(env)->isPseudoMain()) PUNT(StaticLocInit);

  auto const ldPMExit = makePseudoMainExit(env);
  auto const value = popC(env);

  // Closures and generators from closures don't satisfy the "one static per
  // source location" rule that the inline fastpath requires
  auto const box = [&]{
    if (curFunc(env)->isClosureBody()) {
      return gen(env, ClosureStaticLocInit, cns(env, name), fp(env), value);
    }
    auto const cachedBox =
      gen(env, LdStaticLocCached, StaticLocName { curFunc(env), name });
    ifThen(
      env,
      [&] (Block* taken) {
        // Branch to `taken` when the cached static is already initialized.
        gen(env, CheckStaticLocInit, taken, cachedBox);
      },
      [&] {
        // Slow path: first execution — initialize the cached box.
        hint(env, Block::Hint::Unlikely);
        gen(env, StaticLocInitCached, cachedBox, value);
      }
    );
    return cachedBox;
  }();

  // The local takes a new reference to the box; release whatever the local
  // held before the raw store.
  gen(env, IncRef, box);
  auto const oldValue = ldLoc(env, locId, ldPMExit, DataTypeSpecific);
  stLocRaw(env, locId, fp(env), box);
  gen(env, DecRef, oldValue);
  // We don't need to decref value---it's a bytecode invariant that
  // our Cell was not ref-counted.
}
// Add a key/value pair to a collection object (ColAddElemC bytecode).
// Stack in: [collection, key, value]; stack out: [collection].
void emitColAddElemC(HTS& env) {
  // The collection two cells down must be an object...
  if (!topC(env, 2)->isA(Type::Obj)) {
    return interpOne(env, Type::Obj, 3);
  }
  // ...and the key must be an int or string; otherwise interpret.
  auto const keyType = topC(env, 1, DataTypeGeneric)->type();
  if (!keyType.subtypeOfAny(Type::Int, Type::Str)) {
    return interpOne(env, Type::Obj, 3);
  }

  auto const elem = popC(env);
  auto const key = popC(env);
  auto const collection = popC(env);
  push(env, gen(env, ColAddElemC, collection, key, elem));
  gen(env, DecRef, key);
}
// Print bytecode: print the top-of-stack value and push int(1).
// Only Int/Bool/Null/Str are handled inline; anything else interprets.
void emitPrint(HTS& env) {
  auto const ty = topC(env)->type();
  if (!ty.subtypeOfAny(Type::Int, Type::Bool, Type::Null, Type::Str)) {
    interpOne(env, Type::Int, 1);
    return;
  }

  auto const cell = popC(env);

  // Select the print helper for the operand's type; Null prints nothing.
  auto const op =
    ty <= Type::Str  ? PrintStr  :
    ty <= Type::Int  ? PrintInt  :
    ty <= Type::Bool ? PrintBool :
    Nop;
  assert(op != Nop || ty <= Type::Null);

  // the print helpers decref their arg, so don't decref pop'ed value
  if (op != Nop) {
    gen(env, op, cell);
  }
  push(env, cns(env, 1));
}
// SetG: store the top-of-stack value into the global named by the cell
// beneath it (name must be a statically-known string).
void emitSetG(IRGS& env) {
  auto const name = topC(env, BCSPOffset{1});
  if (!name->isA(TStr)) PUNT(SetG-NameNotStr);

  auto const value = popC(env, DataTypeCountness);
  auto const gblAddr = gen(env, LdGblAddrDef, name);
  auto const unboxed = gen(env, UnboxPtr, gblAddr);
  destroyName(env, name);
  bindMem(env, unboxed, value);
}
// SetL: store the top-of-stack value into local `id` (value stays on stack).
void emitSetL(HTS& env, int32_t id) {
  auto const ldrefExit = makeExit(env);
  auto const ldPMExit = makePseudoMainExit(env);

  // Since we're just storing the value in a local, this function doesn't
  // care about the value's type; stLoc needs to IncRef the value so it may
  // constrain it further itself.
  auto const newVal = popC(env, DataTypeGeneric);
  pushStLoc(env, id, ldrefExit, ldPMExit, newVal);
}
// AGetC: resolve the class named by the top-of-stack cell (object or
// string) and push it onto the class-ref stack.
void emitAGetC(HTS& env) {
  auto const name = topC(env);
  if (!name->type().subtypeOfAny(Type::Obj, Type::Str)) {
    interpOne(env, Type::Cls, 1);
    return;
  }
  popC(env);
  implAGet(env, name);
  gen(env, DecRef, name);
}
// IsTypeC: pop a cell and push a bool saying whether it has the type
// named by `subop`.
void emitIsTypeC(IRGS& env, IsTypeOp subop) {
  if (subop == IsTypeOp::Scalar) return implIsScalarC(env);

  auto const dt = typeOpToDataType(subop);
  auto const val = popC(env, DataTypeSpecific);
  // is_object gets a specialized helper; everything else is a plain IsType.
  auto const result = dt == KindOfObject
    ? optimizedCallIsObject(env, val)
    : gen(env, IsType, Type(dt), val);
  push(env, result);
  decRef(env, val);
}
// AddElemC: add a key/value pair to an array.
// Stack in: [array, key, value]; stack out: [array].
void emitAddElemC(HTS& env) {
  // Peek at the key's type only; the real pop happens further down and we
  // don't want to constrain it if we're just going to InterpOne.
  auto const keyType = topC(env, 1, DataTypeGeneric)->type();

  Opcode op;
  if (keyType <= Type::Int) {
    op = AddElemIntKey;
  } else if (keyType <= Type::Str) {
    op = AddElemStrKey;
  } else {
    interpOne(env, Type::Arr, 3);
    return;
  }

  // The value is teleported from the stack into the array, so no
  // refcounting is needed for it.
  auto const elem = popC(env, DataTypeGeneric);
  auto const key = popC(env);
  auto const array = popC(env);
  // The AddElem* instructions decref their args, so don't decref pop'ed
  // values.
  push(env, gen(env, op, array, key, elem));
}
// OODeclExists: push a bool saying whether the class/interface/trait named
// by the string two cells down exists (optionally autoloading it).
void emitOODeclExists(IRGS& env, OODeclExistsOp subop) {
  auto const autoload = popC(env);
  auto const clsName = popC(env);
  assertx(clsName->isA(TStr));    // result of CastString
  assertx(autoload->isA(TBool));  // result of CastBool

  // Map the bytecode subop onto the runtime's ClassKind.
  ClassKind kind;
  switch (subop) {
    case OODeclExistsOp::Class:     kind = ClassKind::Class;     break;
    case OODeclExistsOp::Trait:     kind = ClassKind::Trait;     break;
    case OODeclExistsOp::Interface: kind = ClassKind::Interface; break;
  }

  auto const exists =
    gen(env, OODeclExists, ClassKindData { kind }, clsName, autoload);
  push(env, exists);
  decRef(env, clsName);
}
// SetS: store the top-of-stack value into the static property named by the
// string cell two slots down, on the class popped from the class-ref stack.
void emitSetS(IRGS& env) {
  auto const ssaPropName = topC(env, BCSPOffset{2});
  if (!ssaPropName->isA(TStr)) {
    PUNT(SetS-PropNameNotString);
  }

  auto const value = popC(env, DataTypeCountness);
  auto const cls = popA(env);
  auto const propAddr = ldClsPropAddr(env, cls, ssaPropName, true);
  auto const unboxed = gen(env, UnboxPtr, propAddr);
  destroyName(env, ssaPropName);
  bindMem(env, unboxed, value);
}
// InstanceOf: pop a class-ish cell (t1) and a value (t2), push whether
// "t2 instanceof t1" holds.
void emitInstanceOf(IRGS& env) {
  auto const t1 = popC(env);
  auto const t2 = popC(env);  // t2 instanceof t1

  // Both operands are objects: compare their classes directly.
  if (t1->isA(TObj) && t2->isA(TObj)) {
    auto const lhsCls = gen(env, LdObjClass, t2);
    auto const rhsCls = gen(env, LdObjClass, t1);
    push(env, gen(env, InstanceOf, lhsCls, rhsCls));
    decRef(env, t2);
    decRef(env, t1);
    return;
  }

  if (!t1->isA(TStr)) PUNT(InstanceOf-NotStr);

  // Object LHS, string class name: resolve the class through RDS.
  if (t2->isA(TObj)) {
    auto const handle = gen(env, LookupClsRDSHandle, t1);
    auto const rhsCls = gen(env, DerefClsRDSHandle, handle);
    auto const lhsCls = gen(env, LdObjClass, t2);
    push(env, gen(env, InstanceOf, lhsCls, rhsCls));
    decRef(env, t2);
    decRef(env, t1);
    return;
  }

  // Primitive LHS: true only via interface "supports" checks for that
  // primitive's type; anything else is false.
  auto const result =
    t2->isA(TArr) ? gen(env, InterfaceSupportsArr, t1) :
    t2->isA(TInt) ? gen(env, InterfaceSupportsInt, t1) :
    t2->isA(TStr) ? gen(env, InterfaceSupportsStr, t1) :
    t2->isA(TDbl) ? gen(env, InterfaceSupportsDbl, t1) :
    cns(env, false);
  push(env, result);
  decRef(env, t2);
  decRef(env, t1);
}
// CastArray: convert the top-of-stack cell to an array, dispatching on its
// statically-known type when possible.
void emitCastArray(HTS& env) {
  auto const src = popC(env);

  auto const converted = [&]() -> SSATmp* {
    if (src->isA(Type::Arr))  return src;
    if (src->isA(Type::Null)) return cns(env, staticEmptyArray());
    if (src->isA(Type::Bool)) return gen(env, ConvBoolToArr, src);
    if (src->isA(Type::Dbl))  return gen(env, ConvDblToArr, src);
    if (src->isA(Type::Int))  return gen(env, ConvIntToArr, src);
    if (src->isA(Type::Str))  return gen(env, ConvStrToArr, src);
    if (src->isA(Type::Obj))  return gen(env, ConvObjToArr, src);
    // Type not known statically: use the generic conversion.
    return gen(env, ConvCellToArr, src);
  };

  push(env, converted());
}
// IterInit: initialize iterator `iterId` over the popped base, storing the
// first value into local `valLocalId`, and branch when iteration starts.
void emitIterInit(IRGS& env,
                  int32_t iterId,
                  Offset relOffset,
                  int32_t valLocalId) {
  auto const targetOffset =
    iterBranchTarget(*env.currentNormalizedInstruction);
  auto const base = popC(env);
  if (!base->type().subtypeOfAny(TArr, TObj)) PUNT(IterInit);

  auto const initRes = gen(
    env,
    IterInit,
    TBool,
    IterData(iterId, -1, valLocalId),
    base,
    fp(env)
  );
  implCondJmp(env, targetOffset, true, initRes);
}
// WHResult: pop a wait handle and push its result; side-exit if the operand
// isn't a wait handle or hasn't succeeded yet.
void emitWHResult(IRGS& env) {
  assertx(topC(env)->isA(TObj));
  auto const exitSlow = makeExitSlow(env);
  auto const handle = popC(env);

  // In most conditions, this will be optimized out by the simplifier.
  // We already need to setup a side-exit for the !succeeded case.
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, handle));

  static_assert(
    c_WaitHandle::STATE_SUCCEEDED == 0,
    "we test state for non-zero, success must be zero"
  );
  gen(env, JmpNZero, exitSlow, gen(env, LdWHState, handle));

  auto const result = gen(env, LdWHResult, TInitCell, handle);
  gen(env, IncRef, result);
  gen(env, DecRef, handle);
  push(env, result);
}
// SSwitch: match the popped string against the literal-string cases in `iv`
// and jump to the winning target; the last entry of `iv` is the default.
void emitSSwitch(HTS& env, const ImmVector& iv) {
  const int numCases = iv.size() - 1;

  /*
   * We use a fast path translation with a hashtable if none of the
   * cases are numeric strings and if the input is actually a string.
   *
   * Otherwise we do a linear search through the cases calling string
   * conversion routines.
   */
  const bool fastPath =
    topC(env)->isA(Type::Str) &&
    std::none_of(iv.strvec(), iv.strvec() + numCases,
      [&](const StrVecItem& item) {
        return curUnit(env)->lookupLitstrId(item.str)->isNumeric();
      }
    );

  auto const testVal = popC(env);

  // Build the case table (literal string -> destination SrcKey).
  std::vector<LdSSwitchData::Elm> cases(numCases);
  for (int i = 0; i < numCases; ++i) {
    auto const& kv = iv.strvec()[i];
    cases[i].str = curUnit(env)->lookupLitstrId(kv.str);
    cases[i].dest = SrcKey{curSrcKey(env), bcOff(env) + kv.dest};
  }

  LdSSwitchData data;
  data.numCases = numCases;
  data.cases = &cases[0];
  data.defaultSk = SrcKey{curSrcKey(env),
                          bcOff(env) + iv.strvec()[iv.size() - 1].dest};

  auto const dest = gen(env,
                        fastPath ? LdSSwitchDestFast
                                 : LdSSwitchDestSlow,
                        data,
                        testVal);
  gen(env, DecRef, testVal);
  // Sync the stack pointer before jumping out of the region.
  gen(env, AdjustSP,
      IRSPOffsetData { offsetFromIRSP(env, BCSPOffset{0}) },
      sp(env));
  gen(env, JmpSSwitchDest, dest, sp(env));
}
// DecodeCufIter: decode the popped callable into iterator slot `iterId`,
// then branch on whether decoding succeeded.
void emitDecodeCufIter(IRGS& env, int32_t iterId, Offset relOffset) {
  auto const callable = popC(env);
  auto const takenOff = bcOff(env) + relOffset;

  if (!callable->type().subtypeOfAny(TArr, TStr, TObj)) {
    // Not a decodable callable representation: the decode always fails, so
    // take the branch unconditionally.
    decRef(env, callable);
    jmpImpl(env, takenOff);
    return;
  }

  auto const decoded = gen(
    env,
    DecodeCufIter,
    TBool,
    IterId(iterId),
    callable,
    fp(env)
  );
  decRef(env, callable);
  implCondJmp(env, takenOff, true, decoded);
}
// Await: pop the child wait handle; if it already succeeded, push its result
// and fall through, otherwise suspend the async function. Failed handles
// (and non-wait-handle objects) side-exit to the slow path.
void emitAwait(HTS& env, int32_t numIters) {
  auto const resumeOffset = nextBcOff(env);
  assert(curFunc(env)->isAsync());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Await-AsyncGenerator);

  auto const exitSlow = makeExitSlow(env);

  if (!topC(env)->isA(Type::Obj)) PUNT(Await-NonObject);

  auto const child = popC(env);
  // Side-exit unless the popped object is really a wait handle.
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, child));

  // cns() would ODR-use these
  auto const kSucceeded = c_WaitHandle::STATE_SUCCEEDED;
  auto const kFailed = c_WaitHandle::STATE_FAILED;

  auto const state = gen(env, LdWHState, child);
  auto const failed = gen(env, EqInt, state, cns(env, kFailed));
  // Failed wait handles go to the slow path.
  gen(env, JmpNZero, exitSlow, failed);

  env.irb->ifThenElse(
    [&] (Block* taken) {
      auto const succeeded = gen(env, EqInt, state, cns(env, kSucceeded));
      gen(env, JmpNZero, taken, succeeded);
    },
    [&] {
      // Next: the wait handle is not finished, we need to suspend
      if (resumed(env)) {
        implAwaitR(env, child, resumeOffset);
      } else {
        implAwaitE(env, child, resumeOffset, numIters);
      }
    },
    [&] {
      // Taken: retrieve the result from the wait handle
      auto const res = gen(env, LdWHResult, child);
      gen(env, IncRef, res);
      gen(env, DecRef, child);
      push(env, res);
    }
  );
}
// YieldK: yield a key/value pair from a generator, storing the popped key
// into the generator's frame before returning control to the caller.
void emitYieldK(IRGS& env) {
  auto const resumeOffset = nextBcOff(env);
  assertx(resumed(env));
  assertx(curFunc(env)->isGenerator());

  if (curFunc(env)->isAsyncGenerator()) PUNT(YieldK-AsyncGenerator);

  yieldImpl(env, resumeOffset);

  // Swap the new key into the generator frame, releasing the previous one.
  auto const newKey = popC(env);
  auto const prevKey = gen(env, LdContArKey, TCell, fp(env));
  gen(env, StContArKey, fp(env), newKey);
  gen(env, DecRef, prevKey);

  if (newKey->type() <= TInt) {
    gen(env, ContArUpdateIdx, fp(env), newKey);
  }

  yieldReturnControl(env);
}
// NOTE(review): this is an older-style duplicate of the emitDecodeCufIter
// defined earlier in this file (it uses Type::* spellings instead of the T*
// aliases and passes explicit jump flags to jmpImpl). Two definitions with
// the same signature cannot coexist in one translation unit -- confirm which
// version should be kept.
void emitDecodeCufIter(IRGS& env, int32_t iterId, Offset relOffset) {
  auto const src = popC(env);
  auto const type = src->type();
  if (type.subtypeOfAny(Type::Arr, Type::Str, Type::Obj)) {
    // Decode the callable into iterator slot `iterId`; the bool result says
    // whether decoding succeeded, and drives the conditional branch.
    auto const res = gen(env,
                         DecodeCufIter,
                         Type::Bool,
                         IterId(iterId),
                         src,
                         fp(env));
    gen(env, DecRef, src);
    implCondJmp(env, bcOff(env) + relOffset, true, res);
  } else {
    // Not a decodable type: the decode always fails, so branch
    // unconditionally.
    gen(env, DecRef, src);
    jmpImpl(env,
            bcOff(env) + relOffset,
            instrJmpFlags(*env.currentNormalizedInstruction));
  }
}
// NewPackedArray: pop `numArgs` cells and push a packed array containing
// them in order.
void emitNewPackedArray(HTS& env, int32_t numArgs) {
  if (numArgs > kPackedCapCodeThreshold) {
    PUNT(NewPackedArray-UnrealisticallyHuge);
  }

  auto const array = gen(
    env,
    AllocPackedArray,
    PackedArrayData { static_cast<uint32_t>(numArgs) }
  );

  // Above this many elements, emit one runtime init loop instead of
  // unrolled per-element stores.
  static constexpr auto kMaxUnrolledInitArray = 8;
  if (numArgs > kMaxUnrolledInitArray) {
    spillStack(env);
    gen(
      env,
      InitPackedArrayLoop,
      InitPackedArrayLoopData {
        offsetFromSP(env, 0),
        static_cast<uint32_t>(numArgs)
      },
      array,
      sp(env)
    );
    discard(env, numArgs);
    push(env, array);
    return;
  }

  // Unrolled path: the top of stack is the LAST element, so fill indices
  // from the back as we pop.
  for (auto idx = numArgs - 1; idx >= 0; --idx) {
    gen(
      env,
      InitPackedArray,
      IndexData { static_cast<uint32_t>(idx) },
      array,
      popC(env)
    );
  }
  push(env, array);
}
// Switch: dispatch on the popped value through the jump table in `iv`.
// `base` is the value of the first case; when `bounded`, the table carries
// two extra trailing entries (non-zero and default targets).
void emitSwitch(HTS& env,
                const ImmVector& iv,
                int64_t base,
                int32_t bounded) {
  int nTargets = bounded ? iv.size() - 2 : iv.size();
  SSATmp* const switchVal = popC(env);
  Type type = switchVal->type();
  assert(IMPLIES(!(type <= Type::Int), bounded));
  assert(IMPLIES(bounded, iv.size() > 2));

  SSATmp* index;
  SSATmp* ssabase = cns(env, base);
  SSATmp* ssatargets = cns(env, nTargets);

  // Destination when the value is (or behaves like) zero: the case covering
  // zero if it's in range, otherwise the default.
  Offset defaultOff = bcOff(env) + iv.vec32()[iv.size() - 1];
  Offset zeroOff = 0;
  if (base <= 0 && (base + nTargets) > 0) {
    zeroOff = bcOff(env) + iv.vec32()[0 - base];
  } else {
    zeroOff = defaultOff;
  }

  if (type <= Type::Null) {
    // Null switches like zero.
    gen(env, Jmp, makeExit(env, zeroOff));
    return;
  }
  if (type <= Type::Bool) {
    Offset nonZeroOff = bcOff(env) + iv.vec32()[iv.size() - 2];
    gen(env, JmpNZero, makeExit(env, nonZeroOff), switchVal);
    gen(env, Jmp, makeExit(env, zeroOff));
    return;
  }

  if (type <= Type::Int) {
    // No special treatment needed
    index = switchVal;
  } else if (type <= Type::Dbl) {
    // switch(Double|String|Obj)Helper do bounds-checking for us, so
    // we need to make sure the default case is in the jump table,
    // and don't emit our own bounds-checking code
    bounded = false;
    index = gen(env, LdSwitchDblIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Str) {
    bounded = false;
    index = gen(env, LdSwitchStrIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Obj) {
    // switchObjHelper can throw exceptions and reenter the VM so we use the
    // catch block here.
    bounded = false;
    index = gen(env, LdSwitchObjIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Arr) {
    // Arrays always take the default case.
    gen(env, DecRef, switchVal);
    gen(env, Jmp, makeExit(env, defaultOff));
    return;
  } else {
    PUNT(Switch-UnknownType);
  }

  // Materialize absolute bytecode targets for the jump table.
  std::vector<Offset> targets(iv.size());
  for (int i = 0; i < iv.size(); i++) {
    targets[i] = bcOff(env) + iv.vec32()[i];
  }

  JmpSwitchData data;
  data.base = base;
  data.bounded = bounded;
  data.cases = iv.size();
  data.defaultOff = defaultOff;
  data.targets = &targets[0];

  // Sync the stack before leaving the region through the switch.
  spillStack(env);
  gen(env, AdjustSP, StackOffset { offsetFromSP(env, 0) }, sp(env));
  gen(env, JmpSwitchDest, data, index, sp(env));
}
// CastDouble: convert the top-of-stack cell to a double.
void emitCastDouble(HTS& env) {
  auto const operand = popC(env);
  push(env, gen(env, ConvCellToDbl, operand));
  gen(env, DecRef, operand);
}
// CastInt: convert the top-of-stack cell to an int.
void emitCastInt(HTS& env) {
  auto const operand = popC(env);
  push(env, gen(env, ConvCellToInt, operand));
  gen(env, DecRef, operand);
}
// Await (IRGS version): pop the child wait handle; if it already succeeded,
// push its result and fall through, otherwise suspend. Uses any following
// AssertRATStk 0 to narrow the type of the result that will be pushed.
void emitAwait(IRGS& env, int32_t numIters) {
  auto const resumeOffset = nextBcOff(env);
  assertx(curFunc(env)->isAsync());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Await-AsyncGenerator);

  auto const exitSlow = makeExitSlow(env);

  if (!topC(env)->isA(TObj)) PUNT(Await-NonObject);

  auto const child = popC(env);
  // Side-exit unless the popped object is really a wait handle.
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, child));

  // cns() would ODR-use these
  auto const kSucceeded = c_WaitHandle::STATE_SUCCEEDED;
  auto const kFailed = c_WaitHandle::STATE_FAILED;

  auto const state = gen(env, LdWHState, child);

  /*
   * HHBBC may have proven something about the inner type of this wait handle.
   *
   * So, we may have an assertion on the type of the top of the stack after
   * this instruction. We know the next bytecode instruction is reachable from
   * fallthrough on the Await, so if it is an AssertRATStk 0, anything coming
   * out of the wait handle must be a subtype of that type, so this is a safe
   * and conservative way to do this optimization (even if our successor
   * bytecode offset is a jump target from things we aren't thinking about
   * here).
   */
  auto const knownTy = [&] {
    auto pc = curUnit(env)->at(resumeOffset);
    if (*reinterpret_cast<const Op*>(pc) != Op::AssertRATStk) return TInitCell;
    ++pc;
    auto const stkLoc = decodeVariableSizeImm(&pc);
    if (stkLoc != 0) return TInitCell;
    auto const rat = decodeRAT(curUnit(env), pc);
    auto const ty = ratToAssertType(env, rat);
    return ty ? *ty : TInitCell;
  }();

  ifThenElse(
    env,
    [&] (Block* taken) {
      auto const succeeded = gen(env, EqInt, state, cns(env, kSucceeded));
      gen(env, JmpNZero, taken, succeeded);
    },
    [&] {
      // Next: the wait handle is not finished, we need to suspend
      auto const failed = gen(env, EqInt, state, cns(env, kFailed));
      gen(env, JmpNZero, exitSlow, failed);
      if (resumed(env)) {
        implAwaitR(env, child, resumeOffset);
      } else {
        implAwaitE(env, child, resumeOffset, numIters);
      }
    },
    [&] {
      // Taken: retrieve the result from the wait handle
      auto const res = gen(env, LdWHResult, knownTy, child);
      gen(env, IncRef, res);
      gen(env, DecRef, child);
      push(env, res);
    }
  );
}
// CastObject: convert the top-of-stack cell to an object.
void emitCastObject(HTS& env) {
  auto const operand = popC(env);
  // NOTE(review): unlike emitCastInt/Dbl/Str there is no DecRef of the
  // popped cell here -- presumably ConvCellToObj consumes the reference;
  // confirm against the opcode's specification.
  push(env, gen(env, ConvCellToObj, operand));
}
// CastString: convert the top-of-stack cell to a string.
void emitCastString(HTS& env) {
  auto const operand = popC(env);
  push(env, gen(env, ConvCellToStr, operand));
  gen(env, DecRef, operand);
}
// InstanceOfD: push whether the popped value is an instance of the class
// named by the literal `className`.
void emitInstanceOfD(IRGS& env, const StringData* className) {
  auto const obj = popC(env);
  push(env, implInstanceOfD(env, obj, className));
  decRef(env, obj);
}