// Lower ContPreNext: validate a (possibly async) generator's state before
// resuming it, side-exiting to taken() if the state check fails, then bump
// the state forward (Created -> Priming, or Started -> Running).
void cgContPreNext(IRLS& env, const IRInstruction* inst) {
  auto const cont = srcLoc(env, inst, 0).reg();
  auto const checkStarted = inst->src(1)->boolVal();
  auto const isAsync = inst->extra<IsAsyncData>()->isAsync;
  auto& v = vmain(env);

  auto const sf = v.makeReg();

  // These asserts make sure that the started check works.
  static_assert(uint8_t(BaseGenerator::State::Created) == 0, "used below");
  static_assert(uint8_t(BaseGenerator::State::Started) == 1, "used below");
  static_assert(uint8_t(BaseGenerator::State::Done) > 3, "");

  // These asserts ensure that the state transition works.  If we're in the
  // Created state we want to transition to Priming, and if we're in the
  // Started state we want to transition to Running.  By laying out the enum
  // this way we can avoid the branch and just transition by adding 2 to the
  // current state.
  static_assert(uint8_t(BaseGenerator::State::Priming) ==
                uint8_t(BaseGenerator::State::Created) + 2, "used below");
  static_assert(uint8_t(BaseGenerator::State::Running) ==
                uint8_t(BaseGenerator::State::Started) + 2, "used below");

  // Take exit if state != 1 (checkStarted) or if state > 1 (!checkStarted).
  // The state byte lives at a fixed offset relative to the generator object.
  auto stateOff = BaseGenerator::stateOff() - genOffset(isAsync);
  v << cmpbim{int8_t(BaseGenerator::State::Started), cont[stateOff], sf};
  fwdJcc(v, env, checkStarted ? CC_NE : CC_A, sf, inst->taken());

  // Transition the generator into either the Priming state (if we were just
  // created) or the Running state (if we were started).  Due to the way the
  // enum is laid out, we can model this by just adding 2.
  v << addlim{int8_t(2), cont[stateOff], v.makeReg()};
}
// Lower CheckStaticLoc: branch to taken() when the static local's RDS entry
// has not been initialized yet.
void cgCheckStaticLoc(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const extra = inst->extra<CheckStaticLoc>();

  // Bind (or look up) the RDS slot for this function's static local and
  // test its init bit.
  auto const sf = checkRDSHandleInitialized(
    v,
    rds::bindStaticLocal(extra->func, extra->name).handle()
  );
  fwdJcc(v, env, CC_NE, sf, inst->taken());
}
// Lower LdClsCns: side-exit if the class constant's RDS cache is not yet
// initialized, otherwise produce the address of the cached value.
void cgLdClsCns(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const extra = inst->extra<LdClsCns>();
  auto const link = rds::bindClassConstant(extra->clsName, extra->cnsName);

  // Uninitialized cache => take the exit branch.
  auto const initFlag = checkRDSHandleInitialized(v, link.handle());
  fwdJcc(v, env, CC_NE, initFlag, inst->taken());

  // dst = address of the cached constant inside RDS.
  v << lea{rvmtl()[link.handle()], dstLoc(env, inst, 0).reg()};
}
// Lower CheckNonNull: branch to taken() when the source pointer is null,
// otherwise forward the value to dst.
void cgCheckNonNull(IRLS& env, const IRInstruction* inst) {
  assertx(inst->taken());
  auto& v = vmain(env);

  auto const val = srcLoc(env, inst, 0).reg();
  auto const sf = v.makeReg();

  // testq sets ZF iff val == 0; a null value takes the exit.
  v << testq{val, val, sf};
  fwdJcc(v, env, CC_Z, sf, inst->taken());

  v << copy{val, dstLoc(env, inst, 0).reg()};
}
// Lower CheckSubClsCns: verify that the constant at the given slot in the
// class's constants vector has the expected name, side-exiting on mismatch.
void cgCheckSubClsCns(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CheckSubClsCns>();
  auto& v = vmain(env);

  // Load the class's constants vector.
  auto const constants = v.makeReg();
  v << load{srcLoc(env, inst, 0).reg()[Class::constantsVecOff()], constants};

  // Byte offset of the `name` field of the Const at extra->slot.
  auto const nameOff =
    extra->slot * sizeof(Class::Const) + offsetof(Class::Const, name);

  // Compare the stored (low-ptr) name against the expected one.
  auto const sf = v.makeReg();
  emitCmpLowPtr<StringData>(v, sf, v.cns(extra->cnsName), constants[nameOff]);
  fwdJcc(v, env, CC_NE, sf, inst->taken());
}
// Lower LdClsMethodFCacheFunc: side-exit if the forwarding static-method
// cache is uninitialized, otherwise load the cached Func* into dst.
void cgLdClsMethodFCacheFunc(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const extra = inst->extra<ClsMethodData>();
  auto const dst = dstLoc(env, inst, 0).reg();

  // Allocate (or find) the RDS cache entry keyed by class, method, and the
  // calling context's name.
  auto const ch = StaticMethodFCache::alloc(
    extra->clsName,
    extra->methodName,
    ctxName(inst->marker())
  );

  // Empty cache => take the exit branch.
  auto const sf = checkRDSHandleInitialized(v, ch);
  fwdJcc(v, env, CC_NE, sf, inst->taken());

  // Load the cached Func*, stored as a LowPtr inside the cache struct.
  emitLdLowPtr(v, rvmtl()[ch + offsetof(StaticMethodFCache, m_func)],
               dst, sizeof(LowPtr<const Func>));
}
// Lower LdCns: load a named global constant.  Normal RDS handles get an
// init check plus a TypedValue load; persistent handles are either loaded
// with a dynamic Uninit check or, when the value is statically known,
// materialized directly as immediates.
void cgLdCns(IRLS& env, const IRInstruction* inst) {
  auto const cnsName = inst->src(0)->strVal();
  auto const ch = makeCnsHandle(cnsName, false);
  auto const dst = dstLoc(env, inst, 0);
  auto& v = vmain(env);
  assertx(inst->taken());

  if (rds::isNormalHandle(ch)) {
    // Normal handle: side-exit if uninitialized, then load the TypedValue.
    auto const sf = checkRDSHandleInitialized(v, ch);
    fwdJcc(v, env, CC_NE, sf, inst->taken());
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    return;
  }
  assertx(rds::isPersistentHandle(ch));
  auto const& cns = rds::handleToRef<TypedValue>(ch);

  if (cns.m_type == KindOfUninit) {
    // Persistent but not yet set at JIT time: load it, then test the type
    // register at runtime and side-exit if the value is still Uninit.
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    auto const sf = v.makeReg();
    irlower::emitTypeTest(
      v, env, TUninit, dst.reg(1), dst.reg(0), sf,
      [&] (ConditionCode cc, Vreg sf) {
        fwdJcc(v, env, cc, sf, inst->taken());
      }
    );
  } else {
    // Statically known constant.
    assertx(!dst.isFullSIMD());
    switch (cns.m_type) {
      case KindOfNull:
        // Null carries no payload; emit a null data register.
        v << copy{v.cns(nullptr), dst.reg(0)};
        break;
      case KindOfBoolean:
        v << copy{v.cns(!!cns.m_data.num), dst.reg(0)};
        break;
      case KindOfInt64:
      case KindOfPersistentString:
      case KindOfPersistentVec:
      case KindOfPersistentDict:
      case KindOfPersistentKeyset:
      case KindOfPersistentArray:
      case KindOfString:
      case KindOfVec:
      case KindOfDict:
      case KindOfKeyset:
      case KindOfArray:
      case KindOfObject:
      case KindOfResource:
      case KindOfRef:
        // All pointer-or-integer payloads fit in m_data.num.
        v << copy{v.cns(cns.m_data.num), dst.reg(0)};
        break;
      case KindOfDouble:
        v << copy{v.cns(cns.m_data.dbl), dst.reg(0)};
        break;
      case KindOfUninit:
      case KindOfClass:
        not_reached();
    }
    // Emit the (statically known) type into the type register.
    v << copy{v.cns(cns.m_type), dst.reg(1)};
  }
}
// Lower CheckType: test whether src matches the instruction's type
// parameter, branching to taken() on failure and otherwise defining dst
// from src.  Several strategies are used depending on how much is known
// statically about src's type and whether a type register is available.
void cgCheckType(IRLS& env, const IRInstruction* inst) {
  // Note: If you add new supported type checks, you should update
  // negativeCheckType() to indicate whether it is precise or not.
  auto const src = inst->src(0);
  auto const dst = inst->dst();
  auto const srcData = srcLoc(env, inst, 0).reg(0);
  auto const srcType = srcLoc(env, inst, 0).reg(1);
  auto& v = vmain(env);

  // Emit the conditional side-exit; cc is the "type matches" condition, so
  // we negate it to jump on mismatch.
  auto const doJcc = [&] (ConditionCode cc, Vreg sf) {
    fwdJcc(v, env, ccNegate(cc), sf, inst->taken());
  };

  // Define dst's data (and type, when it has a type register) from src.
  auto const doMov = [&] {
    auto const dstData = dstLoc(env, inst, 0).reg(0);
    auto const dstType = dstLoc(env, inst, 0).reg(1);

    if (dst->isA(TBool) && !src->isA(TBool)) {
      // Narrowing to Bool: truncate the quadword to a byte.
      v << movtqb{srcData, dstData};
    } else {
      v << copy{srcData, dstData};
    }
    if (dstType == InvalidReg) return;
    if (srcType != InvalidReg) {
      v << copy{srcType, dstType};
    } else {
      // No runtime type register on src; materialize the static DataType.
      v << ldimmq{src->type().toDataType(), dstType};
    }
  };

  auto const typeParam = inst->typeParam();

  if (src->isA(typeParam)) {
    // src is the target type or better. Just define our dst.
    doMov();
    return;
  }
  if (!src->type().maybe(typeParam)) {
    // src is definitely not the target type. Always jump.
    v << jmp{label(env, inst->taken())};
    return;
  }

  if (srcType != InvalidReg) {
    // We have a runtime type register: do a full dynamic type test.
    emitTypeTest(v, env, typeParam, srcType, srcData, v.makeReg(), doJcc);
    doMov();
    return;
  }

  if (src->type() <= TBoxedCell && typeParam <= TBoxedCell) {
    // We should never have specific known Boxed types; those should only be
    // used for hints and predictions.
    always_assert(!(typeParam < TBoxedInitCell));
    doMov();
    return;
  }

  /*
   * See if we're just checking the array kind or object class of a value
   * with a mostly-known type.
   *
   * Important: We don't support typeParam being something like
   * StaticArr=kPackedKind unless the src->type() also already knows its
   * staticness.  We do allow things like CheckType<Arr=Packed> t1:StaticArr,
   * though.  This is why we have to check that the unspecialized type is at
   * least as big as the src->type().
   */
  if (typeParam.isSpecialized() &&
      typeParam.unspecialize() >= src->type()) {
    detail::emitSpecializedTypeTest(v, env, typeParam, srcData,
                                    v.makeReg(), doJcc);
    doMov();
    return;
  }

  /*
   * Since not all of our unions carry a type register, there are some
   * situations with strings and arrays that are neither constantly-foldable
   * nor in the emitTypeTest() code path.
   *
   * We currently actually check their persistent bit here, which will let
   * both static and uncounted strings through.  Also note that
   * CheckType<Uncounted> t1:{Null|Str} doesn't get this treatment
   * currently---the emitTypeTest() path above will only check the type
   * register.
   */
  if (!typeParam.isSpecialized() &&
      typeParam <= TUncounted &&
      src->type().subtypeOfAny(TStr, TArr) &&
      src->type().maybe(typeParam)) {
    assertx(src->type().maybe(TPersistent));
    // Negative refcount encodes persistence; CC_L means "is persistent".
    auto const sf = v.makeReg();
    v << cmplim{0, srcData[FAST_REFCOUNT_OFFSET], sf};
    doJcc(CC_L, sf);
    doMov();
    return;
  }

  always_assert_flog(
    false,
    "Bad src: {} and dst: {} types in '{}'",
    src->type(), typeParam, *inst
  );
}