void ActRecState::pushFunc(const NormalizedInstruction& inst) { assertx(isFPush(inst.op())); const Unit& unit = *inst.unit(); const Func* func = nullptr; if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) { Id funcId = inst.imm[1].u_SA; auto const& nep = unit.lookupNamedEntityPairId(funcId); func = Unit::lookupFunc(nep.second); } else if (inst.op() == OpFPushCtorD) { Id clsId = inst.imm[1].u_SA; auto const ctxFunc = inst.func(); if (ctxFunc) { auto const str = unit.lookupLitstrId(clsId); auto const ctx = ctxFunc->cls(); auto const cls = Unit::lookupUniqueClassInContext(str, ctx); func = lookupImmutableCtor(cls, ctx); } } if (func) func->validate(); if (func && func->isNameBindingImmutable(&unit)) { pushFuncD(func); return; } pushDynFunc(); }
void ActRecState::pushFunc(const NormalizedInstruction& inst) { assertx(isFPush(inst.op())); if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) { const Unit& unit = *inst.unit(); Id funcId = inst.imm[1].u_SA; auto const& nep = unit.lookupNamedEntityPairId(funcId); auto const func = Unit::lookupFunc(nep.second); if (func) func->validate(); if (func && func->isNameBindingImmutable(&unit)) { pushFuncD(func); return; } } pushDynFunc(); }
/*
 * Merge one FrameState into another, returning whether it changed. Frame
 * pointers and stack depth must match. If the stack pointer tmps are
 * different, clear the tracked value (we can make a new one, given fp and
 * irSPOff).
 */
bool merge_into(FrameState& dst, const FrameState& src) {
  auto changed = false;

  // Cannot merge irSPOff state, so assert they match.
  always_assert(dst.irSPOff == src.irSPOff);
  always_assert(dst.curFunc == src.curFunc);

  // The only thing that can change the FP is inlining, but we can't have one
  // of the predecessors in an inlined callee while the other isn't.
  always_assert(dst.fpValue == src.fpValue);

  // FrameState for the same function must always have the same number of
  // locals.
  always_assert(src.locals.size() == dst.locals.size());

  // We must always have the same spValue.
  always_assert(dst.spValue == src.spValue);

  if (dst.needRatchet != src.needRatchet) {
    dst.needRatchet = true;
    changed = true;
  }

  // A mismatched member-base value can't be represented; drop it.
  if (dst.mbase.value != src.mbase.value) {
    dst.mbase.value = nullptr;
    changed = true;
  }

  // Same for the member-base register pointer; widen its tracked type info.
  if (dst.mbr.ptr != src.mbr.ptr) {
    dst.mbr.ptr = nullptr;
    changed = true;
  }
  changed |= merge_util(dst.mbr.pointee, dst.mbr.pointee | src.mbr.pointee);
  changed |= merge_util(dst.mbr.ptrType, dst.mbr.ptrType | src.mbr.ptrType);

  // The tracked FPI state must always be the same, notice that the size of the
  // FPI stacks may differ as the FPush associated with one of the merged blocks
  // may be outside the region. In this case we must drop the unknown state.
  dst.fpiStack.resize(std::min(dst.fpiStack.size(), src.fpiStack.size()));
  // Use an unsigned index matching the container's size_type; the previous
  // `int` index caused a signed/unsigned comparison in the loop condition.
  for (auto i = size_t{0}; i < dst.fpiStack.size(); ++i) {
    auto& dstInfo = dst.fpiStack[i];
    auto const& srcInfo = src.fpiStack[i];

    always_assert(dstInfo.returnSP == srcInfo.returnSP);
    always_assert(dstInfo.returnSPOff == srcInfo.returnSPOff);
    always_assert(isFPush(dstInfo.fpushOpc) &&
                  dstInfo.fpushOpc == srcInfo.fpushOpc);

    // If one of the merged edges was interp'ed mark the result as interp'ed
    if (!dstInfo.interp && srcInfo.interp) {
      dstInfo.interp = true;
      changed = true;
    }

    // If one of the merged edges spans a call then mark them both as spanning
    if (!dstInfo.spansCall && srcInfo.spansCall) {
      dstInfo.spansCall = true;
      changed = true;
    }

    // Merge the contexts from the respective spills
    if (dstInfo.ctx != srcInfo.ctx) {
      dstInfo.ctx = least_common_ancestor(dstInfo.ctx, srcInfo.ctx);
      changed = true;
    }

    if (dstInfo.ctxType != srcInfo.ctxType) {
      dstInfo.ctxType |= srcInfo.ctxType;
      changed = true;
    }

    // Merge the Funcs
    if (dstInfo.func != nullptr && dstInfo.func != srcInfo.func) {
      dstInfo.func = nullptr;
      changed = true;
    }
  }

  // This is available iff it's available in both states
  changed |= merge_util(dst.thisAvailable,
                        dst.thisAvailable && src.thisAvailable);

  // The frame may span a call if it could have done so in either state.
  changed |= merge_util(dst.frameMaySpanCall,
                        dst.frameMaySpanCall || src.frameMaySpanCall);

  for (auto i = uint32_t{0}; i < src.locals.size(); ++i) {
    changed |= merge_into(dst.locals[i], src.locals[i]);
  }

  changed |= merge_memory_stack_into(dst.stack, src.stack);

  changed |= merge_util(dst.stackModified,
                        dst.stackModified || src.stackModified);

  // Eval stack depth should be the same at merge points.
  always_assert(dst.bcSPOff == src.bcSPOff);

  // Union the predicted types; add any predictions only present in src, and
  // widen those present in both.
  for (auto const& srcPair : src.predictedTypes) {
    auto dstIt = dst.predictedTypes.find(srcPair.first);
    if (dstIt == dst.predictedTypes.end()) {
      dst.predictedTypes.emplace(srcPair);
      changed = true;
      continue;
    }

    auto const newType = dstIt->second | srcPair.second;
    if (newType != dstIt->second) {
      dstIt->second = newType;
      changed = true;
    }
  }

  return changed;
}