SSATmp* IRBuilder::preOptimizeCheckStk(IRInstruction* inst) {
  auto const checkedType = inst->typeParam();
  auto const stkPtr      = inst->src(0);
  auto const off         = inst->extra<CheckStk>()->offset;
  auto const slotInfo    = getStackValue(stkPtr, off);
  auto const knownType   = slotInfo.knownType;

  if (knownType.isBoxed() && checkedType.isBoxed() &&
      (knownType.not(checkedType) || checkedType < knownType)) {
    /* This CheckStk serves to update the inner type hint for a boxed
     * value, which requires no runtime work. This depends on the type being
     * boxed, and constraining it with DataTypeCountness will do it. */
    constrainStack(stkPtr, off, DataTypeCountness);
    return gen(AssertStk, checkedType, StackOffset(off), stkPtr);
  }

  if (checkedType.not(knownType)) {
    /* The check can never succeed, probably due to an incorrect
     * prediction. Emit an unconditional Jmp to the taken branch, and hand
     * back the source so instructions depending on the CheckStk's output
     * still have a value (they'll be DCEd later). We can't use
     * convertToJmp here: the return value isn't nullptr, so the original
     * instruction won't be inserted into the stream. */
    gen(Jmp, inst->taken());
    return stkPtr;
  }

  if (checkedType >= knownType) {
    // Checking wouldn't narrow the type; the check is useless.
    return stkPtr;
  }

  return nullptr;
}
SSATmp* TraceBuilder::genLdStackAddr(SSATmp* sp, int64_t index) {
  // Ask what we know about the slot so the resulting address can carry
  // a precise pointer type.
  Type slotType;
  bool spansCall;
  UNUSED SSATmp* knownVal = getStackValue(sp, index, spansCall, slotType);
  slotType = noneToGen(slotType);
  // If a concrete value is known, its type must agree with the slot type.
  assert(IMPLIES(knownVal != nullptr, knownVal->type().equals(slotType)));
  assert(slotType.notPtr());
  return gen(LdStackAddr, slotType.ptr(), sp, cns(index));
}
SSATmp* IRBuilder::preOptimizeAssertStk(IRInstruction* inst) {
  auto const offset    = inst->extra<AssertStk>()->offset;
  auto const stackInfo = getStackValue(inst->src(0), offset);
  // Defer to the generic assert-type optimizer; the callback tells it how
  // to constrain the guard for this stack slot.
  auto constrain = [&](TypeConstraint tc) {
    constrainStack(inst->src(0), offset, tc);
  };
  return preOptimizeAssertTypeOp(inst, stackInfo.knownType, constrain);
}
void TraceBuilder::constrainStack(SSATmp* sp, int32_t idx,
                                  DataTypeCategory cat) {
  FTRACE(1, "constrainStack({}, {}, {})\n", *sp->inst(), idx, cat);
  assert(sp->isA(Type::StkPtr));

  // Use getStackValue to locate the instruction that established this
  // slot's type. Only GuardStk/CheckStk carry a relaxable guard; any other
  // source means the value is freshly produced and nothing can be relaxed.
  auto const src = getStackValue(sp, idx).typeSrc;
  FTRACE(1, " - typeSrc = {}\n", *src);
  auto const op = src->op();
  if (op == GuardStk || op == CheckStk) {
    constrainGuard(src, cat);
  }
}
void TraceBuilder::genDecRefStack(Type type, uint32_t stackOff) {
  bool spansCall = false;
  Type knownType = Type::None;
  SSATmp* tmp = getStackValue(m_spValue, stackOff, spansCall, knownType);

  // If we have the value itself and using it wouldn't extend a live range
  // across a call (DefConst values are exempt), DecRef it directly.
  if (tmp && (!spansCall || tmp->inst()->op() == DefConst)) {
    gen(DecRef, tmp);
    return;
  }

  // Otherwise DecRef through the stack slot; we still get to refine the
  // type with whatever is known about the slot.
  if (knownType != Type::None) {
    type = Type::mostRefined(type, knownType);
  }
  gen(DecRefStack, type, m_spValue, cns(int64_t(stackOff)));
}
/*
 * Compute the stack and local type postconditions for a
 * single-entry/single-exit tracelet.
 */
std::vector<RegionDesc::TypePred> IRBuilder::getKnownTypes() {
  // This function is only correct when given a single-exit region, as
  // in TransProfile. Furthermore, its output is only used to guide
  // formation of profile-driven regions.
  assert(tx->mode() == TransProfile);

  // We want the state for the last block on the "main trace". Figure
  // out which that is.
  Block* mainExit = nullptr;
  for (auto* b : rpoSortCfg(m_unit)) {
    if (isMainExit(b)) {
      // Single-exit invariant: there must be at most one main exit.
      assert(mainExit == nullptr);
      mainExit = b;
    }
  }
  assert(mainExit != nullptr);

  // Load state for mainExit. This feels hacky.
  FTRACE(1, "mainExit: B{}\n", mainExit->id());
  m_state.startBlock(mainExit);

  // Now use the current state to get all the types.
  std::vector<RegionDesc::TypePred> result;
  auto const curFunc = m_state.func();
  auto const sp = m_state.sp();
  auto const spOffset = m_state.spOffset();

  // Record every stack slot whose known type is narrower than StackElem.
  for (unsigned i = 0; i < curFunc->maxStackCells(); ++i) {
    auto t = getStackValue(sp, i).knownType;
    if (!t.equals(Type::StackElem)) {
      result.push_back({ RegionDesc::Location::Stack{i, spOffset - i}, t });
    }
  }

  // Record every local whose known type is narrower than Gen.
  for (unsigned i = 0; i < curFunc->numLocals(); ++i) {
    auto t = m_state.localType(i);
    if (!t.equals(Type::Gen)) {
      FTRACE(1, "Local {}: {}\n", i, t.toString());
      result.push_back({ RegionDesc::Location::Local{i}, t });
    }
  }
  return result;
}
bool IRBuilder::constrainStack(SSATmp* sp, int32_t idx, TypeConstraint tc) {
  if (!shouldConstrainGuards()) return false;
  FTRACE(1, "constrainStack({}, {}, {})\n", *sp->inst(), idx, tc);
  assert(sp->isA(Type::StkPtr));

  // If getStackValue hands back a concrete value, constrain that value
  // instead. Otherwise look at the instruction that established the slot's
  // type: only GuardStk/CheckStk carry relaxable guards; anything else
  // means the value is new and there is no guard to relax.
  auto const info = getStackValue(sp, idx);
  if (info.value) {
    FTRACE(1, " - value = {}\n", *info.value->inst());
    return constrainValue(info.value, tc);
  }

  auto const src = info.typeSrc;
  FTRACE(1, " - typeSrc = {}\n", *src);
  if (!src->is(GuardStk, CheckStk)) return false;
  return constrainGuard(src, tc);
}
std::vector<RegionDesc::TypePred> IRBuilder::getKnownTypes() const { std::vector<RegionDesc::TypePred> result; auto const curFunc = m_state.func(); auto const sp = m_state.sp(); auto const spOffset = m_state.spOffset(); for (unsigned i = 0; i < curFunc->maxStackCells(); ++i) { auto t = getStackValue(sp, i).knownType; if (!t.equals(Type::StackElem)) { result.push_back({ RegionDesc::Location::Stack{i, spOffset - i}, t }); } } for (unsigned i = 0; i < curFunc->numLocals(); ++i) { auto t = m_state.localType(i); if (!t.equals(Type::Gen)) { result.push_back({ RegionDesc::Location::Local{i}, t }); } } return result; }
std::vector<RegionDesc::TypePred> TraceBuilder::getKnownTypes() const { std::vector<RegionDesc::TypePred> result; const Func* curFunc = m_curFunc->getValFunc(); for (unsigned i = 0; i < curFunc->maxStackCells(); ++i) { auto t = getStackValue(m_spValue, i).knownType; if (!t.equals(Type::None) && !t.equals(Type::Gen)) { result.push_back({ RegionDesc::Location::Stack{i}, t }); } } // XXX(t2598894) This is only safe right now because it's not called on a // trace with relaxed guards. for (unsigned i = 0; i < curFunc->numLocals(); ++i) { auto t = m_locals[i].type; if (!t.equals(Type::None) && !t.equals(Type::Gen)) { result.push_back({ RegionDesc::Location::Local{i}, t }); } } return result; }
// All accesses to the stack and locals in this function use DataTypeGeneric so // this function should only be used for inspecting state; when the values are // actually used they must be constrained further. Type predictedTypeFromLocation(HTS& env, const Location& loc) { switch (loc.space) { case Location::Stack: { auto i = loc.offset; assert(i >= 0); if (i < env.irb->evalStack().size()) { return top(env, i, DataTypeGeneric)->type(); } else { auto stackVal = getStackValue( env.irb->sp(), i - env.irb->evalStack().size() + env.irb->stackDeficit() ); if (stackVal.knownType.isBoxed() && !(stackVal.predictedInner <= Type::Bottom)) { return ldRefReturn(stackVal.predictedInner.unbox()).box(); } return stackVal.knownType; } } break; case Location::Local: return env.irb->predictedLocalType(loc.offset); case Location::Litstr: return Type::cns(curUnit(env)->lookupLitstrId(loc.offset)); case Location::Litint: return Type::cns(loc.offset); case Location::This: // Don't specialize $this for cloned closures which may have been re-bound if (curFunc(env)->hasForeignThis()) return Type::Obj; if (auto const cls = curFunc(env)->cls()) { return Type::Obj.specialize(cls); } return Type::Obj; case Location::Iter: case Location::Invalid: break; } not_reached(); }
bool IRBuilder::constrainStack(SSATmp* sp, int32_t idx, TypeConstraint tc) {
  if (!shouldConstrainGuards()) return false;
  // An inner-type constraint only makes sense if the outer constraint at
  // least pins down countability.
  always_assert(IMPLIES(tc.innerCat > DataTypeGeneric,
                        tc.category >= DataTypeCountness));
  ITRACE(1, "constrainStack({}, {}, {})\n", *sp->inst(), idx, tc);
  Indent _i;
  assert(sp->isA(Type::StkPtr));

  // We've hit a LdStack. If getStackValue gives us a value, recurse on
  // that. Otherwise, look at the instruction that gave us the type of the
  // stack element. If it's a GuardStk or CheckStk, it's our target. If it's
  // anything else, the value is new so there's no guard to relax.
  auto stackInfo = getStackValue(sp, idx);

  // Sometimes code in HhbcTranslator asks for a value with DataTypeSpecific
  // but can tolerate a less specific value. If that happens, there's nothing
  // to constrain.
  if (!typeFitsConstraint(stackInfo.knownType, tc)) return false;

  IRInstruction* typeSrc = stackInfo.typeSrc;
  if (stackInfo.value) {
    ITRACE(1, "value = {}\n", *stackInfo.value->inst());
    return constrainValue(stackInfo.value, tc);
  } else if (typeSrc->is(AssertStk)) {
    // If the immutable typeParam fits the constraint, we're done.
    auto const typeParam = typeSrc->typeParam();
    if (typeFitsConstraint(typeParam, tc)) return false;

    // Otherwise, relax tc by what the assert already provides and keep
    // walking through to the assert's own source slot.
    auto const srcIdx = typeSrc->extra<StackOffset>()->offset;
    auto const srcType = getStackValue(typeSrc->src(0), srcIdx).knownType;
    auto const newTc = relaxConstraint(tc, typeParam, srcType);
    ITRACE(1, "tracing through {}, orig tc: {}, new tc: {}\n",
           *typeSrc, tc, newTc);
    return constrainStack(typeSrc->src(0), srcIdx, newTc);
  } else if (typeSrc->is(CheckStk)) {
    auto changed = false;
    auto const typeParam = typeSrc->typeParam();
    auto const srcIdx = typeSrc->extra<StackOffset>()->offset;
    auto const srcType = getStackValue(typeSrc->src(0), srcIdx).knownType;

    // Constrain the guard on the CheckType, but first relax the constraint
    // based on what's known about srcType.
    auto const guardTc = relaxConstraint(tc, srcType, typeParam);
    changed = constrainGuard(typeSrc, guardTc) || changed;

    // Relax typeParam with its current constraint. This is used below to
    // recursively relax the constraint on the source, if needed.
    auto constraint = m_guardConstraints[typeSrc];
    constraint.category = std::max(constraint.category, guardTc.category);
    constraint.innerCat = std::max(constraint.innerCat, guardTc.innerCat);
    auto const knownType = refineType(relaxType(typeParam, constraint),
                                      constraint.assertedType);

    if (!typeFitsConstraint(knownType, tc)) {
      // Even the (relaxed) guard doesn't prove enough: relax tc by what the
      // check provides and recurse on the check's source slot.
      auto const newTc = relaxConstraint(tc, knownType, srcType);
      ITRACE(1, "tracing through {}, orig tc: {}, new tc: {}\n",
             *typeSrc, tc, newTc);
      changed = constrainStack(typeSrc->src(0), srcIdx, newTc) || changed;
    }
    return changed;
  } else {
    ITRACE(1, "typeSrc = {}\n", *typeSrc);
    return typeSrc->is(GuardStk) && constrainGuard(typeSrc, tc);
  }
}
/*
 * Intended to be called after all optimizations are finished on a
 * single-entry, single-exit tracelet, this collects the types of all stack
 * slots and locals at the end of the main exit.
 */
void IRUnit::collectPostConditions() {
  // This function is only correct when given a single-exit region, as in
  // TransKind::Profile. Furthermore, its output is only used to guide
  // formation of profile-driven regions.
  assert(mcg->tx().mode() == TransKind::Profile);
  assert(m_postConds.empty());
  Timer _t(Timer::collectPostConditions);

  // We want the state for the last block on the "main trace". Figure
  // out which that is.
  Block* mainExit = nullptr;
  Block* lastMainBlock = nullptr;

  FrameStateMgr state{*this, entry()->front().marker()};
  // TODO(#5678127): this code is wrong for HHIRBytecodeControlFlow
  state.setLegacyReoptimize();
  ITRACE(2, "collectPostConditions starting\n");
  Trace::Indent _i;

  // Walk the CFG in reverse postorder, updating frame state per
  // instruction, until the main exit is reached.
  for (auto* block : rpoSortCfg(*this)) {
    state.startBlock(block, block->front().marker());

    for (auto& inst : *block) {
      state.update(&inst);
    }

    if (isMainBlock(block)) lastMainBlock = block;

    if (isMainExit(block)) {
      mainExit = block;
      break;
    }

    state.finishBlock(block);
  }

  // If we didn't find an obvious exit, then use the last block in the region.
  always_assert(lastMainBlock != nullptr);
  if (mainExit == nullptr) mainExit = lastMainBlock;

  FTRACE(1, "mainExit: B{}\n", mainExit->id());

  // state currently holds the state at the end of mainExit
  auto const curFunc = state.func();
  auto const sp = state.sp();
  auto const spOffset = state.spOffset();

  // Record every live stack slot whose type is narrower than StackElem.
  for (unsigned i = 0; i < spOffset; ++i) {
    auto t = getStackValue(sp, i).knownType;
    if (!t.equals(Type::StackElem)) {
      m_postConds.push_back({ RegionDesc::Location::Stack{i, spOffset - i},
                              t });
    }
  }

  // Record every local whose type is narrower than Gen.
  for (unsigned i = 0; i < curFunc->numLocals(); ++i) {
    auto t = state.localType(i);
    if (!t.equals(Type::Gen)) {
      FTRACE(1, "Local {}: {}\n", i, t.toString());
      m_postConds.push_back({ RegionDesc::Location::Local{i}, t });
    }
  }
}