void IRTranslator::translateInstr(const NormalizedInstruction& ni) {
  auto& ht = m_hhbcTrans;
  ht.setBcOff(ni.source.offset(),
              ni.endsRegion && !m_hhbcTrans.isInlining());
  FTRACE(1, "\n{:-^60}\n",
         folly::format("Translating {}: {} with stack:\n{}",
                       ni.offset(), ni.toString(), ht.showStack()));
  // When profiling, we disable type predictions to avoid side exits
  assert(IMPLIES(mcg->tx().mode() == TransKind::Profile,
                 !ni.outputPredicted));

  ht.emitRB(RBTypeBytecodeStart, ni.source, 2);
  ht.emitIncStat(Stats::Instr_TC, 1, false);

  auto pc = reinterpret_cast<const Op*>(ni.pc());
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type = flavorToType(instrInputFlavor(pc, i));
    if (type != Type::Gen) m_hhbcTrans.assertTypeStack(i, type);
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts >= 2) {
    ht.emitDbgAssertRetAddr();
  }

  if (isAlwaysNop(ni.op())) {
    // Do nothing
  } else if (instrMustInterp(ni) || ni.interp) {
    interpretInstr(ni);
  } else {
    translateInstrWork(ni);
  }
}
void translateInstr(
  IRGS& irgs,
  const NormalizedInstruction& ni,
  bool checkOuterTypeOnly,
  bool firstInst
) {
  irgen::prepareForNextHHBC(
    irgs,
    &ni,
    ni.source,
    ni.endsRegion && !irgen::isInlining(irgs)
  );

  const Func* builtinFunc = nullptr;
  if (ni.op() == OpFCallBuiltin) {
    auto str = ni.m_unit->lookupLitstrId(ni.imm[2].u_SA);
    builtinFunc = Unit::lookupFunc(str);
  }

  auto pc = ni.pc();
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type =
      !builtinFunc ? flavorToType(instrInputFlavor(pc, i)) :
      builtinFunc->byRef(num - i - 1) ? TGen : TCell;
    // TODO(#5706706): want to use assertTypeLocation, but Location::Stack
    // is a little unsure of itself.
    irgen::assertTypeStack(irgs, BCSPOffset{i}, type);
  }

  FTRACE(1, "\nTranslating {}: {} with state:\n{}\n",
         ni.offset(), ni, show(irgs));

  irgen::ringbufferEntry(irgs, Trace::RBTypeBytecodeStart, ni.source, 2);
  irgen::emitIncStat(irgs, Stats::Instr_TC, 1);
  if (Stats::enableInstrCount()) {
    irgen::emitIncStat(irgs, Stats::opToTranslStat(ni.op()), 1);
  }
  if (Trace::moduleEnabledRelease(Trace::llvm_count, 1) ||
      RuntimeOption::EvalJitLLVMCounters) {
    irgen::gen(irgs, CountBytecode);
  }

  if (isAlwaysNop(ni.op())) return;
  if (ni.interp || RuntimeOption::EvalJitAlwaysInterpOne) {
    irgen::interpOne(irgs, ni);
    return;
  }

  translateDispatch(irgs, ni);

  FTRACE(3, "\nTranslated {}: {} with state:\n{}\n",
         ni.offset(), ni, show(irgs));
}
void IRTranslator::translateFCall(const NormalizedInstruction& i) {
  auto const numArgs = i.imm[0].u_IVA;

  const PC after = m_hhbcTrans.curUnit()->at(i.nextSk().offset());
  const Func* srcFunc = m_hhbcTrans.curFunc();
  Offset returnBcOffset =
    srcFunc->unit()->offsetOf(after - srcFunc->base());

  /*
   * If we have a calleeTrace, we're going to see if we should inline
   * the call.
   */
  if (i.calleeTrace) {
    if (!i.calleeTrace->m_inliningFailed) {
      assert(shouldIRInline(m_hhbcTrans.curFunc(), i.funcd, *i.calleeTrace));

      m_hhbcTrans.beginInlining(numArgs, i.funcd, returnBcOffset);
      static const bool shapeStats = Stats::enabledAny() &&
                                     getenv("HHVM_STATS_INLINESHAPE");
      if (shapeStats) {
        m_hhbcTrans.profileInlineFunctionShape(traceletShape(*i.calleeTrace));
      }

      for (auto* ni = i.calleeTrace->m_instrStream.first;
           ni; ni = ni->next) {
        if (isAlwaysNop(ni->op())) {
          // This might not be necessary---but for now it's preserving
          // side effects of the call to readMetaData that used to
          // exist here.
          ni->noOp = true;
        }
        translateInstr(*ni);
      }
      return;
    }

    static const auto enabled = Stats::enabledAny() &&
                                getenv("HHVM_STATS_FAILEDINL");
    if (enabled) {
      m_hhbcTrans.profileFunctionEntry("FailedCandidate");
      m_hhbcTrans.profileFailedInlShape(traceletShape(*i.calleeTrace));
    }
  }

  HHIR_EMIT(FCall, numArgs, returnBcOffset, i.funcd,
            JIT::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
/*
 * Get location metadata for the inputs of `ni'.
 */
InputInfoVec getInputs(NormalizedInstruction& ni, FPInvOffset bcSPOff) {
  InputInfoVec inputs;
  if (isAlwaysNop(ni.op())) return inputs;

  always_assert_flog(
    instrInfo.count(ni.op()),
    "Invalid opcode in getInputsImpl: {}\n",
    opcodeToName(ni.op())
  );
  UNUSED auto const sk = ni.source;

  auto const& info = instrInfo[ni.op()];
  auto const flags = info.in;
  auto stackOff = bcSPOff;

  if (flags & FStack) {
    stackOff -= ni.imm[0].u_IVA; // arguments consumed
    stackOff -= kNumActRecCells; // ActRec is torn down as well
  }
  if (flags & FuncdRef) inputs.needsRefCheck = true;
  if (flags & IgnoreInnerType) ni.ignoreInnerType = true;

  if (flags & Stack1) {
    SKTRACE(1, sk, "getInputs: Stack1 %d\n", stackOff.offset);
    inputs.emplace_back(Location::Stack { stackOff-- });

    if (flags & DontGuardStack1) inputs.back().dontGuard = true;

    if (flags & Stack2) {
      SKTRACE(1, sk, "getInputs: Stack2 %d\n", stackOff.offset);
      inputs.emplace_back(Location::Stack { stackOff-- });

      if (flags & Stack3) {
        SKTRACE(1, sk, "getInputs: Stack3 %d\n", stackOff.offset);
        inputs.emplace_back(Location::Stack { stackOff-- });
      }
    }
  }
  if (flags & StackI) {
    inputs.emplace_back(Location::Stack {
      BCSPRelOffset{ni.imm[0].u_IVA}.to<FPInvOffset>(bcSPOff)
    });
  }
  if (flags & StackN) {
    int numArgs = (ni.op() == Op::NewPackedArray ||
                   ni.op() == Op::NewVecArray ||
                   ni.op() == Op::ConcatN)
      ? ni.imm[0].u_IVA
      : ni.immVec.numStackValues();

    SKTRACE(1, sk, "getInputs: StackN %d %d\n", stackOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location::Stack { stackOff-- });
      inputs.back().dontGuard = true;
      inputs.back().dontBreak = true;
    }
  }
  if (flags & BStackN) {
    int numArgs = ni.imm[0].u_IVA;

    SKTRACE(1, sk, "getInputs: BStackN %d %d\n", stackOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location::Stack { stackOff-- });
    }
  }
  if (flags & Local) {
    // (Almost) all instructions that take a Local have its index at their
    // first immediate.
    auto const loc = ni.imm[localImmIdx(ni.op())].u_IVA;
    SKTRACE(1, sk, "getInputs: local %d\n", loc);
    inputs.emplace_back(Location::Local { uint32_t(loc) });
  }
  if (flags & AllLocals) ni.ignoreInnerType = true;

  if (flags & MKey) {
    auto mk = ni.imm[memberKeyImmIdx(ni.op())].u_KA;
    switch (mk.mcode) {
      case MEL: case MPL:
        inputs.emplace_back(Location::Local { uint32_t(mk.iva) });
        break;
      case MEC: case MPC:
        inputs.emplace_back(Location::Stack {
          BCSPRelOffset{int32_t(mk.iva)}.to<FPInvOffset>(bcSPOff)
        });
        break;
      case MW: case MEI: case MET: case MPT: case MQT:
        // The inputs vector is only used for deciding when to break the
        // tracelet, which can never happen for these cases.
        break;
    }
  }

  SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackOff.offset);
  TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());

  if ((flags & DontGuardAny) || dontGuardAnyInputs(ni.op())) {
    for (auto& info : inputs) info.dontGuard = true;
  }
  return inputs;
}
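/*
 * Aside (not from the HHVM source): the offset arithmetic above is easier
 * to follow with a toy model. ToyFPInvOffset and ToyBCSPRelOffset below are
 * simplified, hypothetical stand-ins for HHVM's FPInvOffset and
 * BCSPRelOffset; only the conversion rule used by the StackI and MEC/MPC
 * cases is sketched. The cell i slots down from the top of the eval stack
 * lies (bcSPOff - i) cells out from the frame pointer, which is what
 * BCSPRelOffset{i}.to<FPInvOffset>(bcSPOff) computes in the listing, and
 * why the Stack1/Stack2/Stack3 cases can simply decrement stackOff.
 */
#include <cassert>
#include <cstdint>

struct ToyFPInvOffset { int32_t offset; };  // cells out from the frame pointer

struct ToyBCSPRelOffset {                   // cells down from top of stack
  int32_t offset;
  ToyFPInvOffset to(ToyFPInvOffset bcSPOff) const {
    // A cell `offset` slots below the top sits that many cells closer
    // to the frame pointer.
    return ToyFPInvOffset{bcSPOff.offset - offset};
  }
};

int main() {
  // Suppose the top of the eval stack sits 5 cells from the frame pointer.
  ToyFPInvOffset bcSPOff{5};
  assert(ToyBCSPRelOffset{0}.to(bcSPOff).offset == 5);  // top of stack
  assert(ToyBCSPRelOffset{2}.to(bcSPOff).offset == 3);  // two cells down
  return 0;
}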
/*
 * getInputs --
 *   Returns locations for this instruction's inputs.
 */
InputInfoVec getInputs(NormalizedInstruction& ni) {
  InputInfoVec inputs;
  auto UNUSED sk = ni.source;
  if (isAlwaysNop(ni.op())) return inputs;

  assertx(inputs.empty());
  always_assert_flog(
    instrInfo.count(ni.op()),
    "Invalid opcode in getInputsImpl: {}\n",
    opcodeToName(ni.op())
  );

  const InstrInfo& info = instrInfo[ni.op()];
  Operands input = info.in;
  BCSPOffset spOff{0};

  if (input & FuncdRef) {
    inputs.needsRefCheck = true;
  }
  if (input & Iter) {
    inputs.emplace_back(Location(Location::Iter, ni.imm[0].u_IVA));
  }
  if (input & FStack) {
    spOff += ni.imm[0].u_IVA; // arguments consumed
    spOff += kNumActRecCells; // ActRec is torn down as well
  }
  if (input & IgnoreInnerType) ni.ignoreInnerType = true;

  if (input & Stack1) {
    SKTRACE(1, sk, "getInputs: stack1 %d\n", spOff.offset);
    inputs.emplace_back(Location(spOff++));

    if (input & DontGuardStack1) inputs.back().dontGuard = true;

    if (input & Stack2) {
      SKTRACE(1, sk, "getInputs: stack2 %d\n", spOff.offset);
      inputs.emplace_back(Location(spOff++));

      if (input & Stack3) {
        SKTRACE(1, sk, "getInputs: stack3 %d\n", spOff.offset);
        inputs.emplace_back(Location(spOff++));
      }
    }
  }
  if (input & StackI) {
    inputs.emplace_back(Location(BCSPOffset{ni.imm[0].u_IVA}));
  }
  if (input & StackN) {
    int numArgs = (ni.op() == Op::NewPackedArray ||
                   ni.op() == Op::ConcatN)
      ? ni.imm[0].u_IVA
      : ni.immVec.numStackValues();

    SKTRACE(1, sk, "getInputs: stackN %d %d\n", spOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location(spOff++));
      inputs.back().dontGuard = true;
      inputs.back().dontBreak = true;
    }
  }
  if (input & BStackN) {
    int numArgs = ni.imm[0].u_IVA;

    SKTRACE(1, sk, "getInputs: BStackN %d %d\n", spOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location(spOff++));
    }
  }
  if (input & Local) {
    // (Almost) all instructions that take a Local have its index at
    // their first immediate.
    auto const loc = ni.imm[localImmIdx(ni.op())].u_IVA;
    SKTRACE(1, sk, "getInputs: local %d\n", loc);
    inputs.emplace_back(Location(Location::Local, loc));
  }
  if (input & MKey) {
    auto mk = ni.imm[memberKeyImmIdx(ni.op())].u_KA;
    switch (mk.mcode) {
      case MEL: case MPL:
        inputs.emplace_back(Location(Location::Local, mk.iva));
        break;
      case MEC: case MPC:
        inputs.emplace_back(Location(BCSPOffset{int32_t(mk.iva)}));
        break;
      case MW: case MEI: case MET: case MPT: case MQT:
        // The inputs vector is only used for deciding when to break the
        // tracelet, which can never happen for these cases.
        break;
    }
  }
  if (input & AllLocals) {
    ni.ignoreInnerType = true;
  }

  SKTRACE(1, sk, "stack args: virtual sfo now %d\n", spOff.offset);
  TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());

  if (inputs.size() &&
      ((input & DontGuardAny) || dontGuardAnyInputs(ni.op()))) {
    for (int i = inputs.size(); i--; ) {
      inputs[i].dontGuard = true;
    }
  }
  if (input & This) {
    inputs.emplace_back(Location(Location::This));
  }
  return inputs;
}
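/*
 * Aside (illustrative only, not from the HHVM source): the two getInputs
 * versions above number stack inputs in opposite directions. The newer one
 * records frame-pointer-invariant depths, starting at bcSPOff and
 * decrementing (stackOff--); the older one records bytecode-SP-relative
 * depths, starting at 0 and incrementing (spOff++). Given the same
 * stack-pointer depth, the two schemes name the same cells, as this toy
 * check shows; the array and values are hypothetical.
 */
#include <array>
#include <cassert>
#include <cstdint>

int main() {
  // A toy eval stack: cells[d] holds the cell d slots from the frame
  // pointer; with the bytecode SP 5 cells out, cells[5] is top of stack.
  std::array<int, 6> cells = {0, 10, 20, 30, 40, 50};
  int32_t const bcSPOff = 5;

  // Newer scheme (FP-invariant): Stack1, Stack2 are bcSPOff, bcSPOff - 1.
  assert(cells[bcSPOff - 0] == 50);
  assert(cells[bcSPOff - 1] == 40);

  // Older scheme (BCSP-relative): Stack2 is index 1, converted by
  // subtracting from the SP depth -- the same cell either way.
  int32_t const spRel = 1;
  assert(cells[bcSPOff - spRel] == 40);
  return 0;
}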