/*
 * Punt one bytecode instruction to the interpreter.
 *
 * Spills the eval stack, fills in `idata` with the current bytecode offset
 * and cell counts, keeps the FPI-stack bookkeeping in sync with the
 * instruction being interpreted, and emits InterpOne (or InterpOneCF when
 * the opcode may change the PC).
 */
void interpOne(IRGS& env, folly::Optional<Type> outType, int popped,
               int pushed, InterpOneData& idata) {
  auto const curOff = bcOff(env);
  spillStack(env);
  env.irb->exceptionStackBoundary();

  auto const opcode = curUnit(env)->getOpcode(curOff);
  auto const& instrInfo = getInstrInfo(opcode);

  // Mirror the interpreted instruction's effect on the FPI stack so that
  // subsequent translation sees consistent bookkeeping.
  if (instrInfo.type == jit::InstrFlags::OutFDesc) {
    env.fpiStack.push(FPIInfo { sp(env), env.irb->spOffset(), nullptr });
  } else if (isFCallStar(opcode) && !env.fpiStack.empty()) {
    env.fpiStack.pop();
  }

  idata.bcOff       = curOff;
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode      = opcode;

  gen(env,
      opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
      outType,
      idata,
      sp(env),
      fp(env));
  assertx(env.irb->stackDeficit() == 0);
}
/*
 * Punt one bytecode instruction to the interpreter.
 *
 * Spills the eval stack, records the interpreted opcode and its stack
 * effects in `idata`, and emits InterpOne (InterpOneCF if the opcode may
 * change the PC).
 */
void interpOne(IRGS& env, folly::Optional<Type> outType, int popped,
               int pushed, InterpOneData& idata) {
  auto const pcOff = bcOff(env);
  spillStack(env);
  env.irb->exceptionStackBoundary();

  idata.bcOff       = pcOff;
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode      = curUnit(env)->getOp(pcOff);

  gen(env,
      opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
      outType,
      idata,
      sp(env),
      fp(env));
  assertx(env.irb->stackDeficit() == 0);
}
/*
 * Emit DecodeCufIter: decode a callable (array/string/object) into the
 * given iterator slot, then branch on whether decoding succeeded.  For any
 * other input type the decode cannot succeed, so we jump straight to the
 * taken offset.
 */
void emitDecodeCufIter(IRGS& env, int32_t iterId, Offset relOffset) {
  auto const callable = popC(env);
  auto const takenOff = bcOff(env) + relOffset;

  if (callable->type().subtypeOfAny(TArr, TStr, TObj)) {
    auto const decoded = gen(
      env,
      DecodeCufIter,
      TBool,
      IterId(iterId),
      callable,
      fp(env)
    );
    decRef(env, callable);
    implCondJmp(env, takenOff, true, decoded);
  } else {
    decRef(env, callable);
    jmpImpl(env, takenOff);
  }
}
/*
 * Create an unlikely exit block that interprets the current instruction.
 *
 * Returns the new block; callers jump to it to fall back to the
 * interpreter for this instruction.
 */
Block* makeExitSlow(IRGS& env) {
  auto const slowPath = env.unit.defBlock(Block::Hint::Unlikely);
  BlockPusher bp(*env.irb, makeMarker(env, bcOff(env)), slowPath);

  auto const& ni = *env.currentNormalizedInstruction;
  interpOne(env, ni);

  // If it changes the PC, InterpOneCF will get us to the new location;
  // otherwise we need an explicit jump to the next instruction.
  if (!opcodeChangesPC(ni.op())) {
    gen(env, Jmp, makeExit(env, nextBcOff(env)));
  }
  return slowPath;
}
/*
 * Emit a string switch (SSwitch).
 *
 * We use a fast hashtable-based translation when the input is statically
 * known to be a string and none of the case labels are numeric strings;
 * otherwise the slow path does a linear search with string-conversion
 * semantics.
 */
void emitSSwitch(HTS& env, const ImmVector& iv) {
  const int caseCount = iv.size() - 1;  // last vector entry is the default

  // A numeric-string case label forces the slow path (string-to-number
  // comparison semantics).
  auto const isNumericLabel = [&](const StrVecItem& item) {
    return curUnit(env)->lookupLitstrId(item.str)->isNumeric();
  };
  const bool useFastPath =
    topC(env)->isA(Type::Str) &&
    std::none_of(iv.strvec(), iv.strvec() + caseCount, isNumericLabel);

  auto const switchee = popC(env);

  // Build the case table: literal string -> destination SrcKey.
  std::vector<LdSSwitchData::Elm> table(caseCount);
  for (int idx = 0; idx < caseCount; ++idx) {
    auto const& entry = iv.strvec()[idx];
    table[idx].str  = curUnit(env)->lookupLitstrId(entry.str);
    table[idx].dest = SrcKey{curSrcKey(env), bcOff(env) + entry.dest};
  }

  LdSSwitchData data;
  data.numCases  = caseCount;
  data.cases     = &table[0];
  data.defaultSk = SrcKey{curSrcKey(env),
                          bcOff(env) + iv.strvec()[iv.size() - 1].dest};

  auto const target = gen(env,
                          useFastPath ? LdSSwitchDestFast : LdSSwitchDestSlow,
                          data,
                          switchee);
  gen(env, DecRef, switchee);
  gen(env,
      AdjustSP,
      IRSPOffsetData { offsetFromIRSP(env, BCSPOffset{0}) },
      sp(env));
  gen(env, JmpSSwitchDest, target, sp(env));
}
/*
 * Emit DecodeCufIter: decode a callable (array/string/object) into the
 * iterator slot and branch on the result.  Inputs of any other type cannot
 * decode, so we jump unconditionally to the taken offset.
 */
void emitDecodeCufIter(IRGS& env, int32_t iterId, Offset relOffset) {
  auto const cufVal = popC(env);
  auto const takenOff = bcOff(env) + relOffset;

  if (cufVal->type().subtypeOfAny(Type::Arr, Type::Str, Type::Obj)) {
    auto const decodedOk = gen(
      env,
      DecodeCufIter,
      Type::Bool,
      IterId(iterId),
      cufVal,
      fp(env)
    );
    gen(env, DecRef, cufVal);
    implCondJmp(env, takenOff, true, decodedOk);
  } else {
    gen(env, DecRef, cufVal);
    jmpImpl(env, takenOff,
            instrJmpFlags(*env.currentNormalizedInstruction));
  }
}
/*
 * Punt one bytecode instruction to the interpreter: fill in `idata` with
 * the current opcode and its stack effects, then emit InterpOne (or
 * InterpOneCF when the opcode may change the PC).
 */
void interpOne(IRGS& env, folly::Optional<Type> outType, int popped,
               int pushed, InterpOneData& idata) {
  auto const pcOff = bcOff(env);

  idata.bcOff       = pcOff;
  idata.cellsPopped = popped;
  idata.cellsPushed = pushed;
  idata.opcode      = curUnit(env)->getOp(pcOff);

  gen(env,
      opcodeChangesPC(idata.opcode) ? InterpOneCF : InterpOne,
      outType,
      idata,
      sp(env),
      fp(env));
}
/*
 * Emit IterBreak: free every iterator named in the immediate vector (a
 * flat list of (kind, id) pairs), then jump to the break target.
 */
void emitIterBreak(IRGS& env, Offset relOffset, const ImmVector& iv) {
  for (int idx = 0; idx < iv.size(); idx += 2) {
    auto const kind = static_cast<IterKind>(iv.vec32()[idx]);
    Id const id = iv.vec32()[idx + 1];
    switch (kind) {
      case KindOfIter:
        gen(env, IterFree, IterId(id), fp(env));
        break;
      case KindOfMIter:
        gen(env, MIterFree, IterId(id), fp(env));
        break;
      case KindOfCIter:
        gen(env, CIterFree, IterId(id), fp(env));
        break;
    }
  }
  jmpImpl(env, bcOff(env) + relOffset);
}
/*
 * Create an unlikely exit block that requests a retranslation at a higher
 * optimization level (ReqRetranslateOpt) for the current bytecode offset.
 */
Block* makeExitOpt(HTS& env, TransID transId) {
  assert(!isInlining(env));
  auto const exitOff = bcOff(env);
  auto const optExit = env.unit.defBlock(Block::Hint::Unlikely);
  BlockPusher bp(*env.irb, makeMarker(env, exitOff), optExit);

  spillStack(env);
  gen(env, AdjustSP, StackOffset { offsetFromSP(env, 0) }, sp(env));
  gen(env,
      ReqRetranslateOpt,
      ReqRetransOptData{transId, SrcKey{curSrcKey(env), exitOff}},
      sp(env));
  return optExit;
}
/*
 * Create an unlikely exit block that requests a retranslation at a higher
 * optimization level (ReqRetranslateOpt) for the current bytecode offset.
 */
Block* makeExitOpt(IRGS& env, TransID transId) {
  assertx(!isInlining(env));
  auto const exitOff = bcOff(env);
  auto const optExit = defBlock(env, Block::Hint::Unlikely);
  BlockPusher bp(*env.irb, makeMarker(env, exitOff), optExit);

  auto const retransData = ReqRetranslateOptData {
    transId,
    SrcKey { curSrcKey(env), exitOff },
    bcSPOffset(env)
  };
  gen(env, ReqRetranslateOpt, retransData, sp(env), fp(env));
  return optExit;
}
/*
 * Emit MIterNext: advance the by-reference iterator and branch back to the
 * loop body (the relative offset) when more elements remain.
 */
void emitMIterNext(IRGS& env,
                   int32_t iterId,
                   Offset relOffset,
                   int32_t valLocalId) {
  surpriseCheck(env, relOffset);
  auto const hasMore = gen(
    env,
    MIterNext,
    TBool,
    IterData(iterId, -1, valLocalId),
    fp(env)
  );
  implCondJmp(env, bcOff(env) + relOffset, false, hasMore);
}
/*
 * Emit IterBreak: free every iterator named in the immediate vector (a
 * flat list of (kind, id) pairs), then exit to the break target.  This
 * instruction always ends the region.
 */
void emitIterBreak(IRGS& env, const ImmVector& iv, Offset relOffset) {
  always_assert(env.currentNormalizedInstruction->endsRegion);

  for (int idx = 0; idx < iv.size(); idx += 2) {
    auto const kind = static_cast<IterKind>(iv.vec32()[idx]);
    Id const id = iv.vec32()[idx + 1];
    switch (kind) {
      case KindOfIter:
        gen(env, IterFree, IterId(id), fp(env));
        break;
      case KindOfMIter:
        gen(env, MIterFree, IterId(id), fp(env));
        break;
      case KindOfCIter:
        gen(env, CIterFree, IterId(id), fp(env));
        break;
    }
  }

  // Would need to change this if we support not ending regions on this:
  gen(env, Jmp, makeExit(env, bcOff(env) + relOffset));
}
/*
 * Render a human-readable dump of the tracked eval-stack and local-variable
 * state for the current translation — used for debugging the IR generator.
 *
 * The dump has two boxed sections: one row per stack element (with ActRec
 * spans collapsed into a single repeated description), and one row per
 * local, each annotated with its known value/type and any predicted type.
 */
std::string show(const IRGS& irgs) {
  std::ostringstream out;
  // Prints a +----- title -----+ separator line, 102 columns wide.
  auto header = [&](const std::string& str) {
    out << folly::format("+{:-^102}+\n", str);
  };

  // When not resumed, the frame's local slots sit below the eval stack and
  // must be excluded from the stack depth.
  const int32_t frameCells =
    resumed(irgs) ? 0 : curFunc(irgs)->numSlotsInFrame();
  auto const stackDepth = irgs.irb->fs().bcSPOff().offset - frameCells;
  assertx(stackDepth >= 0);
  auto spOffset = stackDepth;

  // Prints one stack row; consumes one cell of `spOffset` per call so the
  // row index (stackDepth - spOffset) advances from 0 downward.
  auto elem = [&](const std::string& str) {
    out << folly::format("| {:<100} |\n",
                         folly::format("{:>2}: {}",
                                       stackDepth - spOffset, str));
    assertx(spOffset > 0);
    --spOffset;
  };

  // Walk the FPI regions covering the current offset, innermost first.
  auto fpi = curFunc(irgs)->findFPI(bcOff(irgs));
  // If the current stack position is the base of an FPI region's ActRec,
  // print its kNumActRecCells cells as repeated "ActRec from <fpush>" rows
  // and move to the parent region.  Returns whether an ActRec was printed.
  auto checkFpi = [&]() {
    if (fpi && spOffset + frameCells == fpi->m_fpOff) {
      auto fpushOff = fpi->m_fpushOff;
      auto after = fpushOff + instrLen(curUnit(irgs)->at(fpushOff));
      std::ostringstream msg;
      msg << "ActRec from ";
      curUnit(irgs)->prettyPrint(
        msg,
        Unit::PrintOpts().range(fpushOff, after)
                         .noLineNumbers()
                         .indent(0)
                         .noFuncs()
      );
      auto msgStr = msg.str();
      assertx(msgStr.back() == '\n');
      // Strip the trailing newline so the row formatting stays intact.
      msgStr.erase(msgStr.size() - 1);
      for (unsigned i = 0; i < kNumActRecCells; ++i) elem(msgStr);
      fpi = fpi->m_parentIndex != -1
        ? &curFunc(irgs)->fpitab()[fpi->m_parentIndex]
        : nullptr;
      return true;
    }
    return false;
  };

  header(folly::format(" {} stack element(s): ", stackDepth).str());
  assertx(spOffset <= curFunc(irgs)->maxStackCells());

  for (auto i = 0; i < spOffset; ) {
    // ActRecs occupy several cells; skip past them as a unit.
    if (checkFpi()) {
      i += kNumActRecCells;
      continue;
    }

    auto const spRel = offsetFromIRSP(irgs, BCSPRelOffset{i});
    auto const stkTy = irgs.irb->stack(spRel, DataTypeGeneric).type;
    auto const stkVal = irgs.irb->stack(spRel, DataTypeGeneric).value;

    // Prefer the defining instruction when a value is known; otherwise show
    // the tracked type ("unknown" when nothing is known at all).
    std::string elemStr;
    if (stkTy == TStkElem) {
      elemStr = "unknown";
    } else if (stkVal) {
      elemStr = stkVal->inst()->toString();
    } else {
      elemStr = stkTy.toString();
    }

    auto const irSPRel = BCSPRelOffset{i}
      .to<FPInvOffset>(irgs.irb->fs().bcSPOff());
    auto const predicted = predictedType(irgs, Location::Stack { irSPRel });
    // Only show predictions that are strictly more precise than the type.
    if (predicted < stkTy) {
      elemStr += folly::sformat(" (predict: {})", predicted);
    }

    elem(elemStr);
    ++i;
  }
  header("");
  out << "\n";

  header(folly::format(" {} local(s) ",
                       curFunc(irgs)->numLocals()).str());
  for (unsigned i = 0; i < curFunc(irgs)->numLocals(); ++i) {
    auto const localValue = irgs.irb->local(i, DataTypeGeneric).value;
    auto const localTy = localValue
      ? localValue->type()
      : irgs.irb->local(i, DataTypeGeneric).type;
    auto str = localValue
      ? localValue->inst()->toString()
      : localTy.toString();
    auto const predicted = irgs.irb->fs().local(i).predictedType;
    if (predicted < localTy) str += folly::sformat(" (predict: {})",
                                                   predicted);

    // For boxed locals, also show any prediction for the inner cell type.
    if (localTy <= TBoxedCell) {
      auto const pred = irgs.irb->predictedLocalInnerType(i);
      if (pred != TBottom) {
        str += folly::sformat(" (predict inner: {})", pred.toString());
      }
    }

    out << folly::format("| {:<100} |\n",
                         folly::format("{:>2}: {}", i, str));
  }
  header("");
  return out.str();
}
/*
 * Emit JmpZ: pop a cell and branch to the relative offset when it is falsy.
 */
void emitJmpZ(HTS& env, Offset relOffset) {
  surpriseCheck(env, relOffset);
  auto const target = bcOff(env) + relOffset;
  auto const cond = popC(env);
  implCondJmp(env, target, true, cond);
}
/*
 * Emit a Switch instruction.
 *
 * Pops the switch operand, converts it to a jump-table index according to
 * its static type, and emits a JmpSwitchDest through a table of bytecode
 * targets.  For bounded switches the immediate vector carries two extra
 * entries: the "first non-zero" case and the default.
 */
void emitSwitch(HTS& env,
                const ImmVector& iv,
                int64_t base,
                int32_t bounded) {
  int nTargets = bounded ? iv.size() - 2 : iv.size();

  SSATmp* const switchVal = popC(env);
  Type type = switchVal->type();
  assert(IMPLIES(!(type <= Type::Int), bounded));
  assert(IMPLIES(bounded, iv.size() > 2));
  SSATmp* index;
  SSATmp* ssabase = cns(env, base);
  SSATmp* ssatargets = cns(env, nTargets);

  Offset defaultOff = bcOff(env) + iv.vec32()[iv.size() - 1];
  // The target an integer value of 0 would reach: the in-range case entry
  // if 0 falls inside [base, base + nTargets), otherwise the default.
  Offset zeroOff = 0;
  if (base <= 0 && (base + nTargets) > 0) {
    zeroOff = bcOff(env) + iv.vec32()[0 - base];
  } else {
    zeroOff = defaultOff;
  }

  // Null converts to 0; no table dispatch needed.
  if (type <= Type::Null) {
    gen(env, Jmp, makeExit(env, zeroOff));
    return;
  }
  // Bool has exactly two possible targets; branch directly.
  if (type <= Type::Bool) {
    Offset nonZeroOff = bcOff(env) + iv.vec32()[iv.size() - 2];
    gen(env, JmpNZero, makeExit(env, nonZeroOff), switchVal);
    gen(env, Jmp, makeExit(env, zeroOff));
    return;
  }

  if (type <= Type::Int) {
    // No special treatment needed
    index = switchVal;
  } else if (type <= Type::Dbl) {
    // switch(Double|String|Obj)Helper do bounds-checking for us, so
    // we need to make sure the default case is in the jump table,
    // and don't emit our own bounds-checking code
    bounded = false;
    index = gen(env, LdSwitchDblIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Str) {
    bounded = false;
    index = gen(env, LdSwitchStrIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Obj) {
    // switchObjHelper can throw exceptions and reenter the VM so we use the
    // catch block here.
    bounded = false;
    index = gen(env, LdSwitchObjIndex, switchVal, ssabase, ssatargets);
  } else if (type <= Type::Arr) {
    // Arrays always convert to the default case.
    gen(env, DecRef, switchVal);
    gen(env, Jmp, makeExit(env, defaultOff));
    return;
  } else {
    PUNT(Switch-UnknownType);
  }

  // Resolve every case's relative offset to an absolute bytecode target.
  std::vector<Offset> targets(iv.size());
  for (int i = 0; i < iv.size(); i++) {
    targets[i] = bcOff(env) + iv.vec32()[i];
  }

  JmpSwitchData data;
  data.base = base;
  data.bounded = bounded;
  data.cases = iv.size();
  data.defaultOff = defaultOff;
  data.targets = &targets[0];

  spillStack(env);
  gen(env, AdjustSP, StackOffset { offsetFromSP(env, 0) }, sp(env));
  gen(env, JmpSwitchDest, data, index, sp(env));
}
/*
 * Emit JmpNS: an unconditional jump to the relative offset.  Unlike
 * emitJmp, this variant performs no surpriseCheck before jumping.
 */
void emitJmpNS(HTS& env, Offset relOffset) {
  auto const target = bcOff(env) + relOffset;
  jmpImpl(env, target, instrJmpFlags(*env.currentNormalizedInstruction));
}
/*
 * Emit Jmp: an unconditional jump to the relative offset, with a surprise
 * check first (for backward jumps / interrupt handling — see
 * surpriseCheck).
 */
void emitJmp(HTS& env, Offset relOffset) {
  surpriseCheck(env, relOffset);
  jmpImpl(env,
          bcOff(env) + relOffset,
          instrJmpFlags(*env.currentNormalizedInstruction));
}