// Solves the equation within a set of parenthesis // returns SUCCESS or FAIL int satisfy_paren() { char op; int larg; int rarg; int answer; int ret; ret = peek_op(&op); while (op != '(' && ret == SUCCESS) { // evaluate everything inside of the parens if (pop_num(&rarg) != SUCCESS) return FAIL; if (pop_num(&larg) != SUCCESS) return FAIL; if (pop_op(&op) != SUCCESS) return FAIL; if (evaluate(larg, op, rarg, &answer) != SUCCESS) return FAIL; debug_print("inside paren loop: @d@c@d=@d\n", larg, op, rarg, answer); push_num(answer); ret = peek_op(&op); } if (op == '(') { pop_op(&op); // consume the open paren } return ret; }
/**
 * instrNumPushes() returns the number of values pushed onto the stack
 * for a given push/pop instruction. For peek/poke instructions or
 * InsertMid instructions, this function returns 0.
 */
int instrNumPushes(PC pc) {
  // Table indexed by opcode, built with the OPCODES X-macro: each opcode's
  // `push` descriptor expands to its push count.  CMANY expands to the
  // sentinel -1, meaning the count depends on an immediate of the opcode.
  static const int8_t numberOfPushes[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define INS_1(...) 0
#define CMANY -1
#define O(name, imm, pop, push, flags) push,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef INS_1
#undef CMANY
#undef O
  };
  auto const op = peek_op(pc);
  int n = numberOfPushes[size_t(op)];

  // The FCallM call flavors push a tuple of arguments onto the stack
  if (n == -1) return getImm(pc, 1).u_IVA;

  return n;
}
/**
 * instrNumPushes() returns the number of values pushed onto the stack
 * for a given push/pop instruction. For peek/poke instructions or
 * InsertMid instructions, this function returns 0.
 */
int instrNumPushes(PC pc) {
  // Table indexed by opcode via the OPCODES X-macro: each opcode's `push`
  // descriptor expands to its push count.  IDX_A expands to the sentinel
  // -1, meaning the count depends on an immediate of the opcode.
  static const int8_t numberOfPushes[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define INS_1(...) 0
#define INS_2(...) 0
#define IDX_A -1
#define O(name, imm, pop, push, flags) push,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef INS_1
#undef INS_2
#undef IDX_A
#undef O
  };
  auto const op = peek_op(pc);
  auto const pushes = numberOfPushes[size_t(op)];

  // BaseSC and BaseSL may push back a C that was on top of the A they removed.
  if (pushes == -1) return getImm(pc, 1).u_IVA;

  return pushes;
}
/**
 * instrNumPops() returns the number of values consumed from the stack
 * for a given push/pop instruction. For peek/poke instructions, this
 * function returns 0.
 */
int instrNumPops(PC pc) {
  // Table indexed by opcode via the OPCODES X-macro: each opcode's `pop`
  // descriptor expands to its pop count, or to a negative sentinel when
  // the count can't be determined from the opcode alone:
  //   -1: scan the vector immediate (MMANY/SMANY)
  //   -2: vector immediate plus one RHS value (C_/V_/R_MMANY)
  //   -3: count is the first immediate (MFINAL/FMANY/CV[U]MANY/CMANY)
  //   -4: count is the second immediate + 1 (IDX_A)
  static const int32_t numberOfPops[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define MMANY -1
#define C_MMANY -2
#define V_MMANY -2
#define R_MMANY -2
#define MFINAL -3
#define FMANY -3
#define CVMANY -3
#define CVUMANY -3
#define CMANY -3
#define SMANY -1
#define IDX_A -4
#define O(name, imm, pop, push, flags) pop,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef MMANY
#undef C_MMANY
#undef V_MMANY
#undef R_MMANY
#undef MFINAL
#undef FMANY
#undef CVMANY
#undef CVUMANY
#undef CMANY
#undef SMANY
#undef IDX_A
#undef O
  };
  int n = numberOfPops[size_t(peek_op(pc))];
  // For most instructions, we know how many values are popped based
  // solely on the opcode
  if (n >= 0) return n;
  // BaseSC and BaseSL remove an A that may be on the top of the stack or one
  // element below the top, depending on the second immediate.
  if (n == -4) return getImm(pc, 1).u_IVA + 1;
  // FCall, NewPackedArray, and final member operations specify how many values
  // are popped in their first immediate
  if (n == -3) return getImm(pc, 0).u_IVA;
  // For instructions with vector immediates, we have to scan the
  // contents of the vector immediate to determine how many values
  // are popped
  assert(n == -1 || n == -2);
  ImmVector iv = getImmVector(pc);
  // Count the number of values on the stack accounted for by the
  // ImmVector's location and members
  int k = iv.numStackValues();
  // If this instruction also takes a RHS, count that too
  if (n == -2) ++k;
  return k;
}
/**
 * Compute the set of bytecode offsets that can execute immediately after
 * the instruction at `opc` (linear fall-through and/or branch targets).
 */
OffsetSet instrSuccOffsets(PC opc, const Unit* unit) {
  OffsetSet result;
  auto const entry = unit->entry();
  auto const opcode = peek_op(opc);

  // Offset of the instruction that follows `opc` in linear order.
  auto const fallThruOff = [&] {
    return Offset(opc + instrLen(opc) - entry);
  };

  // A non-control-flow instruction has exactly one successor: the next
  // instruction in the stream.
  if (!instrIsControlFlow(opcode)) {
    result.insert(fallThruOff());
    return result;
  }

  if (instrAllowsFallThru(opcode)) {
    result.insert(fallThruOff());
  }

  if (isSwitch(opcode)) {
    // Every case target of the switch is a successor.
    foreachSwitchTarget(opc, [&](Offset offset) {
      result.insert(offset + opc - entry);
    });
  } else {
    auto const target = instrJumpTarget(entry, opc - entry);
    if (target != InvalidAbsoluteOffset) {
      result.insert(target);
    }
  }
  return result;
}
/**
 * Create blocks for each entry point as well as ordinary control
 * flow boundaries. Calls are not treated as basic-block ends.
 */
void GraphBuilder::createBlocks() {
  PC bc = m_unit->entry();
  m_graph->param_count = m_func->params().size();
  m_graph->first_linear = createBlock(m_func->base());
  // DV entry points: one slot per parameter, nullptr when the parameter
  // has no default-value funclet.
  m_graph->entries = new (m_arena) Block*[m_graph->param_count + 1];
  int dv_index = 0;
  for (auto& param : m_func->params()) {
    m_graph->entries[dv_index++] = !param.hasDefaultValue() ? 0 :
                                   createBlock(param.funcletOff);
  }
  // main entry point (last slot of the entries array)
  assert(dv_index == m_graph->param_count);
  m_graph->entries[dv_index] = createBlock(m_func->base());
  // ordinary basic block boundaries: a block starts after any control-flow
  // or terminal instruction, and at every branch/switch target.
  for (InstrRange i = funcInstrs(m_func); !i.empty(); ) {
    PC pc = i.popFront();
    if ((isCF(pc) || isTF(pc)) && !i.empty()) createBlock(i.front());
    if (isSwitch(peek_op(pc))) {
      foreachSwitchTarget(pc, [&](Offset o) {
        // Switch targets are relative to the instruction.
        createBlock(pc + o);
      });
    } else {
      Offset target = instrJumpTarget(bc, pc - bc);
      if (target != InvalidAbsoluteOffset) createBlock(target);
    }
  }
}
// Classify the stack transition of the instruction at `opcode`: either an
// ordinary push/pop (counts come from instrNumPushes/instrNumPops) or an
// insert-into-the-middle operation (position comes from the table below).
StackTransInfo instrStackTransInfo(PC opcode) {
  // Kind per opcode, built via the OPCODES X-macro from each opcode's
  // `push` descriptor: INS_1/INS_2 mean InsertMid, everything else PushPop.
  static const StackTransInfo::Kind transKind[] = {
#define NOV StackTransInfo::Kind::PushPop
#define ONE(...) StackTransInfo::Kind::PushPop
#define TWO(...) StackTransInfo::Kind::PushPop
#define THREE(...) StackTransInfo::Kind::PushPop
#define FOUR(...) StackTransInfo::Kind::PushPop
#define IDX_A StackTransInfo::Kind::PushPop
#define INS_1(...) StackTransInfo::Kind::InsertMid
#define INS_2(...) StackTransInfo::Kind::InsertMid
#define O(name, imm, pop, push, flags) push,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef INS_1
#undef INS_2
#undef IDX_A
#undef O
  };
  // Insert position for InsertMid opcodes (INS_1 -> 0, INS_2 -> 1);
  // -1 for opcodes where it is meaningless.
  static const int8_t peekPokeType[] = {
#define NOV -1
#define ONE(...) -1
#define TWO(...) -1
#define THREE(...) -1
#define FOUR(...) -1
#define INS_1(...) 0
#define INS_2(...) 1
#define IDX_A 0
#define O(name, imm, pop, push, flags) push,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef INS_2
#undef INS_1
#undef IDX_A
#undef O
  };
  StackTransInfo ret;
  auto const op = peek_op(opcode);
  ret.kind = transKind[size_t(op)];
  switch (ret.kind) {
  case StackTransInfo::Kind::PushPop:
    ret.pos = 0;
    ret.numPushes = instrNumPushes(opcode);
    ret.numPops = instrNumPops(opcode);
    return ret;
  case StackTransInfo::Kind::InsertMid:
    ret.numPops = 0;
    ret.numPushes = 0;
    ret.pos = peekPokeType[size_t(op)];
    return ret;
  }
  not_reached();
}
int instrFpToArDelta(const Func* func, PC opcode) { // This function should only be called for instructions that read the current // FPI assertx(instrReadsCurrentFpi(peek_op(opcode))); auto const fpi = func->findFPI(func->unit()->offsetOf(opcode)); assertx(fpi != nullptr); return fpi->m_fpOff; }
/**
 * instrNumPops() returns the number of values consumed from the stack
 * for a given push/pop instruction. For peek/poke instructions, this
 * function returns 0.
 */
int instrNumPops(PC pc) {
  // Table indexed by opcode via the OPCODES X-macro; negative values are
  // sentinels resolved from the opcode's immediates below:
  //   -1: scan the vector immediate (SMANY)
  //   -3: count is the first immediate
  //   -4: count derives from the first two immediates (UFMANY)
  //   -5: first immediate + 1 (C_/V_MFINAL)
  //   -6: count is the second immediate (F_MFINAL)
  static const int32_t numberOfPops[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define MFINAL -3
#define F_MFINAL -6
#define C_MFINAL -5
#define V_MFINAL C_MFINAL
#define FMANY -3
#define UFMANY -4
#define CVUMANY -3
#define CMANY -3
#define SMANY -1
#define O(name, imm, pop, push, flags) pop,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef MFINAL
#undef F_MFINAL
#undef C_MFINAL
#undef V_MFINAL
#undef FMANY
#undef UFMANY
#undef CVUMANY
#undef CMANY
#undef SMANY
#undef O
  };
  auto const op = peek_op(pc);
  int n = numberOfPops[size_t(op)];
  // For most instructions, we know how many values are popped based
  // solely on the opcode
  if (n >= 0) return n;
  // FCall, NewPackedArray, and some final member operations specify how many
  // values are popped in their first immediate
  if (n == -3) return getImm(pc, 0).u_IVA;
  // FCallM, FCallDM, and FCallUnpackM pop uninit values from the stack and
  // push multiple returned values.
  if (n == -4) return getImm(pc, 0).u_IVA + getImm(pc, 1).u_IVA - 1;
  // FPassM final operations have paramId as imm 0 and stackCount as imm1
  if (n == -6) return getImm(pc, 1).u_IVA;
  // Other final member operations pop their first immediate + 1
  if (n == -5) return getImm(pc, 0).u_IVA + 1;
  // For instructions with vector immediates, we have to scan the contents of
  // the vector immediate to determine how many values are popped
  assertx(n == -1);
  ImmVector iv = getImmVector(pc);
  int k = iv.numStackValues();
  return k;
}
/**
 * instrNumPops() returns the number of values consumed from the stack
 * for a given push/pop instruction. For peek/poke instructions, this
 * function returns 0.
 */
int instrNumPops(PC pc) {
  // Table indexed by opcode via the OPCODES X-macro; negative values are
  // sentinels resolved from the opcode's immediates below:
  //   -1: scan the vector immediate (SMANY)
  //   -3: count is the first immediate
  //   -4: count derives from the FCallArgs immediate (FCALL)
  //   -5: first immediate + 1 (C_/V_MFINAL)
  static const int32_t numberOfPops[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define FIVE(...) 5
#define MFINAL -3
#define C_MFINAL -5
#define V_MFINAL C_MFINAL
#define CVMANY -3
#define CVUMANY -3
#define FCALL -4
#define CMANY -3
#define SMANY -1
#define O(name, imm, pop, push, flags) pop,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef FIVE
#undef MFINAL
#undef C_MFINAL
#undef V_MFINAL
#undef CVMANY
#undef CVUMANY
#undef FCALL
#undef CMANY
#undef SMANY
#undef O
  };
  auto const op = peek_op(pc);
  int n = numberOfPops[size_t(op)];
  // For most instructions, we know how many values are popped based
  // solely on the opcode
  if (n >= 0) return n;
  // FCallAwait, NewPackedArray, and some final member operations specify how
  // many values are popped in their first immediate
  if (n == -3) return getImm(pc, 0).u_IVA;
  // FCall pops numArgs, unpack and (numRets - 1) uninit values
  if (n == -4) {
    auto const fca = getImm(pc, 0).u_FCA;
    return fca.numArgs + (fca.hasUnpack ? 1 : 0) + fca.numRets - 1;
  }
  // Other final member operations pop their first immediate + 1
  if (n == -5) return getImm(pc, 0).u_IVA + 1;
  // For instructions with vector immediates, we have to scan the contents of
  // the vector immediate to determine how many values are popped
  assertx(n == -1);
  ImmVector iv = getImmVector(pc);
  int k = iv.numStackValues();
  return k;
}
/**
 * instrNumPops() returns the number of values consumed from the stack
 * for a given push/pop instruction. For peek/poke instructions, this
 * function returns 0.
 */
int instrNumPops(PC pc) {
  // Table indexed by opcode via the OPCODES X-macro; negative values are
  // sentinels resolved from the opcode's immediates below:
  //   -1: scan the vector immediate (SMANY)
  //   -3: count is the first immediate
  //   -4: count is the second immediate + 1 (IDX_A)
  //   -5: first immediate + 1 (C_/V_MFINAL)
  //   -6: count is the second immediate (F_MFINAL)
  static const int32_t numberOfPops[] = {
#define NOV 0
#define ONE(...) 1
#define TWO(...) 2
#define THREE(...) 3
#define FOUR(...) 4
#define MFINAL -3
#define F_MFINAL -6
#define C_MFINAL -5
#define V_MFINAL C_MFINAL
#define FMANY -3
#define CVUMANY -3
#define CMANY -3
#define SMANY -1
#define IDX_A -4
#define O(name, imm, pop, push, flags) pop,
    OPCODES
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef MFINAL
#undef F_MFINAL
#undef C_MFINAL
#undef V_MFINAL
#undef FMANY
#undef CVUMANY
#undef CMANY
#undef SMANY
#undef IDX_A
#undef O
  };
  auto const op = peek_op(pc);
  int n = numberOfPops[size_t(op)];
  // For most instructions, we know how many values are popped based
  // solely on the opcode
  if (n >= 0) return n;
  // BaseSC and BaseSL remove an A that may be on the top of the stack or one
  // element below the top, depending on the second immediate.
  if (n == -4) return getImm(pc, 1).u_IVA + 1;
  // FCall, NewPackedArray, and some final member operations specify how many
  // values are popped in their first immediate
  if (n == -3) return getImm(pc, 0).u_IVA;
  // FPassM final operations have paramId as imm 0 and stackCount as imm1
  if (n == -6) return getImm(pc, 1).u_IVA;
  // Other final member operations pop their first immediate + 1
  if (n == -5) return getImm(pc, 0).u_IVA + 1;
  // For instructions with vector immediates, we have to scan the contents of
  // the vector immediate to determine how many values are popped
  assert(n == -1);
  ImmVector iv = getImmVector(pc);
  int k = iv.numStackValues();
  return k;
}
// Raise a fatal "Stack overflow" for the callee frame `calleeAR`, whose
// prologue has not yet run.  Synchronizes VM registers first, then either
// raises from the caller's FCall (normal case) or throws
// VMReenterStackOverflow (re-entry case).
void handleStackOverflow(ActRec* calleeAR) {
  /*
   * First synchronize registers.
   *
   * We're called in two situations: either this is the first frame after a
   * re-entry, in which case calleeAR->m_sfp is enterTCHelper's native stack,
   * or we're called in the middle of one VM entry (from a func prologue). We
   * want to raise the exception from the caller's FCall instruction in the
   * second case, and in the first case we have to raise in a special way
   * inside this re-entry.
   *
   * Either way the stack depth is below the calleeAR by numArgs, because we
   * haven't run func prologue duties yet.
   */
  auto& unsafeRegs = vmRegsUnsafe();
  auto const isReentry = calleeAR == vmFirstAR();
  auto const arToSync = isReentry ? calleeAR : calleeAR->m_sfp;
  unsafeRegs.fp = arToSync;
  // Stack top sits numArgs cells below the callee's ActRec (args are
  // already pushed, prologue hasn't run).
  unsafeRegs.stack.top() =
    reinterpret_cast<Cell*>(calleeAR) - calleeAR->numArgs();
  auto const func_base = arToSync->func()->base();
  // calleeAR m_soff is 0 in the re-entry case, so we'll set pc to the func
  // base. But it also doesn't matter because we're going to throw a special
  // VMReenterStackOverflow in that case so the unwinder won't worry about it.
  unsafeRegs.pc = arToSync->func()->unit()->at(func_base + calleeAR->m_soff);
  tl_regState = VMRegState::CLEAN;

  if (!isReentry) {
    /*
     * The normal case - we were called via FCall, or FCallArray. We need to
     * construct the pc of the fcall from the return address (which will be
     * after the fcall). Because fcall is a variable length instruction, and
     * because we sometimes delete instructions from the instruction stream, we
     * need to use fpi regions to find the fcall.
     */
    const FPIEnt* fe = liveFunc()->findPrecedingFPI(
      liveUnit()->offsetOf(vmpc()));
    vmpc() = liveUnit()->at(fe->m_fcallOff);
    assertx(isFCallStar(peek_op(vmpc())));
    raise_error("Stack overflow");
  } else {
    /*
     * We were called via re-entry. Leak the params and the ActRec, and tell
     * the unwinder that there's nothing left to do in this "entry".
     *
     * Also, the caller hasn't set up the m_invName area on the ActRec (unless
     * it was a magic call), since it's the prologue's responsibility if it's a
     * non-magic call. We can just null it out since we're fatalling.
     */
    vmsp() = reinterpret_cast<Cell*>(calleeAR + 1);
    calleeAR->setVarEnv(nullptr);
    throw VMReenterStackOverflow();
  }
  not_reached();
}
// Decode the iterator-table (ILA) immediate of the instruction at `opcode`.
// The instruction must carry an ILA immediate; otherwise this is unreachable.
IterTable getIterTable(PC opcode) {
  auto const op = peek_op(opcode);
  auto const immCount = numImmediates(op);
  for (int i = 0; i < immCount; ++i) {
    if (immType(op, i) == ILA) {
      auto const raw = reinterpret_cast<PC>(getImmPtr(opcode, i));
      return iterTableFromStream(raw);
    }
  }
  not_reached();
}
// Decide how to continue a "next" step from the current line, with special
// cases for suspension points (await/yield) and returns from resumables.
// Falls through to a plain per-line location filter for ordinary opcodes.
void CmdNext::stepCurrentLine(CmdInterrupt& interrupt, ActRec* fp, PC pc) {
  // Special handling for yields from generators and awaits from
  // async. The destination of these instructions is somewhat counter
  // intuitive so we take care to ensure that we step to the most
  // appropriate place. For yields, we want to land on the next
  // statement when driven from a C++ iterator like ASIO. If the
  // generator is driven directly from PHP (i.e., a loop calling
  // send($foo)) then we'll land back at the callsite of send(). For
  // returns from generators, we follow the execution stack for now,
  // and end up at the caller of ASIO or send(). For async functions
  // stepping over an await, we land on the next statement.
  auto const op = peek_op(pc);
  if (op == OpAwait) {
    assertx(fp->func()->isAsync());
    auto wh = c_Awaitable::fromCell(*vmsp());
    // Only a blocking (unfinished) wait handle needs the suspend dance;
    // a finished one behaves like a normal instruction.
    if (wh && !wh->isFinished()) {
      TRACE(2, "CmdNext: encountered blocking await\n");
      if (fp->resumed()) {
        setupStepSuspend(fp, pc);
        removeLocationFilter();
      } else {
        // Eager execution in non-resumed mode is supported only by async
        // functions. We need to step over this opcode, then grab the created
        // AsyncFunctionWaitHandle and setup stepping like we do for
        // OpAwait.
        assertx(fp->func()->isAsyncFunction());
        m_skippingAwait = true;
        m_needsVMInterrupt = true;
        removeLocationFilter();
      }
      return;
    }
  } else if (op == OpYield || op == OpYieldK) {
    assertx(fp->resumed());
    assertx(fp->func()->isGenerator());
    TRACE(2, "CmdNext: encountered yield from generator\n");
    setupStepOuts();
    setupStepSuspend(fp, pc);
    removeLocationFilter();
    return;
  } else if (op == OpRetC && fp->resumed()) {
    assertx(fp->func()->isResumable());
    TRACE(2, "CmdNext: encountered return from resumed resumable\n");
    setupStepOuts();
    removeLocationFilter();
    return;
  }
  // Ordinary opcode (or a finished await): interrupt on every opcode of
  // the current source line.
  installLocationFilterForLine(interrupt.getSite());
  m_needsVMInterrupt = true;
}
// Locate and decode the vector immediate of the instruction at `opcode`.
// The instruction must carry one of the vector immediate types; otherwise
// this is unreachable.
ImmVector getImmVector(PC opcode) {
  auto const op = peek_op(opcode);
  auto const immCount = numImmediates(op);
  for (int idx = 0; idx < immCount; ++idx) {
    auto const argType = immType(op, idx);
    switch (argType) {
      case BLA:
      case SLA:
      case I32LA:
      case BLLA:
      case VSA: {
        PC raw = getImmPtr(opcode, idx)->bytes;
        auto const len = decode_iva(raw);
        // Only VSA vectors carry string ids; the others have none.
        return ImmVector(raw, len, argType == VSA ? len : 0);
      }
      default:
        break;
    }
  }
  not_reached();
}
// Handle an interrupt while a "step out" command is active: keep running
// while we're at the same depth or deeper, and decide whether the command
// is complete once the stack gets shallower.
void CmdOut::onBeginInterrupt(DebuggerProxy &proxy, CmdInterrupt &interrupt) {
  TRACE(2, "CmdOut::onBeginInterrupt\n");
  assertx(!m_complete); // Complete cmds should not be asked to do work.

  m_needsVMInterrupt = false;

  // We previously decided to skip a trailing PopC; reaching here means it
  // has executed, so the step-out is done.
  if (m_skippingOverPopC) {
    m_complete = true;
    return;
  }

  int currentVMDepth = g_context->m_nesting;
  int currentStackDepth = proxy.getStackDepth();

  // Deeper or same depth? Keep running.
  if ((currentVMDepth > m_vmDepth) ||
      ((currentVMDepth == m_vmDepth) && (currentStackDepth >= m_stackDepth))) {
    TRACE(2, "CmdOut: deeper, keep running...\n");
    return;
  }

  if (interrupt.getInterruptType() == ExceptionHandler) {
    // If we're about to enter an exception handler we turn interrupts on to
    // ensure we stop when control reaches the handler. The normal logic below
    // will decide if we're done at that point or not.
    TRACE(2, "CmdOut: exception thrown\n");
    removeLocationFilter();
    m_needsVMInterrupt = true;
    return;
  }

  TRACE(2, "CmdOut: shallower stack depth, done.\n");
  cleanupStepOuts();
  // decCount() handles a repeat count ("out 3"): zero means this was the
  // last level to step out of.
  int depth = decCount();
  if (depth == 0) {
    PC pc = vmpc();
    // Step over PopC following a call
    if (peek_op(pc) == Op::PopC) {
      m_skippingOverPopC = true;
      m_needsVMInterrupt = true;
    } else {
      m_complete = true;
    }
    return;
  } else {
    TRACE(2, "CmdOut: not complete, step out again.\n");
    onSetup(proxy, interrupt);
  }
}
// Removes a range of PCs to the filter given a collection of offset ranges. // Omit PCs which have opcodes that don't pass the given opcode filter. void PCFilter::removeRanges(const Unit* unit, const OffsetRangeVec& offsets, OpcodeFilter isOpcodeAllowed) { for (auto range = offsets.cbegin(); range != offsets.cend(); ++range) { TRACE(3, "\toffsets [%d, %d) (remove)\n", range->base, range->past); for (PC pc = unit->at(range->base); pc < unit->at(range->past); pc += instrLen(pc)) { if (isOpcodeAllowed(peek_op(pc))) { TRACE(3, "\t\tpc %p (remove)\n", pc); removePC(pc); } else { TRACE(3, "\t\tpc %p -- skipping (offset %d) (remove)\n", pc, unit->offsetOf(pc)); } } } }
/**
 * Returns the expected input flavor of stack slot idx.
 */
FlavorDesc instrInputFlavor(PC op, uint32_t idx) {
  // Capture the FlavorDesc NOV value before the NOV macro below shadows it;
  // the MMANY expansion needs it.
  auto constexpr nov = NOV;
  // Each macro expands, per opcode, to a `return`/`assert` statement used as
  // the body of that opcode's case in the switch below (via the OPCODES
  // X-macro and each opcode's `pop` descriptor).
#define NOV always_assert(0 && "Opcode has no stack inputs");
#define ONE(f1) return doFlavor(idx, f1);
#define TWO(f1, f2) return doFlavor(idx, f1, f2);
#define THREE(f1, f2, f3) return doFlavor(idx, f1, f2, f3);
#define FOUR(f1, f2, f3, f4) return doFlavor(idx, f1, f2, f3, f4);
#define MMANY return minstrFlavor(op, idx, nov);
#define C_MMANY return minstrFlavor(op, idx, CV);
#define V_MMANY return minstrFlavor(op, idx, VV);
#define R_MMANY return minstrFlavor(op, idx, RV);
#define MFINAL return manyFlavor(op, idx, CRV);
#define C_MFINAL return idx == 0 ? CV : CRV;
#define R_MFINAL return idx == 0 ? RV : CRV;
#define V_MFINAL return idx == 0 ? VV : CRV;
#define FMANY return manyFlavor(op, idx, FV);
#define CVMANY return manyFlavor(op, idx, CVV);
#define CVUMANY return manyFlavor(op, idx, CVUV);
#define CMANY return manyFlavor(op, idx, CV);
#define SMANY return manyFlavor(op, idx, CV);
#define IDX_A return baseSFlavor(op, idx);
#define O(name, imm, pop, push, flags) case Op::name: pop
  switch (peek_op(op)) {
    OPCODES
  }
  not_reached();
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef MMANY
#undef C_MMANY
#undef V_MMANY
#undef R_MMANY
#undef MFINAL
#undef C_MFINAL
#undef R_MFINAL
#undef V_MFINAL
#undef FMANY
#undef CVMANY
#undef CVUMANY
#undef CMANY
#undef SMANY
#undef IDX_A
#undef O
}
// Returns the total number of stack cells the instruction at `pc` pops.
// Opcodes whose pop count depends on an immediate are special-cased in the
// switch; everything else is derived from its instruction-info input mask.
int64_t getStackPopped(PC pc) {
  auto const op = peek_op(pc);
  switch (op) {
    // Call flavors pop their arguments plus the ActRec cells.
    case Op::FCall:        return getImm(pc, 0).u_IVA + kNumActRecCells;
    case Op::FCallD:       return getImm(pc, 0).u_IVA + kNumActRecCells;
    case Op::FCallAwait:   return getImm(pc, 0).u_IVA + kNumActRecCells;
    case Op::FCallArray:   return kNumActRecCells + 1;

    // These pop exactly imm[0] cells.
    case Op::QueryM:
    case Op::VGetM:
    case Op::IncDecM:
    case Op::UnsetM:
    case Op::NewPackedArray:
    case Op::NewVecArray:
    case Op::NewKeysetArray:
    case Op::ConcatN:
    case Op::FCallBuiltin:
    case Op::CreateCl:
      return getImm(pc, 0).u_IVA;

    case Op::FPassM:
      // imm[0] is argument index
      return getImm(pc, 1).u_IVA;

    // These pop imm[0] cells plus the RHS value.
    case Op::SetM:
    case Op::SetOpM:
    case Op::BindM:
      return getImm(pc, 0).u_IVA + 1;

    case Op::NewStructArray: return getImmVector(pc).size();

    case Op::BaseSC:
    case Op::BaseSL:
      return getImm(pc, 1).u_IVA + 1;

    default:             break;
  }

  uint64_t mask = getInstrInfo(op).in;
  int64_t count = 0;

  // All instructions with these properties are handled above
  assertx((mask & (StackN | BStackN)) == 0);

  return count + countOperands(mask);
}
// Locate and decode the vector immediate of the instruction at `opcode`.
// List-style immediates (BLA/SLA/ILA/I32LA) are length-prefixed int32
// streams; VSA is a length-prefixed array of string ids.  The instruction
// must carry one of these immediate types; otherwise this is unreachable.
ImmVector getImmVector(PC opcode) {
  auto const op = peek_op(opcode);
  int numImm = numImmediates(op);
  for (int k = 0; k < numImm; ++k) {
    ArgType t = immType(op, k);
    if (t == BLA || t == SLA || t == ILA || t == I32LA) {
      void* vp = getImmPtr(opcode, k);
      return ImmVector::createFromStream(
        static_cast<const int32_t*>(vp)
      );
    }
    if (t == VSA) {
      // Named cast instead of a C-style cast; the immediate is a length
      // word followed by the vector payload.
      auto const vp = reinterpret_cast<const int32_t*>(getImmPtr(opcode, k));
      return ImmVector(reinterpret_cast<const uint8_t*>(vp + 1),
                       vp[0], vp[0]);
    }
  }
  not_reached();
}
// Returns true if `sk` is blacklisted from translation: either its unit is
// interpret-only, or some PC in its basic block (inclusive of the
// block-ending instruction) is in the debugger PC blacklist.
bool Translator::isSrcKeyInBL(SrcKey sk) {
  auto unit = sk.unit();
  if (unit->isInterpretOnly()) return true;

  Lock l(m_dbgBlacklistLock);
  if (m_dbgBLSrcKey.count(sk)) return true;

  // Scan from sk through the end of its basic block, inclusive.  This is
  // useful for function exit breakpoints, which are implemented by
  // blacklisting the RetC opcodes.
  for (PC pc = unit->at(sk.offset()); ; pc += instrLen(pc)) {
    if (m_dbgBLPC.checkPC(pc)) {
      // Cache the positive result for subsequent lookups of this SrcKey.
      m_dbgBLSrcKey.insert(sk);
      return true;
    }
    if (opcodeBreaksBB(peek_op(pc))) break;
  }
  return false;
}
/**
 * Returns the expected input flavor of stack slot idx.
 */
FlavorDesc instrInputFlavor(PC op, uint32_t idx) {
  // Each macro expands, per opcode, to a `return`/`assert` statement used as
  // the body of that opcode's case in the switch below (via the OPCODES
  // X-macro and each opcode's `pop` descriptor).
#define NOV always_assert(0 && "Opcode has no stack inputs");
#define ONE(f1) return doFlavor(idx, f1);
#define TWO(f1, f2) return doFlavor(idx, f1, f2);
#define THREE(f1, f2, f3) return doFlavor(idx, f1, f2, f3);
#define FOUR(f1, f2, f3, f4) return doFlavor(idx, f1, f2, f3, f4);
#define FIVE(f1, f2, f3, f4, f5) return doFlavor(idx, f1, f2, f3, f4, f5);
#define MFINAL return manyFlavor(op, idx, CRV);
#define C_MFINAL return idx == 0 ? CV : CRV;
#define V_MFINAL return idx == 0 ? VV : CRV;
#define CVMANY return manyFlavor(op, idx, CVV);
#define CVUMANY return manyFlavor(op, idx, CVUV);
#define FCALL return fcallFlavor(op, idx);
#define CMANY return manyFlavor(op, idx, CV);
#define SMANY return manyFlavor(op, idx, CV);
#define O(name, imm, pop, push, flags) case Op::name: pop
  switch (peek_op(op)) {
    OPCODES
  }
  not_reached();
#undef NOV
#undef ONE
#undef TWO
#undef THREE
#undef FOUR
#undef FIVE
#undef MFINAL
#undef C_MFINAL
#undef V_MFINAL
#undef CVMANY
#undef CVUMANY
#undef FCALL
#undef CMANY
#undef SMANY
#undef O
}
/** * Link ordinary blocks with ordinary edges and set their last instruction * and end offsets */ void GraphBuilder::linkBlocks() { PC bc = m_unit->entry(); Block* block = m_graph->first_linear; block->id = m_graph->block_count++; for (InstrRange i = funcInstrs(m_func); !i.empty(); ) { PC pc = i.popFront(); block->last = pc; if (isCF(pc)) { if (isSwitch(peek_op(pc))) { int i = 0; foreachSwitchTarget(pc, [&](Offset o) { succs(block)[i++] = at(pc + o); }); } else { Offset target = instrJumpTarget(bc, pc - bc); if (target != InvalidAbsoluteOffset) { assert(numSuccBlocks(block) > 0); succs(block)[numSuccBlocks(block) - 1] = at(target); } } } PC next_pc = !i.empty() ? i.front() : m_unit->at(m_func->past()); Block* next = at(next_pc); if (next) { block->next_linear = next; block->end = next_pc; if (!isTF(pc)) { assert(numSuccBlocks(block) > 0); succs(block)[0] = next; } block = next; block->id = m_graph->block_count++; } } block->end = m_unit->at(m_func->past()); }
// Handle an interrupt while a "next" command is active.  Compares the
// current VM/stack depth against the depth when the command started and
// decides whether to keep running, set up internal breakpoints, step a
// suspended resumable, or declare the step complete.
void CmdNext::onBeginInterrupt(DebuggerProxy& proxy, CmdInterrupt& interrupt) {
  TRACE(2, "CmdNext::onBeginInterrupt\n");
  assertx(!m_complete); // Complete cmds should not be asked to do work.

  ActRec *fp = vmfp();
  if (!fp) {
    // If we have no frame just wait for the next instruction to be interpreted.
    m_needsVMInterrupt = true;
    return;
  }
  PC pc = vmpc();
  Unit* unit = fp->m_func->unit();
  Offset offset = unit->offsetOf(pc);
  TRACE(2, "CmdNext: pc %p, opcode %s at '%s' offset %d\n",
        pc,
        opcodeToName(peek_op(pc)),
        fp->m_func->fullName()->data(),
        offset);

  int currentVMDepth = g_context->m_nesting;
  int currentStackDepth = proxy.getStackDepth();

  TRACE(2, "CmdNext: original depth %d:%d, current depth %d:%d\n",
        m_vmDepth, m_stackDepth, currentVMDepth, currentStackDepth);

  // Where are we on the stack now vs. when we started? Breaking the answer down
  // into distinct variables helps the clarity of the algorithm below.
  bool deeper = false;
  bool originalDepth = false;
  if ((currentVMDepth == m_vmDepth) && (currentStackDepth == m_stackDepth)) {
    originalDepth = true;
  } else if ((currentVMDepth > m_vmDepth) ||
             ((currentVMDepth == m_vmDepth) &&
              (currentStackDepth > m_stackDepth))) {
    deeper = true;
  }

  m_needsVMInterrupt = false; // Will be set again below if still needed.

  // First consider if we've got internal breakpoints setup. These are used when
  // we can make an accurate prediction of where execution should flow,
  // eventually, and when we want to let the program run normally until we get
  // there.
  if (hasStepOuts() || hasStepResumable()) {
    TRACE(2, "CmdNext: checking internal breakpoint(s)\n");
    if (atStepOutOffset(unit, offset)) {
      if (deeper) return; // Recursion
      TRACE(2, "CmdNext: hit step-out\n");
    } else if (atStepResumableOffset(unit, offset)) {
      if (m_stepResumableId != getResumableId(fp)) return;
      TRACE(2, "CmdNext: hit step-cont\n");
      // We're in the resumable we expect. This may be at a
      // different stack depth, though, especially if we've moved from
      // the original function to the resumable. Update the depth
      // accordingly.
      if (!originalDepth) {
        m_vmDepth = currentVMDepth;
        m_stackDepth = currentStackDepth;
        deeper = false;
        originalDepth = true;
      }
    } else if (interrupt.getInterruptType() == ExceptionHandler) {
      // Entering an exception handler may take us someplace we weren't
      // expecting. Adjust internal breakpoints accordingly. First case is easy.
      if (deeper) {
        TRACE(2, "CmdNext: exception handler, deeper\n");
        return;
      }
      // For step-conts, we ignore handlers at the original level if we're not
      // in the original resumable. We don't care about exception handlers
      // in resumables being driven at the same level.
      if (hasStepResumable() && originalDepth &&
          (m_stepResumableId != getResumableId(fp))) {
        TRACE(2, "CmdNext: exception handler, original depth, wrong cont\n");
        return;
      }
      // Sometimes we have handlers in generated code, i.e., Continuation::next.
      // These just help propagate exceptions so ignore those.
      if (fp->m_func->line1() == 0) {
        TRACE(2, "CmdNext: exception handler, ignoring func with no source\n");
        return;
      }
      if (fp->m_func->isBuiltin()) {
        TRACE(2, "CmdNext: exception handler, ignoring builtin functions\n");
        return;
      }
      TRACE(2, "CmdNext: exception handler altering expected flow\n");
    } else {
      // We have internal breakpoints setup, but we haven't hit one yet. Keep
      // running until we reach one.
      TRACE(2, "CmdNext: waiting to hit internal breakpoint...\n");
      return;
    }
    // We've hit one internal breakpoint at a useful place, or decided we don't,
    // need them, so we can remove them all now.
    cleanupStepOuts();
    cleanupStepResumable();
  }

  if (interrupt.getInterruptType() == ExceptionHandler) {
    // If we're about to enter an exception handler we turn interrupts on to
    // ensure we stop when control reaches the handler. The normal logic below
    // will decide if we're done at that point or not.
    TRACE(2, "CmdNext: exception handler\n");
    removeLocationFilter();
    m_needsVMInterrupt = true;
    return;
  }

  if (m_skippingAwait) {
    m_skippingAwait = false;
    stepAfterAwait();
    return;
  }

  if (deeper) {
    TRACE(2, "CmdNext: deeper, setup step out to get back to original line\n");
    setupStepOuts();
    // We can nuke the entire location filter here since we'll re-install it
    // when we get back to the old level. Keeping it installed may be more
    // efficient if we were on a large line, but there is a penalty for every
    // opcode executed while it's installed and that's bad if there's a lot of
    // code called from that line.
    removeLocationFilter();
    return;
  }

  if (originalDepth && (m_loc == interrupt.getFileLine())) {
    TRACE(2, "CmdNext: not complete, still on same line\n");
    stepCurrentLine(interrupt, fp, pc);
    return;
  }

  TRACE(2, "CmdNext: operation complete.\n");
  m_complete = (decCount() == 0);
  if (!m_complete) {
    TRACE(2, "CmdNext: repeat count > 0, start fresh.\n");
    onSetup(proxy, interrupt);
  }
}
// Attempt to match `pattern` against the bytecode stream starting exactly at
// `start` (no scanning).  On success, fills `result` with the matched span
// and any captures; on failure, clears `result` (after trying alternations).
// Recurses for alternations, taken branches of JmpZ/JmpNZ, and subsequences.
void BCPattern::matchAnchored(const Expr& pattern, PC start, PC end,
                              Result& result) {
  auto pos = pattern.begin();

  for (auto inst = start; inst != end; ) {
    // Detect a match.
    if (pos == pattern.end()) {
      result.m_start = start;
      result.m_end = inst;
      return;
    }

    auto const op = peek_op(inst);

    // Skip pattern-globally ignored opcodes.
    if (m_ignores.count(op)) {
      inst = next(inst);
      continue;
    }

    // Check for alternations whenever we fail to match.
    auto nomatch = [&] {
      if (!pos->hasAlt()) return result.erase();

      // Pop the capture if we made one.
      if (pos->shouldCapture()) {
        result.m_captures.pop_back();
      }
      for (auto const& atom : pos->getAlt()) {
        // Construct the full alternate pattern.
        auto alt = Expr { atom };
        alt.insert(alt.end(), std::next(pos), pattern.end());

        auto res = result;

        // Match on the alternate.
        matchAnchored(alt, inst, end, res);
        if (res.found()) {
          result = res;
          result.m_start = start;
          return;
        }
      }
      return result.erase();
    };

    // Capture the atom if desired.
    if (pos->shouldCapture()) {
      result.m_captures.push_back(inst);
    }

    // Check for shallow match.
    if (pos->op() != op) {
      return nomatch();
    }

    auto filter = pos->getFilter();

    // Check for deep match if desired.
    if (filter && !filter(inst, result.m_captures)) {
      return nomatch();
    }

    if ((pos->op() == Op::JmpZ || pos->op() == Op::JmpNZ)) {
      // Match the taken block, if there is one.
      auto off = instrJumpOffset(inst);
      assert(off);

      auto res = result;
      matchAnchored(pos->getTaken(), inst + *off, end, res);

      if (!res.found()) {
        return nomatch();
      }

      // Grab the captures.
      result.m_captures = res.m_captures;
    }

    if (pos->hasSeq()) {
      // Match the subsequence if we have one.
      auto res = result;
      matchAnchored(pos->getSeq(), next(inst), end, res);

      if (!res.found()) {
        return nomatch();
      }

      // Set the PC.
      result.m_captures = res.m_captures;
      inst = res.m_end;
    } else {
      // Step the PC.
      inst = next(inst);
    }

    // Step the pattern.
    ++pos;
  }

  // Detect a terminal match: the pattern was exhausted exactly at `end`.
  if (pos == pattern.end()) {
    result.m_start = start;
    result.m_end = end;
  }
}
// Emit machine code for a Call IR instruction: set up the callee ActRec on
// the VM stack and transfer control.  Builtins that satisfy the NativeImpl
// pattern are invoked directly via a C++ call; everything else goes through a
// smashable callphp to a bind-call service-request stub.
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;       // nullptr when target is unknown
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  // The args sit just below the callee's ActRec on the stack.
  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  // Link the callee frame: saved frame pointer and return bytecode offset.
  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  // Fast path: a known builtin with a C++ implementation, no PHP wrapper
  // body (no nativeFuncPtr), and no default-argument trimming needed.
  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);
      // AssertRATL opcodes may precede the NativeImpl; skip past them.
      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      // Zero m_invName so the variable-environment union starts clean.
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation. This will free the locals for us in the
    // normal case. In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr), v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}), {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;

    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments. We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  // Generic path: enter the callee frame and call through a stub.
  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Poison vmsp and the saved RIP so bugs surface loudly in debug builds.
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub. The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
/*
 * Return the number of stack values pushed by the instruction at `pc`.
 *
 * BaseSC and BaseSL carry the push count in their second immediate; all
 * other opcodes get it from the static instruction-info table.
 */
int64_t getStackPushed(PC pc) {
  auto const opcode = peek_op(pc);
  auto const hasImmCount = opcode == Op::BaseSC || opcode == Op::BaseSL;
  return hasImmCount
    ? getImm(pc, 1).u_IVA
    : countOperands(getInstrInfo(opcode).out);
}
/**
 * Discard the current frame, assuming that a PHP exception given in
 * phpException argument, or C++ exception (phpException == nullptr)
 * is being thrown. Returns an exception to propagate, or nullptr
 * if the VM execution should be resumed.
 */
ObjectData* tearDownFrame(ActRec*& fp, Stack& stack, PC& pc,
                          ObjectData* phpException) {
  auto const func = fp->func();
  auto const curOp = peek_op(pc);
  auto const prevFp = fp->sfp();
  // Saved return offset into the caller, captured before the frame is freed.
  auto const soff = fp->m_soff;

  ITRACE(1, "tearDownFrame: {} ({})\n",
         func->fullName()->data(),
         func->unit()->filepath()->data());
  ITRACE(1, "  fp {} prevFp {}\n",
         implicit_cast<void*>(fp),
         implicit_cast<void*>(prevFp));

  // When throwing from a constructor, we normally want to avoid running the
  // destructor on an object that hasn't been fully constructed yet. But if
  // we're unwinding through the constructor's RetC, the constructor has
  // logically finished and we're unwinding for some internal reason (timeout
  // or user profiler, most likely). More importantly, fp->m_this may have
  // already been destructed and/or overwritten due to sharing space with
  // fp->m_r.
  if (curOp != OpRetC &&
      fp->hasThis() &&
      fp->getThis()->getVMClass()->getCtor() == func &&
      fp->getThis()->getVMClass()->getDtor()) {
    /*
     * Looks like an FPushCtor call, but it could still have been called
     * directly. Check the fpi region to be sure.
     */
    Offset prevPc;
    auto outer = g_context->getPrevVMState(fp, &prevPc);
    if (outer) {
      auto fe = outer->func()->findPrecedingFPI(prevPc);
      if (fe && isFPushCtor(outer->func()->unit()->getOp(fe->m_fpushOff))) {
        fp->getThis()->setNoDestruct();
      }
    }
  }

  auto const decRefLocals = [&] {
    /*
     * It is possible that locals have already been decref'd.
     *
     * Here's why:
     *
     *   - If a destructor for any of these things throws a php
     *     exception, it's swallowed at the dtor boundary and we keep
     *     running php.
     *
     *   - If the destructor for any of these things throws a fatal,
     *     it's swallowed, and we set surprise flags to throw a fatal
     *     from now on.
     *
     *   - If the second case happened and we have to run another
     *     destructor, its enter hook will throw, but it will be
     *     swallowed again.
     *
     *   - Finally, the exit hook for the returning function can
     *     throw, but this happens last so everything is destructed.
     *
     *   - When that happens, exit hook sets localsDecRefd flag.
     */
    if (!fp->localsDecRefd()) {
      try {
        // Note that we must convert locals and the $this to
        // uninit/zero during unwind.  This is because a backtrace
        // from another destructing object during this unwind may try
        // to read them.
        frame_free_locals_unwind(fp, func->numLocals(), phpException);
      } catch (...) {}
    }
  };

  if (LIKELY(!fp->resumed())) {
    decRefLocals();
    if (UNLIKELY(func->isAsyncFunction()) &&
        phpException && !fp->isFCallAwait()) {
      // If in an eagerly executed async function, wrap the user exception
      // into a failed StaticWaitHandle and return it to the caller.
      auto const waitHandle = c_StaticWaitHandle::CreateFailed(phpException);
      phpException = nullptr;
      stack.ndiscard(func->numSlotsInFrame());
      stack.ret();
      assert(stack.topTV() == &fp->m_r);
      cellCopy(make_tv<KindOfObject>(waitHandle), fp->m_r);
    } else {
      // Free ActRec.
      stack.ndiscard(func->numSlotsInFrame());
      stack.discardAR();
    }
  } else if (func->isAsyncFunction()) {
    auto const waitHandle = frame_afwh(fp);
    if (phpException) {
      // Handle exception thrown by async function.
      decRefLocals();
      waitHandle->fail(phpException);
      phpException = nullptr;
    } else if (waitHandle->isRunning()) {
      // Let the C++ exception propagate. If the current frame represents async
      // function that is running, mark it as abruptly interrupted. Some opcodes
      // like Await may change state of the async function just before exit hook
      // decides to throw C++ exception.
      decRefLocals();
      waitHandle->failCpp();
    }
  } else if (func->isAsyncGenerator()) {
    auto const gen = frame_async_generator(fp);
    if (phpException) {
      // Handle exception thrown by async generator.
      decRefLocals();
      auto eagerResult = gen->fail(phpException);
      phpException = nullptr;
      if (eagerResult) {
        stack.pushObjectNoRc(eagerResult);
      }
    } else if (gen->isEagerlyExecuted() || gen->getWaitHandle()->isRunning()) {
      // Fail the async generator and let the C++ exception propagate.
      decRefLocals();
      gen->failCpp();
    }
  } else if (func->isNonAsyncGenerator()) {
    // Mark the generator as finished.
    decRefLocals();
    frame_generator(fp)->fail();
  } else {
    not_reached();
  }

  /*
   * At the final ActRec in this nesting level.
   */
  if (UNLIKELY(!prevFp)) {
    pc = nullptr;
    fp = nullptr;
    return phpException;
  }

  assert(stack.isValidAddress(reinterpret_cast<uintptr_t>(prevFp)) ||
         prevFp->resumed());
  // Resume the caller at its saved return offset.
  auto const prevOff = soff + prevFp->func()->base();
  pc = prevFp->func()->unit()->at(prevOff);
  fp = prevFp;
  return phpException;
}
/*
 * Evaluate a space-separated boolean expression in `buffer` using the
 * shunting-yard algorithm.  Tokens of the form "bN" are replaced by
 * data[N]; function and operator tokens are classified by the
 * is_function()/is_operator() helpers and applied via function()/operator().
 * Returns the final value left on the output stack.
 *
 * Note: `buffer` is modified in place by strtok().
 */
static bool execute(char *buffer, bool *data)
{
    char *token, **ops, *o1, *o2;
    bool *output, res;
    int n_o, n_q, sz_o, sz_q;

    sz_o = 8192;
    sz_q = 8192;
    ops = malloc(sz_q * sizeof(char *));
    output = malloc(sz_o * sizeof(bool));
    if (!ops || !output) {
        /* allocations were previously unchecked; fail loudly instead of
         * dereferencing NULL */
        fprintf(stderr, "ERROR: out of memory\n");
        exit(EXIT_FAILURE);
    }
    n_o = n_q = 0;

    for (token = strtok(buffer, " "); token; token = strtok(NULL, " ")) {
        if (strncmp(token, "b", 1) == 0) {
            /* operand: "bN" indexes the data array */
            push_bit(data[atoi(token + 1)], &output, &n_o, &sz_o);
        } else if (is_function(token)) {
            push_op(token, &ops, &n_q, &sz_q);
        } else if (strncmp(token, ",", 1) == 0) {
            /* argument separator: apply pending ops back to the "(" */
            while (strncmp(peek_op(ops, n_q), "(", 1) != 0) {
                token = pop_op(ops, &n_q);
                if (is_operator(token)) {
                    push_bit(operator(token, pop_bit(output, &n_o),
                                      pop_bit(output, &n_o)),
                             &output, &n_o, &sz_o);
                } else {
                    push_bit(function(token, output, &n_o),
                             &output, &n_o, &sz_o);
                }
            }
        } else if (is_operator(token)) {
            /* apply stacked operators of greater-or-equal precedence */
            o1 = token;
            while (peek_op(ops, n_q) && is_operator(peek_op(ops, n_q))) {
                o2 = peek_op(ops, n_q);
                if (operator_precedence(o1) <= operator_precedence(o2)) {
                    push_bit(operator(pop_op(ops, &n_q),
                                      pop_bit(output, &n_o),
                                      pop_bit(output, &n_o)),
                             &output, &n_o, &sz_o);
                } else {
                    break;
                }
            }
            push_op(o1, &ops, &n_q, &sz_q);
        } else if (strncmp(token, "(", 1) == 0) {
            push_op(token, &ops, &n_q, &sz_q);
        } else if (strncmp(token, ")", 1) == 0) {
            /* close paren: apply everything back to the matching "(" */
            while (strncmp(peek_op(ops, n_q), "(", 1) != 0) {
                token = pop_op(ops, &n_q);
                if (is_operator(token)) {
                    push_bit(operator(token, pop_bit(output, &n_o),
                                      pop_bit(output, &n_o)),
                             &output, &n_o, &sz_o);
                } else {
                    push_bit(function(token, output, &n_o),
                             &output, &n_o, &sz_o);
                }
            }
            token = pop_op(ops, &n_q); /* pop the left bracket */
            if (peek_op(ops, n_q) && is_function(peek_op(ops, n_q))) {
                /* the parens were a function's argument list */
                push_bit(function(pop_op(ops, &n_q), output, &n_o),
                         &output, &n_o, &sz_o);
            }
        } else {
            fprintf(stderr, "ERROR: unknown symbol: %s\n", token);
            exit(EXIT_FAILURE);
        }
    }

    /* drain any remaining operators */
    while ((token = pop_op(ops, &n_q))) {
        if (is_operator(token)) {
            push_bit(operator(token, pop_bit(output, &n_o),
                              pop_bit(output, &n_o)),
                     &output, &n_o, &sz_o);
        } else {
            push_bit(function(token, output, &n_o), &output, &n_o, &sz_o);
        }
    }

    /* guard against reading an uninitialized slot on empty input */
    res = (n_o > 0) ? output[0] : false;
    free(output);
    free(ops);
    return res;
}
void print_instr(Output& out, const FuncInfo& finfo, PC pc) { auto const startPc = pc; auto rel_label = [&] (Offset off) { auto const tgt = startPc - finfo.unit->at(0) + off; return jmp_label(finfo, tgt); }; auto print_switch = [&] { auto const vecLen = decode<int32_t>(pc); out.fmt(" <"); for (auto i = int32_t{0}; i < vecLen; ++i) { auto const off = decode<Offset>(pc); FTRACE(1, "sw label: {}\n", off); out.fmt("{}{}", i != 0 ? " " : "", rel_label(off)); } out.fmt(">"); }; auto print_sswitch = [&] { auto const vecLen = decode<int32_t>(pc); out.fmt(" <"); for (auto i = int32_t{0}; i < vecLen; ++i) { auto const strId = decode<Id>(pc); auto const offset = decode<Offset>(pc); out.fmt("{}{}:{}", i != 0 ? " " : "", strId == -1 ? "-" : escaped(finfo.unit->lookupLitstrId(strId)), rel_label(offset) ); } out.fmt(">"); }; auto print_itertab = [&] { auto const vecLen = decode<int32_t>(pc); out.fmt(" <"); for (auto i = int32_t{0}; i < vecLen; ++i) { auto const kind = static_cast<IterKind>(decode<int32_t>(pc)); auto const id = decode<int32_t>(pc); auto const kindStr = [&]() -> const char* { switch (kind) { case KindOfIter: return "(Iter)"; case KindOfMIter: return "(MIter)"; case KindOfCIter: return "(CIter)"; } not_reached(); }(); out.fmt("{}{} {}", i != 0 ? ", " : "", kindStr, id); } out.fmt(">"); }; auto print_stringvec = [&] { auto const vecLen = decode<int32_t>(pc); out.fmt(" <"); for (auto i = uint32_t{0}; i < vecLen; ++i) { auto const str = finfo.unit->lookupLitstrId(decode<int32_t>(pc)); out.fmt("{}{}", i != 0 ? 
" " : "", escaped(str)); } out.fmt(">"); }; #define IMM_BLA print_switch(); #define IMM_SLA print_sswitch(); #define IMM_ILA print_itertab(); #define IMM_IVA out.fmt(" {}", decodeVariableSizeImm(&pc)); #define IMM_I64A out.fmt(" {}", decode<int64_t>(pc)); #define IMM_LA out.fmt(" {}", loc_name(finfo, decodeVariableSizeImm(&pc))); #define IMM_IA out.fmt(" {}", decodeVariableSizeImm(&pc)); #define IMM_DA out.fmt(" {}", decode<double>(pc)); #define IMM_SA out.fmt(" {}", \ escaped(finfo.unit->lookupLitstrId(decode<Id>(pc)))); #define IMM_RATA out.fmt(" {}", show(decodeRAT(finfo.unit, pc))); #define IMM_AA out.fmt(" @A_{}", decode<Id>(pc)); #define IMM_BA out.fmt(" {}", rel_label(decode<Offset>(pc))); #define IMM_OA(ty) out.fmt(" {}", \ subopToName(static_cast<ty>(decode<uint8_t>(pc)))); #define IMM_VSA print_stringvec(); #define IMM_KA out.fmt(" {}", show(decode_member_key(pc, finfo.unit))); #define IMM_NA #define IMM_ONE(x) IMM_##x #define IMM_TWO(x,y) IMM_ONE(x) IMM_ONE(y) #define IMM_THREE(x,y,z) IMM_TWO(x,y) IMM_ONE(z) #define IMM_FOUR(x,y,z,l) IMM_THREE(x,y,z) IMM_ONE(l) out.indent(); #define O(opcode, imms, ...) \ case Op::opcode: \ ++pc; \ out.fmt("{}", #opcode); \ IMM_##imms \ break; switch (peek_op(pc)) { OPCODES } #undef O assert(pc == startPc + instrLen(startPc)); #undef IMM_NA #undef IMM_ONE #undef IMM_TWO #undef IMM_THREE #undef IMM_FOUR #undef IMM_BLA #undef IMM_SLA #undef IMM_ILA #undef IMM_IVA #undef IMM_I64A #undef IMM_LA #undef IMM_IA #undef IMM_DA #undef IMM_SA #undef IMM_RATA #undef IMM_AA #undef IMM_BA #undef IMM_OA #undef IMM_VSA #undef IMM_KA out.nl(); }