const u8 *MipsJit::DoJit(u32 em_address, JitBlock *b) {
  js.cancel = false;
  js.blockStart = js.compilerPC = mips_->pc;
  js.lastContinuedPC = 0;
  js.initialBlockSize = 0;
  js.nextExit = 0;
  js.downcountAmount = 0;
  js.curBlock = b;
  js.compiling = true;
  js.inDelaySlot = false;
  js.PrefixStart();

  b->normalEntry = GetCodePtr();
  js.numInstructions = 0;
  while (js.compiling) {
    MIPSOpcode inst = Memory::Read_Opcode_JIT(js.compilerPC);
    js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

    MIPSCompileOp(inst);

    js.compilerPC += 4;
    js.numInstructions++;

    // Safety check, in case we get a bunch of really large jit ops without a lot of branching.
    if (GetSpaceLeft() < 0x800 || js.numInstructions >= JitBlockCache::MAX_BLOCK_INSTRUCTIONS) {
      FlushAll();
      WriteExit(js.compilerPC, js.nextExit++);
      js.compiling = false;
    }
  }

  b->codeSize = GetCodePtr() - b->normalEntry;

  // Don't forget to zap the newly written instructions in the instruction cache!
  FlushIcache();

  if (js.lastContinuedPC == 0) {
    b->originalSize = js.numInstructions;
  } else {
    // We continued at least once. Add the last proxy and set the originalSize correctly.
    blocks.ProxyBlock(js.blockStart, js.lastContinuedPC, (js.compilerPC - js.lastContinuedPC) / sizeof(u32), GetCodePtr());
    b->originalSize = js.initialBlockSize;
  }
  return b->normalEntry;
}
/* AfterCompact - called after the heap manager compacts the heap */
static void AfterCompact(void *cookie)
{
  Interpreter *i = (Interpreter *)cookie;
  uint8_t *cbase = GetCodePtr(i->code);
  i->pc = cbase + (i->pc - i->cbase);
  i->cbase = cbase;
}
static void StartCode(Interpreter *i)
{
  VMHANDLE code = PopH(i);
  VMVALUE tmp, tmp2;
  if (!code)
    Abort(i->sys, str_not_code_object_err, code);
  switch (GetHeapObjType(code)) {
  case ObjTypeCode:
    /* save the caller's frame pointers as stack offsets */
    tmp = (VMVALUE)(i->fp - i->stack);
    tmp2 = (VMVALUE)(i->hfp - (VMHANDLE *)i->stack);
    /* build the new frame */
    i->hfp = i->hsp;
    PushH(i, i->code);
    i->fp = i->sp;
    Reserve(i, F_SIZE);
    i->fp[F_FP] = tmp;
    i->fp[F_HFP] = tmp2;
    /* the pc is saved as an offset since heap compaction can move code objects */
    i->fp[F_PC] = (VMVALUE)(i->pc - i->cbase);
    /* switch execution to the new code object */
    i->code = code;
    ObjAddRef(i->code);
    i->cbase = i->pc = GetCodePtr(code);
    break;
  case ObjTypeIntrinsic:
    (*GetIntrinsicHandler(code))(i);
    break;
  default:
    Abort(i->sys, str_not_code_object_err, code);
    break;
  }
}
const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
{
  js.cancel = false;
  js.blockStart = js.compilerPC = mips_->pc;
  js.downcountAmount = 0;
  js.curBlock = b;
  js.compiling = true;
  js.inDelaySlot = false;
  js.PrefixStart();

  // We add a check before the block, used when entering from a linked block.
  b->checkedEntry = GetCodePtr();

  // Downcount flag check. The last block decremented downcounter, and the flag should still be available.
  FixupBranch skip = J_CC(CC_NBE);
  MOV(32, M(&mips_->pc), Imm32(js.blockStart));
  JMP(asm_.outerLoop, true);  // downcount hit zero - go advance.
  SetJumpTarget(skip);

  b->normalEntry = GetCodePtr();

  // TODO: this needs work
  MIPSAnalyst::AnalysisResults analysis; // = MIPSAnalyst::Analyze(em_address);

  gpr.Start(mips_, analysis);
  fpr.Start(mips_, analysis);

  js.numInstructions = 0;
  while (js.compiling) {
    // Jit breakpoints are quite fast, so let's do them in release too.
    CheckJitBreakpoint(js.compilerPC, 0);

    u32 inst = Memory::Read_Instruction(js.compilerPC);
    js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

    MIPSCompileOp(inst);

    js.compilerPC += 4;
    js.numInstructions++;
  }

  b->codeSize = (u32)(GetCodePtr() - b->normalEntry);
  NOP();
  AlignCode4();

  b->originalSize = js.numInstructions;
  return b->normalEntry;
}
const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
{
  js.cancel = false;
  js.blockStart = js.compilerPC = mips_->pc;
  js.downcountAmount = 0;
  js.curBlock = b;
  js.compiling = true;
  js.inDelaySlot = false;

  b->normalEntry = GetCodePtr();

  // TODO: this needs work
  MIPSAnalyst::AnalysisResults analysis; // = MIPSAnalyst::Analyze(em_address);

  gpr.Start(mips_, analysis);
  fpr.Start(mips_, analysis);

  int numInstructions = 0;
  int cycles = 0;
  while (js.compiling) {
    u32 inst = Memory::Read_Instruction(js.compilerPC);
    js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

    MIPSCompileOp(inst);

    js.compilerPC += 4;
    numInstructions++;
  }

  b->codeSize = GetCodePtr() - b->normalEntry;
  NOP();
  AlignCode16();

  b->originalSize = numInstructions;
  return b->normalEntry;
}
static void PopFrame(Interpreter *i)
{
  int argumentCount = VMCODEBYTE(i->pc++);
  int handleArgumentCount = VMCODEBYTE(i->pc++);
  ObjRelease(i->heap, i->code);
  i->code = i->hfp[HF_CODE];
  i->hsp = i->hfp;
  /* release the returning function's handle arguments */
  while (--handleArgumentCount >= 0) {
    ObjRelease(i->heap, *i->hsp);
    DropH(i, 1);
  }
  /* restore the caller's code object, pc, and frame pointers */
  i->cbase = GetCodePtr(i->code);
  i->pc = i->cbase + i->fp[F_PC];
  i->hfp = (VMHANDLE *)i->stack + i->fp[F_HFP];
  i->sp = i->fp;
  i->fp = i->stack + i->fp[F_FP];
  Drop(i, argumentCount);
}
bool Jit::ReplaceJalTo(u32 dest) {
  MIPSOpcode op(Memory::Read_Opcode_JIT(dest));
  if (!MIPS_IS_REPLACEMENT(op.encoding))
    return false;

  int index = op.encoding & MIPS_EMUHACK_VALUE_MASK;
  const ReplacementTableEntry *entry = GetReplacementFunc(index);
  if (!entry) {
    ERROR_LOG(HLE, "ReplaceJalTo: Invalid replacement op %08x at %08x", op.encoding, dest);
    return false;
  }

  if (entry->flags & (REPFLAG_HOOKENTER | REPFLAG_HOOKEXIT | REPFLAG_DISABLED)) {
    // If it's a hook, we can't replace the jal, we have to go inside the func.
    return false;
  }

  // Warning - this might be bad if the code at the destination changes...
  if (entry->flags & REPFLAG_ALLOWINLINE) {
    // Jackpot! Just do it, no flushing. The code will be entirely inlined.

    // First, compile the delay slot. It's unconditional so no issues.
    CompileDelaySlot(DELAYSLOT_NICE);
    // Technically, we should write the unused return address to RA, but meh.
    MIPSReplaceFunc repl = entry->jitReplaceFunc;
    int cycles = (this->*repl)();
    js.downcountAmount += cycles;
  } else {
    gpr.SetImm(MIPS_REG_RA, js.compilerPC + 8);
    CompileDelaySlot(DELAYSLOT_NICE);
    FlushAll();
    MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
    ABI_CallFunction(entry->replaceFunc);
    SUB(32, M(&currentMIPS->downcount), R(EAX));
  }

  js.compilerPC += 4;
  // No writing exits, keep going!

  // Add a trigger so that if the inlined code changes, we invalidate this block.
  // TODO: Correctly determine the size of this block.
  blocks.ProxyBlock(js.blockStart, dest, 4, GetCodePtr());
  return true;
}
void CachedInterpreter::Jit(u32 address)
{
  if (m_code.size() >= CODE_SIZE / sizeof(Instruction) - 0x1000 || IsFull() ||
      SConfig::GetInstance().bJITNoBlockCache)
  {
    ClearCache();
  }

  u32 nextPC = analyzer.Analyze(PC, &code_block, &code_buffer, code_buffer.GetSize());
  if (code_block.m_memory_exception)
  {
    // Address of instruction could not be translated
    NPC = nextPC;
    PowerPC::ppcState.Exceptions |= EXCEPTION_ISI;
    PowerPC::CheckExceptions();
    WARN_LOG(POWERPC, "ISI exception at 0x%08x", nextPC);
    return;
  }

  int block_num = AllocateBlock(PC);
  JitBlock* b = GetBlock(block_num);

  js.blockStart = PC;
  js.firstFPInstructionFound = false;
  js.fifoBytesThisBlock = 0;
  js.downcountAmount = 0;
  js.curBlock = b;

  PPCAnalyst::CodeOp* ops = code_buffer.codebuffer;

  b->checkedEntry = GetCodePtr();
  b->normalEntry = GetCodePtr();
  b->runCount = 0;

  for (u32 i = 0; i < code_block.m_num_instructions; i++)
  {
    js.downcountAmount += ops[i].opinfo->numCycles;

    u32 function = HLE::GetFunctionIndex(ops[i].address);
    if (function != 0)
    {
      int type = HLE::GetFunctionTypeByIndex(function);
      if (type == HLE::HLE_HOOK_START || type == HLE::HLE_HOOK_REPLACE)
      {
        int flags = HLE::GetFunctionFlagsByIndex(function);
        if (HLE::IsEnabled(flags))
        {
          m_code.emplace_back(WritePC, ops[i].address);
          m_code.emplace_back(Interpreter::HLEFunction, ops[i].inst);
          if (type == HLE::HLE_HOOK_REPLACE)
          {
            m_code.emplace_back(EndBlock, js.downcountAmount);
            m_code.emplace_back();
            break;
          }
        }
      }
    }

    if (!ops[i].skip)
    {
      if ((ops[i].opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound)
      {
        m_code.emplace_back(CheckFPU, ops[i].address);
        js.firstFPInstructionFound = true;
      }

      if (ops[i].opinfo->flags & FL_ENDBLOCK)
        m_code.emplace_back(WritePC, ops[i].address);
      m_code.emplace_back(GetInterpreterOp(ops[i].inst), ops[i].inst);
      if (ops[i].opinfo->flags & FL_ENDBLOCK)
        m_code.emplace_back(EndBlock, js.downcountAmount);
    }
  }
  if (code_block.m_broken)
  {
    m_code.emplace_back(WritePC, nextPC);
    m_code.emplace_back(EndBlock, js.downcountAmount);
  }
  m_code.emplace_back();

  b->codeSize = (u32)(GetCodePtr() - b->checkedEntry);
  b->originalSize = code_block.m_num_instructions;

  FinalizeBlock(block_num, jo.enableBlocklink, b->checkedEntry);
}
const u8 *Jit::DoJit(u32 em_address, JitBlock *b) {
  js.cancel = false;
  js.blockStart = js.compilerPC = mips_->pc;
  js.nextExit = 0;
  js.downcountAmount = 0;
  js.curBlock = b;
  js.compiling = true;
  js.inDelaySlot = false;
  js.afterOp = JitState::AFTER_NONE;
  js.PrefixStart();

  // We add a check before the block, used when entering from a linked block.
  b->checkedEntry = GetCodePtr();

  // Downcount flag check. The last block decremented downcounter, and the flag should still be available.
  FixupBranch skip = J_CC(CC_NBE);
  MOV(32, M(&mips_->pc), Imm32(js.blockStart));
  JMP(asm_.outerLoop, true);  // downcount hit zero - go advance.
  SetJumpTarget(skip);

  b->normalEntry = GetCodePtr();

  MIPSAnalyst::AnalysisResults analysis = MIPSAnalyst::Analyze(em_address);

  gpr.Start(mips_, analysis);
  fpr.Start(mips_, analysis);

  js.numInstructions = 0;
  while (js.compiling) {
    // Jit breakpoints are quite fast, so let's do them in release too.
    CheckJitBreakpoint(js.compilerPC, 0);

    MIPSOpcode inst = Memory::Read_Opcode_JIT(js.compilerPC);
    js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

    MIPSCompileOp(inst);

    if (js.afterOp & JitState::AFTER_CORE_STATE) {
      // TODO: Save/restore?
      FlushAll();

      // If we're rewinding, CORE_NEXTFRAME should not cause a rewind.
      // It doesn't really matter either way if we're not rewinding.
      // CORE_RUNNING is <= CORE_NEXTFRAME.
      CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME));
      FixupBranch skipCheck = J_CC(CC_LE);
      if (js.afterOp & JitState::AFTER_REWIND_PC_BAD_STATE)
        MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
      else
        MOV(32, M(&mips_->pc), Imm32(js.compilerPC + 4));
      WriteSyscallExit();
      SetJumpTarget(skipCheck);

      js.afterOp = JitState::AFTER_NONE;
    }
    if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
      js.afterOp &= ~JitState::AFTER_MEMCHECK_CLEANUP;
    }

    js.compilerPC += 4;
    js.numInstructions++;

    // Safety check, in case we get a bunch of really large jit ops without a lot of branching.
    if (GetSpaceLeft() < 0x800) {
      FlushAll();
      WriteExit(js.compilerPC, js.nextExit++);
      js.compiling = false;
    }
  }

  b->codeSize = (u32)(GetCodePtr() - b->normalEntry);
  NOP();
  AlignCode4();

  b->originalSize = js.numInstructions;
  return b->normalEntry;
}
void MipsJit::AddContinuedBlock(u32 dest) {
  // The first block is the root block. When we continue, we create proxy blocks after that.
  if (js.lastContinuedPC == 0)
    js.initialBlockSize = js.numInstructions;
  else
    blocks.ProxyBlock(js.blockStart, js.lastContinuedPC, (js.compilerPC - js.lastContinuedPC) / sizeof(u32), GetCodePtr());
  js.lastContinuedPC = dest;
}
void CachedInterpreter::Jit(u32 address)
{
  if (m_code.size() >= CODE_SIZE / sizeof(Instruction) - 0x1000 ||
      SConfig::GetInstance().bJITNoBlockCache)
  {
    ClearCache();
  }

  u32 nextPC = analyzer.Analyze(PC, &code_block, &code_buffer, code_buffer.GetSize());
  if (code_block.m_memory_exception)
  {
    // Address of instruction could not be translated
    NPC = nextPC;
    PowerPC::ppcState.Exceptions |= EXCEPTION_ISI;
    PowerPC::CheckExceptions();
    WARN_LOG(POWERPC, "ISI exception at 0x%08x", nextPC);
    return;
  }

  JitBlock* b = m_block_cache.AllocateBlock(PC);

  js.blockStart = PC;
  js.firstFPInstructionFound = false;
  js.fifoBytesSinceCheck = 0;
  js.downcountAmount = 0;
  js.curBlock = b;

  PPCAnalyst::CodeOp* ops = code_buffer.codebuffer;

  b->checkedEntry = GetCodePtr();
  b->normalEntry = GetCodePtr();

  for (u32 i = 0; i < code_block.m_num_instructions; i++)
  {
    js.downcountAmount += ops[i].opinfo->numCycles;

    u32 function = HLE::GetFirstFunctionIndex(ops[i].address);
    if (function != 0)
    {
      HLE::HookType type = HLE::GetFunctionTypeByIndex(function);
      if (type == HLE::HookType::Start || type == HLE::HookType::Replace)
      {
        HLE::HookFlag flags = HLE::GetFunctionFlagsByIndex(function);
        if (HLE::IsEnabled(flags))
        {
          m_code.emplace_back(WritePC, ops[i].address);
          m_code.emplace_back(Interpreter::HLEFunction, function);
          if (type == HLE::HookType::Replace)
          {
            m_code.emplace_back(EndBlock, js.downcountAmount);
            m_code.emplace_back();
            break;
          }
        }
      }
    }

    if (!ops[i].skip)
    {
      bool check_fpu = (ops[i].opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound;
      bool endblock = (ops[i].opinfo->flags & FL_ENDBLOCK) != 0;
      bool memcheck = (ops[i].opinfo->flags & FL_LOADSTORE) && jo.memcheck;

      if (check_fpu)
      {
        m_code.emplace_back(WritePC, ops[i].address);
        m_code.emplace_back(CheckFPU, js.downcountAmount);
        js.firstFPInstructionFound = true;
      }

      if (endblock || memcheck)
        m_code.emplace_back(WritePC, ops[i].address);
      m_code.emplace_back(PPCTables::GetInterpreterOp(ops[i].inst), ops[i].inst);
      if (memcheck)
        m_code.emplace_back(CheckDSI, js.downcountAmount);
      if (endblock)
        m_code.emplace_back(EndBlock, js.downcountAmount);
    }
  }
  if (code_block.m_broken)
  {
    m_code.emplace_back(WriteBrokenBlockNPC, nextPC);
    m_code.emplace_back(EndBlock, js.downcountAmount);
  }
  m_code.emplace_back();

  b->codeSize = (u32)(GetCodePtr() - b->checkedEntry);
  b->originalSize = code_block.m_num_instructions;

  m_block_cache.FinalizeBlock(*b, jo.enableBlocklink, code_block.m_physical_addresses);
}
void JitArmILAsmRoutineManager::Generate()
{
  enterCode = GetCodePtr();
  PUSH(9, R4, R5, R6, R7, R8, R9, R10, R11, _LR);

  // Take care to 8-byte align stack for function calls.
  // We are misaligned here because of an odd number of args for PUSH.
  // It's not like x86 where you need to account for an extra 4 bytes
  // consumed by CALL.
  SUB(_SP, _SP, 4);

  MOVI2R(R0, (u32)&CoreTiming::downcount);
  MOVI2R(R9, (u32)&PowerPC::ppcState.spr[0]);

  FixupBranch skipToRealDispatcher = B();
  dispatcher = GetCodePtr();
  printf("ILDispatcher is %p\n", dispatcher);

  // Downcount Check
  // The result of slice decrementation should be in flags if somebody jumped here
  // IMPORTANT - We jump on negative, not carry!!!
  FixupBranch bail = B_CC(CC_MI);

  SetJumpTarget(skipToRealDispatcher);
  dispatcherNoCheck = GetCodePtr();

  // This block of code gets the address of the compiled block of code
  // It runs though to the compiling portion if it isn't found
  LDR(R12, R9, PPCSTATE_OFF(pc)); // Load the current PC into R12

  Operand2 iCacheMask = Operand2(0xE, 2); // JIT_ICACHE_MASK
  BIC(R12, R12, iCacheMask); // R12 contains PC & JIT_ICACHE_MASK here.

  MOVI2R(R14, (u32)jit->GetBlockCache()->iCache);

  LDR(R12, R14, R12); // R12 contains iCache[PC & JIT_ICACHE_MASK] here
  // R12 Confirmed this is the correct iCache Location loaded.
  TST(R12, 0x80); // Test to see if it is a JIT block.

  SetCC(CC_EQ);
  // Success, it is our Jitblock.
  MOVI2R(R14, (u32)jit->GetBlockCache()->GetCodePointers());
  // LDR R14 right here to get CodePointers()[0] pointer.
  LSL(R12, R12, 2); // Multiply by four because address locations are u32 in size
  LDR(R14, R14, R12); // Load the block address in to R14
  B(R14);
  // No need to jump anywhere after here, the block will go back to dispatcher start
  SetCC();

  // If we get to this point, that means that we don't have the block cached to execute
  // So call ArmJit to compile the block and then execute it.
  MOVI2R(R14, (u32)&Jit);
  BL(R14);
  B(dispatcherNoCheck);

  // fpException()
  // Floating Point Exception Check, Jumped to if false
  fpException = GetCodePtr();
  LDR(R0, R9, PPCSTATE_OFF(Exceptions));
  ORR(R0, R0, EXCEPTION_FPU_UNAVAILABLE);
  STR(R0, R9, PPCSTATE_OFF(Exceptions));
  QuickCallFunction(R14, (void*)&PowerPC::CheckExceptions);
  LDR(R0, R9, PPCSTATE_OFF(npc));
  STR(R0, R9, PPCSTATE_OFF(pc));
  B(dispatcher);

  SetJumpTarget(bail);
  doTiming = GetCodePtr();
  // XXX: In JIT64, Advance() gets called /after/ the exception checking
  // once it jumps back to the start of outerLoop
  QuickCallFunction(R14, (void*)&CoreTiming::Advance);

  // Does exception checking
  testExceptions = GetCodePtr();
  LDR(R0, R9, PPCSTATE_OFF(pc));
  STR(R0, R9, PPCSTATE_OFF(npc));
  QuickCallFunction(R14, (void*)&PowerPC::CheckExceptions);
  LDR(R0, R9, PPCSTATE_OFF(npc));
  STR(R0, R9, PPCSTATE_OFF(pc));

  // Check the state pointer to see if we are exiting
  // Gets checked on every exception check
  MOVI2R(R0, (u32)PowerPC::GetStatePtr());
  MVN(R1, 0);
  LDR(R0, R0);
  TST(R0, R1);
  FixupBranch Exit = B_CC(CC_NEQ);

  B(dispatcher);

  SetJumpTarget(Exit);

  ADD(_SP, _SP, 4);

  POP(9, R4, R5, R6, R7, R8, R9, R10, R11, _PC); // Returns

  GenerateCommon();

  FlushIcache();
}
void CachedInterpreter::Jit(u32 address)
{
  if (m_code.size() >= CODE_SIZE / sizeof(Instruction) - 0x1000 ||
      SConfig::GetInstance().bJITNoBlockCache)
  {
    ClearCache();
  }

  const u32 nextPC = analyzer.Analyze(PC, &code_block, &m_code_buffer, m_code_buffer.size());
  if (code_block.m_memory_exception)
  {
    // Address of instruction could not be translated
    NPC = nextPC;
    PowerPC::ppcState.Exceptions |= EXCEPTION_ISI;
    PowerPC::CheckExceptions();
    WARN_LOG(POWERPC, "ISI exception at 0x%08x", nextPC);
    return;
  }

  JitBlock* b = m_block_cache.AllocateBlock(PC);

  js.blockStart = PC;
  js.firstFPInstructionFound = false;
  js.fifoBytesSinceCheck = 0;
  js.downcountAmount = 0;
  js.curBlock = b;

  b->checkedEntry = GetCodePtr();
  b->normalEntry = GetCodePtr();

  for (u32 i = 0; i < code_block.m_num_instructions; i++)
  {
    PPCAnalyst::CodeOp& op = m_code_buffer[i];

    js.downcountAmount += op.opinfo->numCycles;

    if (HandleFunctionHooking(op.address))
      break;

    if (!op.skip)
    {
      const bool breakpoint = SConfig::GetInstance().bEnableDebugging &&
                              PowerPC::breakpoints.IsAddressBreakPoint(op.address);
      const bool check_fpu = (op.opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound;
      const bool endblock = (op.opinfo->flags & FL_ENDBLOCK) != 0;
      const bool memcheck = (op.opinfo->flags & FL_LOADSTORE) && jo.memcheck;

      if (breakpoint)
      {
        m_code.emplace_back(WritePC, op.address);
        m_code.emplace_back(CheckBreakpoint, js.downcountAmount);
      }
      if (check_fpu)
      {
        m_code.emplace_back(WritePC, op.address);
        m_code.emplace_back(CheckFPU, js.downcountAmount);
        js.firstFPInstructionFound = true;
      }

      if (endblock || memcheck)
        m_code.emplace_back(WritePC, op.address);
      m_code.emplace_back(PPCTables::GetInterpreterOp(op.inst), op.inst);
      if (memcheck)
        m_code.emplace_back(CheckDSI, js.downcountAmount);
      if (endblock)
        m_code.emplace_back(EndBlock, js.downcountAmount);
    }
  }
  if (code_block.m_broken)
  {
    m_code.emplace_back(WriteBrokenBlockNPC, nextPC);
    m_code.emplace_back(EndBlock, js.downcountAmount);
  }
  m_code.emplace_back();

  b->codeSize = (u32)(GetCodePtr() - b->checkedEntry);
  b->originalSize = code_block.m_num_instructions;

  m_block_cache.FinalizeBlock(*b, jo.enableBlocklink, code_block.m_physical_addresses);
}
/* Execute - execute the main code */
int Execute(System *sys, ObjHeap *heap, VMHANDLE main)
{
  size_t stackSize;
  Interpreter *i;
  VMVALUE tmp, tmp2, ind;
  VMHANDLE obj, htmp;
  int8_t tmpb;

  /* allocate the interpreter state */
  if (!(i = (Interpreter *)AllocateFreeSpace(sys, sizeof(Interpreter))))
    return VMFALSE;

  /* make sure there is space left for the stack */
  if ((stackSize = (sys->freeTop - sys->freeNext) / sizeof(VMVALUE)) <= 0)
    return VMFALSE;

  /* setup the heap before/after compact functions */
  heap->beforeCompact = NULL;
  heap->afterCompact = AfterCompact;
  heap->compactCookie = i;

  /* initialize the interpreter state */
  i->sys = sys;
  i->heap = heap;
  i->stack = (VMVALUE *)((uint8_t *)i + sizeof(Interpreter));
  i->stackTop = i->stack + stackSize;

  /* setup to execute the main function */
  i->code = main;
  ObjAddRef(i->code);
  i->cbase = i->pc = GetCodePtr(main);

  i->sp = i->fp = i->stackTop;
  i->hsp = i->hfp = (VMHANDLE *)i->stack - 1;

  if (setjmp(i->sys->errorTarget)) {
    while (i->hsp > (VMHANDLE *)i->stack)
      ObjRelease(i->heap, PopH(i));
    ObjRelease(i->heap, i->code);
    return VMFALSE;
  }

  for (;;) {
#if 0
    ShowStack(i);
    DecodeInstruction(0, 0, i->pc);
#endif
    switch (VMCODEBYTE(i->pc++)) {
    case OP_HALT:
      return VMTRUE;
    case OP_BRT:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      if (Pop(i))
        i->pc += tmp;
      break;
    case OP_BRTSC:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      if (*i->sp)
        i->pc += tmp;
      else
        Drop(i, 1);
      break;
    case OP_BRF:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      if (!Pop(i))
        i->pc += tmp;
      break;
    case OP_BRFSC:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      if (!*i->sp)
        i->pc += tmp;
      else
        Drop(i, 1);
      break;
    case OP_BR:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      i->pc += tmp;
      break;
    case OP_NOT:
      *i->sp = (*i->sp ? VMFALSE : VMTRUE);
      break;
    case OP_NEG:
      *i->sp = -*i->sp;
      break;
    case OP_ADD:
      tmp = Pop(i);
      *i->sp += tmp;
      break;
    case OP_SUB:
      tmp = Pop(i);
      *i->sp -= tmp;
      break;
    case OP_MUL:
      tmp = Pop(i);
      *i->sp *= tmp;
      break;
    case OP_DIV:
      tmp = Pop(i);
      *i->sp = (tmp == 0 ? 0 : *i->sp / tmp);
      break;
    case OP_REM:
      tmp = Pop(i);
      *i->sp = (tmp == 0 ? 0 : *i->sp % tmp);
      break;
    case OP_CAT:
      StringCat(i);
      break;
    case OP_BNOT:
      *i->sp = ~*i->sp;
      break;
    case OP_BAND:
      tmp = Pop(i);
      *i->sp &= tmp;
      break;
    case OP_BOR:
      tmp = Pop(i);
      *i->sp |= tmp;
      break;
    case OP_BXOR:
      tmp = Pop(i);
      *i->sp ^= tmp;
      break;
    case OP_SHL:
      tmp = Pop(i);
      *i->sp <<= tmp;
      break;
    case OP_SHR:
      tmp = Pop(i);
      *i->sp >>= tmp;
      break;
    case OP_LT:
      tmp = Pop(i);
      *i->sp = (*i->sp < tmp ? VMTRUE : VMFALSE);
      break;
    case OP_LE:
      tmp = Pop(i);
      *i->sp = (*i->sp <= tmp ? VMTRUE : VMFALSE);
      break;
    case OP_EQ:
      tmp = Pop(i);
      *i->sp = (*i->sp == tmp ? VMTRUE : VMFALSE);
      break;
    case OP_NE:
      tmp = Pop(i);
      *i->sp = (*i->sp != tmp ? VMTRUE : VMFALSE);
      break;
    case OP_GE:
      tmp = Pop(i);
      *i->sp = (*i->sp >= tmp ? VMTRUE : VMFALSE);
      break;
    case OP_GT:
      tmp = Pop(i);
      *i->sp = (*i->sp > tmp ? VMTRUE : VMFALSE);
      break;
    case OP_LIT:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      CPush(i, tmp);
      break;
    case OP_GREF:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      obj = (VMHANDLE)tmp;
      CPush(i, GetSymbolPtr(obj)->v.iValue);
      break;
    case OP_GSET:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      obj = (VMHANDLE)tmp;
      GetSymbolPtr(obj)->v.iValue = Pop(i);
      break;
    case OP_LREF:
      tmpb = (int8_t)VMCODEBYTE(i->pc++);
      CPush(i, i->fp[(int)tmpb]);
      break;
    case OP_LSET:
      tmpb = (int8_t)VMCODEBYTE(i->pc++);
      i->fp[(int)tmpb] = Pop(i);
      break;
    case OP_VREF:
      ind = *i->sp;
      obj = *i->hsp;
      if (ind < 0 || ind >= GetHeapObjSize(obj))
        Abort(i->sys, str_subscript_err, ind);
      *i->sp = GetIntegerVectorBase(obj)[ind];
      DropH(i, 1);
      break;
    case OP_VSET:
      tmp2 = Pop(i);
      ind = Pop(i);
      obj = *i->hsp;
      if (ind < 0 || ind >= GetHeapObjSize(obj))
        Abort(i->sys, str_subscript_err, ind);
      GetIntegerVectorBase(obj)[ind] = tmp2;
      DropH(i, 1);
      break;
    case OP_LITH:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      CPushH(i, (VMHANDLE)tmp);
      ObjAddRef(*i->hsp);
      break;
    case OP_GREFH:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      CPushH(i, GetSymbolPtr((VMHANDLE)tmp)->v.hValue);
      ObjAddRef(*i->hsp);
      break;
    case OP_GSETH:
      get_VMVALUE(tmp, VMCODEBYTE(i->pc++));
      ObjRelease(i->heap, GetSymbolPtr((VMHANDLE)tmp)->v.hValue);
      GetSymbolPtr((VMHANDLE)tmp)->v.hValue = PopH(i);
      break;
    case OP_LREFH:
      tmpb = (int8_t)VMCODEBYTE(i->pc++);
      CPushH(i, i->hfp[(int)tmpb]);
      ObjAddRef(*i->hsp);
      break;
    case OP_LSETH:
      tmpb = (int8_t)VMCODEBYTE(i->pc++);
      ObjRelease(i->heap, i->hfp[(int)tmpb]);
      i->hfp[(int)tmpb] = PopH(i);
      break;
    case OP_VREFH:
      ind = Pop(i);
      obj = *i->hsp;
      if (ind < 0 || ind >= GetHeapObjSize(obj))
        Abort(i->sys, str_subscript_err, ind);
      *i->hsp = GetStringVectorBase(obj)[ind];
      ObjAddRef(*i->hsp);
      break;
    case OP_VSETH:
      htmp = PopH(i);
      ind = Pop(i);
      obj = *i->hsp;
      if (ind < 0 || ind >= GetHeapObjSize(obj))
        Abort(i->sys, str_subscript_err, ind);
      ObjRelease(i->heap, GetStringVectorBase(obj)[ind]);
      GetStringVectorBase(obj)[ind] = htmp;
      DropH(i, 1);
      break;
    case OP_RESERVE:
      tmp = VMCODEBYTE(i->pc++);
      tmp2 = VMCODEBYTE(i->pc++);
      Reserve(i, tmp);
      ReserveH(i, tmp2);
      break;
    case OP_CALL:
      StartCode(i);
      break;
    case OP_RETURN:
      tmp = *i->sp;
      PopFrame(i);
      Push(i, tmp);
      break;
    case OP_RETURNH:
      htmp = *i->hsp;
      PopFrame(i);
      PushH(i, htmp);
      break;
    case OP_RETURNV:
      PopFrame(i);
      break;
    case OP_DROP:
      Drop(i, 1);
      break;
    case OP_DROPH:
      ObjRelease(i->heap, *i->hsp);
      DropH(i, 1);
      break;
    default:
      Abort(i->sys, str_opcode_err, VMCODEBYTE(i->pc - 1));
      break;
    }
  }
}