// Reserves the next block slot for a block starting at startAddress and
// returns its block number. If a pure proxy block already occupies that
// address, it is retired and its proxied roots are carried over.
int JitBlockCache::AllocateBlock(u32 startAddress) {
	JitBlock &block = blocks_[num_blocks_];
	block.proxyFor = 0;

	// If there's an existing pure proxy block at the address, we need to ditch it
	// and create a new one, taking over the proxied blocks.
	int existing = GetBlockNumberFromStartAddress(startAddress, false);
	if (existing >= 0 && blocks_[existing].IsPureProxy()) {
		RemoveBlockMap(existing);
		blocks_[existing].invalid = true;
		block.proxyFor = new std::vector<u32>();
		*block.proxyFor = *blocks_[existing].proxyFor;
		blocks_[existing].proxyFor->clear();
		delete blocks_[existing].proxyFor;
		blocks_[existing].proxyFor = 0;
	}

	block.invalid = false;
	block.originalAddress = startAddress;

	// Reset every exit slot; the compiler fills these in later.
	for (int exit = 0; exit < MAX_JIT_BLOCK_EXITS; ++exit) {
		block.exitAddress[exit] = INVALID_EXIT;
		block.exitPtrs[exit] = 0;
		block.linkStatus[exit] = false;
	}

	block.blockNum = num_blocks_;
	num_blocks_++;  // Commit the current block.
	return num_blocks_ - 1;
}
// Registers a proxy block: a stub at startAddress whose code was inlined into
// the block at rootAddress. If a real block already exists at startAddress,
// rootAddress is additionally recorded as one of its proxy roots.
void JitBlockCache::ProxyBlock(u32 rootAddress, u32 startAddress, u32 size, const u8 *codePtr) {
	// If there's an existing block at the startAddress, add rootAddress as a
	// proxy root of that block instead of creating a new block.
	int existing = GetBlockNumberFromStartAddress(startAddress, false);
	if (existing != -1) {
		DEBUG_LOG(HLE, "Adding proxy root %08x to block at %08x", rootAddress, startAddress);
		if (!blocks_[existing].proxyFor)
			blocks_[existing].proxyFor = new std::vector<u32>();
		blocks_[existing].proxyFor->push_back(rootAddress);
	}

	JitBlock &proxy = blocks_[num_blocks_];
	proxy.invalid = false;
	proxy.originalAddress = startAddress;
	proxy.originalSize = size;
	for (int exit = 0; exit < MAX_JIT_BLOCK_EXITS; ++exit) {
		proxy.exitAddress[exit] = INVALID_EXIT;
		proxy.exitPtrs[exit] = 0;
		proxy.linkStatus[exit] = false;
	}
	proxy.exitAddress[0] = rootAddress;
	proxy.blockNum = num_blocks_;
	proxy.proxyFor = new std::vector<u32>();
	proxy.SetPureProxy();  // Flag as pure proxy block.

	// Point the entries at real code so binary searches and stuff work ok.
	proxy.normalEntry = codePtr;
	proxy.checkedEntry = codePtr;

	proxyBlockMap_.insert(std::make_pair(startAddress, num_blocks_));
	AddBlockMap(num_blocks_);

	num_blocks_++;  // Commit the current block.
}
// Executes the cached-instruction stream for the block at PC, if one exists;
// otherwise compiles a new block at PC.
void CachedInterpreter::SingleStep() {
	const int block = GetBlockNumberFromStartAddress(PC);
	if (block < 0) {
		// No cached block at PC yet — compile one.
		Jit(PC);
		return;
	}

	Instruction* code = (Instruction*)GetCompiledCodeFromBlock(block);
	for (;;) {
		switch (code->type) {
		case Instruction::INSTRUCTION_ABORT:
			return;

		case Instruction::INSTRUCTION_TYPE_COMMON:
			code->common_callback(UGeckoInstruction(code->data));
			++code;
			break;

		case Instruction::INSTRUCTION_TYPE_CONDITIONAL: {
			// A true return value means execution must stop here.
			const bool stop = code->conditional_callback(code->data);
			++code;
			if (stop)
				return;
			break;
		}
		}
	}
}
void JitBlockCache::LinkBlockExits(int i) { JitBlock &b = blocks_[i]; if (b.invalid) { // This block is dead. Don't relink it. return; } for (int e = 0; e < MAX_JIT_BLOCK_EXITS; e++) { if (b.exitAddress[e] != INVALID_EXIT && !b.linkStatus[e]) { int destinationBlock = GetBlockNumberFromStartAddress(b.exitAddress[e]); if (destinationBlock != -1) { #if defined(ARM) const u8 *nextExit = b.exitPtrs[e + 1]; if (!nextExit) { nextExit = b.normalEntry + b.codeSize; } ARMXEmitter emit(b.exitPtrs[e]); emit.B(blocks_[destinationBlock].checkedEntry); u32 op = 0; // Overwrite with nops until the next unconditional branch. do { emit.BKPT(1); op = *((const u32 *)emit.GetCodePtr()); } while ((op & 0xFF000000) != 0xEA000000); emit.BKPT(1); emit.FlushIcache(); #elif defined(_M_IX86) || defined(_M_X64) XEmitter emit(b.exitPtrs[e]); // Okay, this is a bit ugly, but we check here if it already has a JMP. // That means it doesn't have a full exit to pad with INT 3. bool prelinked = *emit.GetCodePtr() == 0xE9; emit.JMP(blocks_[destinationBlock].checkedEntry, true); if (!prelinked) { ptrdiff_t actualSize = emit.GetWritableCodePtr() - b.exitPtrs[e]; int pad = JitBlockCache::GetBlockExitSize() - (int)actualSize; for (int i = 0; i < pad; ++i) { emit.INT3(); } } #elif defined(PPC) PPCXEmitter emit(b.exitPtrs[e]); emit.B(blocks_[destinationBlock].checkedEntry); emit.FlushIcache(); #endif b.linkStatus[e] = true; } } } }
void JitBlockCache::DestroyBlock(int block_num, bool invalidate) { if (block_num < 0 || block_num >= num_blocks_) { ERROR_LOG_REPORT(JIT, "DestroyBlock: Invalid block number %d", block_num); return; } JitBlock *b = &blocks_[block_num]; // No point it being in there anymore. RemoveBlockMap(block_num); // Pure proxy blocks always point directly to a real block, there should be no chains of // proxy-only blocks pointing to proxy-only blocks. // Follow a block proxy chain. // Destroy the block that transitively has this as a proxy. Likely the root block once inlined // this block or its 'parent', so now that this block has changed, the root block must be destroyed. if (b->proxyFor) { for (size_t i = 0; i < b->proxyFor->size(); i++) { int proxied_blocknum = GetBlockNumberFromStartAddress((*b->proxyFor)[i], false); // If it was already cleared, we don't know which to destroy. if (proxied_blocknum != -1) { DestroyBlock(proxied_blocknum, invalidate); } } b->proxyFor->clear(); delete b->proxyFor; b->proxyFor = 0; } auto range = proxyBlockMap_.equal_range(b->originalAddress); for (auto it = range.first; it != range.second; ++it) { if (it->second == block_num) { // Found it. Delete and bail. proxyBlockMap_.erase(it); break; } } // TODO: Handle the case when there's a proxy block and a regular JIT block at the same location. // In this case we probably "leak" the proxy block currently (no memory leak but it'll stay enabled). if (b->invalid) { if (invalidate) ERROR_LOG(JIT, "Invalidating invalid block %d", block_num); return; } b->invalid = true; if (Memory::ReadUnchecked_U32(b->originalAddress) == GetEmuHackOpForBlock(block_num).encoding) Memory::Write_Opcode_JIT(b->originalAddress, b->originalFirstOpcode); // It's not safe to set normalEntry to 0 here, since we use a binary search // that looks at that later to find blocks. Marking it invalid is enough. UnlinkBlock(block_num); #if defined(ARM) // Send anyone who tries to run this block back to the dispatcher. 
// Not entirely ideal, but .. pretty good. // I hope there's enough space... // checkedEntry is the only "linked" entrance so it's enough to overwrite that. ARMXEmitter emit((u8 *)b->checkedEntry); emit.MOVI2R(R0, b->originalAddress); emit.STR(R0, CTXREG, offsetof(MIPSState, pc)); emit.B(MIPSComp::jit->dispatcher); emit.FlushIcache(); #elif defined(_M_IX86) || defined(_M_X64) // Send anyone who tries to run this block back to the dispatcher. // Not entirely ideal, but .. pretty good. // Spurious entrances from previously linked blocks can only come through checkedEntry XEmitter emit((u8 *)b->checkedEntry); emit.MOV(32, M(&mips_->pc), Imm32(b->originalAddress)); emit.JMP(MIPSComp::jit->Asm().dispatcher, true); #elif defined(PPC) PPCXEmitter emit((u8 *)b->checkedEntry); emit.MOVI2R(R3, b->originalAddress); emit.STW(R0, CTXREG, offsetof(MIPSState, pc)); emit.B(MIPSComp::jit->dispatcher); emit.FlushIcache(); #endif }