Example #1
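	// Records the current PC as a site that needs an injected exception check (here, FIFO
	// writes) and invalidates the containing JIT block so it is recompiled with that check.
	// GetOpInfo is consulted to confirm the instruction at PC is actually a store.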
	void CompileExceptionCheck(ExceptionType type)
	{
		if (!jit)
			return;

		std::unordered_set<u32>* exception_addresses = nullptr;

		switch (type)
		{
		case ExceptionType::EXCEPTIONS_FIFO_WRITE:
			exception_addresses = &jit->js.fifoWriteAddresses;
			break;
		}

		if (PC != 0 && (exception_addresses->find(PC)) == (exception_addresses->end()))
		{
			int optype = GetOpInfo(Memory::ReadUnchecked_U32(PC))->type;
			if (optype == OPTYPE_STORE || optype == OPTYPE_STOREFP || (optype == OPTYPE_STOREPS))
			{
				exception_addresses->insert(PC);

				// Invalidate the JIT block so that it gets recompiled with the external exception check included.
				jit->GetBlockCache()->InvalidateICache(PC, 4, true);
			}
		}
	}
Example #2
	void CompileExceptionCheck(ExceptionType type)
	{
		if (!jit)
			return;

		std::unordered_set<u32>* exception_addresses = nullptr;

		switch (type)
		{
		case ExceptionType::EXCEPTIONS_FIFO_WRITE:
			exception_addresses = &jit->js.fifoWriteAddresses;
			break;
		case ExceptionType::EXCEPTIONS_PAIRED_QUANTIZE:
			exception_addresses = &jit->js.pairedQuantizeAddresses;
			break;
		}

		if (PC != 0 && (exception_addresses->find(PC)) == (exception_addresses->end()))
		{
			if (type == ExceptionType::EXCEPTIONS_FIFO_WRITE)
			{
				// Check in case the code has been replaced since: do we need to do this?
				int optype = GetOpInfo(PowerPC::HostRead_U32(PC))->type;
				if (optype != OPTYPE_STORE && optype != OPTYPE_STOREFP && (optype != OPTYPE_STOREPS))
					return;
			}
			exception_addresses->insert(PC);

			// Invalidate the JIT block so that it gets recompiled with the external exception check included.
			jit->GetBlockCache()->InvalidateICache(PC, 4, true);
		}
	}
Example #3
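// Looks up the table entry for the decoded instruction and bumps its runCount,
// presumably as part of per-opcode profiling statistics.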
void CountInstruction(UGeckoInstruction _inst)
{
  GekkoOPInfo* info = GetOpInfo(_inst);
  if (info)
  {
    info->runCount++;
  }
}
Example #4
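// Flushes complete GATHER_PIPE_SIZE chunks from the write-gather pipe into the FIFO in
// memory, advancing and wrapping the CPU write pointer. GetOpInfo is then used on the
// instruction at PC to record store instructions that write to the FIFO, so the JIT can
// recompile them with the external exception check included.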
void STACKALIGN CheckGatherPipe()
{
	if (m_gatherPipeCount >= GATHER_PIPE_SIZE)
	{
		u32 cnt;
		u8* curMem = Memory::GetPointer(ProcessorInterface::Fifo_CPUWritePointer);
		for (cnt = 0; m_gatherPipeCount >= GATHER_PIPE_SIZE; cnt += GATHER_PIPE_SIZE)
		{
			// copy the GatherPipe
			memcpy(curMem, m_gatherPipe + cnt, GATHER_PIPE_SIZE);
			m_gatherPipeCount -= GATHER_PIPE_SIZE;

			// increase the CPUWritePointer
			if (ProcessorInterface::Fifo_CPUWritePointer == ProcessorInterface::Fifo_CPUEnd)
			{
				ProcessorInterface::Fifo_CPUWritePointer = ProcessorInterface::Fifo_CPUBase;
				curMem = Memory::GetPointer(ProcessorInterface::Fifo_CPUWritePointer);
			}
			else
			{
				curMem += GATHER_PIPE_SIZE;
				ProcessorInterface::Fifo_CPUWritePointer += GATHER_PIPE_SIZE;
			}

			g_video_backend->Video_GatherPipeBursted();
		}

		// move back the spill bytes
		memmove(m_gatherPipe, m_gatherPipe + cnt, m_gatherPipeCount);

		// Profile where the FIFO writes are occurring.
		if (jit && PC != 0 && (jit->js.fifoWriteAddresses.find(PC)) == (jit->js.fifoWriteAddresses.end()))
		{
			// Log only stores, fp stores and ps stores, filtering out other instructions that arrive via optimizeGatherPipe
			int type = GetOpInfo(Memory::ReadUnchecked_U32(PC))->type;
			if (type == OPTYPE_STORE || type == OPTYPE_STOREFP || (type == OPTYPE_PS && !strcmp(GetOpInfo(Memory::ReadUnchecked_U32(PC))->opname, "psq_st")))
			{
				jit->js.fifoWriteAddresses.insert(PC);

				// Invalidate the JIT block so that it gets recompiled with the external exception check included.
				jit->GetBlockCache()->InvalidateICache(PC, 4);
			}
		}
	}
}
Example #5
void CompileExceptionCheck(ExceptionType type)
{
  if (!g_jit)
    return;

  std::unordered_set<u32>* exception_addresses = nullptr;

  switch (type)
  {
  case ExceptionType::FIFOWrite:
    exception_addresses = &g_jit->js.fifoWriteAddresses;
    break;
  case ExceptionType::PairedQuantize:
    exception_addresses = &g_jit->js.pairedQuantizeAddresses;
    break;
  case ExceptionType::SpeculativeConstants:
    exception_addresses = &g_jit->js.noSpeculativeConstantsAddresses;
    break;
  }

  if (PC != 0 && (exception_addresses->find(PC)) == (exception_addresses->end()))
  {
    if (type == ExceptionType::FIFOWrite)
    {
      // Check in case the code has been replaced since: do we need to do this?
      const ::OpType optype = GetOpInfo(PowerPC::HostRead_U32(PC))->type;
      if (optype != ::OpType::Store && optype != ::OpType::StoreFP && optype != ::OpType::StorePS)
        return;
    }
    exception_addresses->insert(PC);

    // Invalidate the JIT block so that it gets recompiled with the external exception check
    // included.
    g_jit->GetBlockCache()->InvalidateICache(PC, 4, true);
  }
}
Example #6
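// Interprets a single instruction: dispatches HLE hooks, fetches the opcode, raises an
// FPU-unavailable exception when MSR.FP is clear and the instruction needs the FPU,
// checks for DSI/fetch exceptions, and returns the cycle count from the opcode's
// GekkoOPInfo entry.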
int Interpreter::SingleStepInner()
{
	static UGeckoInstruction instCode;
	u32 function = HLE::GetFunctionIndex(PC);
	if (function != 0)
	{
		int type = HLE::GetFunctionTypeByIndex(function);
		if (type == HLE::HLE_HOOK_START || type == HLE::HLE_HOOK_REPLACE)
		{
			int flags = HLE::GetFunctionFlagsByIndex(function);
			if (HLE::IsEnabled(flags))
			{
				HLEFunction(function);
				if (type == HLE::HLE_HOOK_START)
				{
					// Run the original.
					function = 0;
				}
			}
			else
			{
				function = 0;
			}
		}
	}

	if (function == 0)
	{
		#ifdef USE_GDBSTUB
		if (gdb_active() && gdb_bp_x(PC))
		{
			Host_UpdateDisasmDialog();

			gdb_signal(SIGTRAP);
			gdb_handle_exception();
		}
		#endif

		NPC = PC + sizeof(UGeckoInstruction);
		instCode.hex = PowerPC::Read_Opcode(PC);

		// Uncomment to trace the interpreter
		//if ((PC & 0xffffff)>=0x0ab54c && (PC & 0xffffff)<=0x0ab624)
		//	startTrace = 1;
		//else
		//	startTrace = 0;

		if (startTrace)
		{
			Trace(instCode);
		}

		if (instCode.hex != 0)
		{
			UReg_MSR& msr = (UReg_MSR&)MSR;
			if (msr.FP)  //If FPU is enabled, just execute
			{
				m_opTable[instCode.OPCD](instCode);
				if (PowerPC::ppcState.Exceptions & EXCEPTION_DSI)
				{
					PowerPC::CheckExceptions();
					m_EndBlock = true;
				}
			}
			else
			{
				// check if we have to generate a FPU unavailable exception
				if (!PPCTables::UsesFPU(instCode))
				{
					m_opTable[instCode.OPCD](instCode);
					if (PowerPC::ppcState.Exceptions & EXCEPTION_DSI)
					{
						PowerPC::CheckExceptions();
						m_EndBlock = true;
					}
				}
				else
				{
					PowerPC::ppcState.Exceptions |= EXCEPTION_FPU_UNAVAILABLE;
					PowerPC::CheckExceptions();
					m_EndBlock = true;
				}
			}
		}
		else
		{
			// Memory exception on instruction fetch
			PowerPC::CheckExceptions();
			m_EndBlock = true;
		}
	}
	last_pc = PC;
	PC = NPC;

	GekkoOPInfo *opinfo = GetOpInfo(instCode);
	return opinfo->numCycles;
}
Example #7
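// An instruction is considered valid when GetOpInfo finds a table entry for it.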
bool IsValidInstruction(UGeckoInstruction _inst)
{
  const GekkoOPInfo* info = GetOpInfo(_inst);
  return info != nullptr;
}
Example #8
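// Returns the mnemonic from the opcode's table entry, or nullptr for unknown opcodes.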
const char* GetInstructionName(UGeckoInstruction _inst)
{
  const GekkoOPInfo* info = GetOpInfo(_inst);
  return info ? info->opname : nullptr;
}
Example #9
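// Reports whether the instruction uses the FPU, based on the FL_USE_FPU flag in its
// table entry.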
bool UsesFPU(UGeckoInstruction inst)
{
  GekkoOPInfo* const info = GetOpInfo(inst);

  return (info->flags & FL_USE_FPU) != 0;
}
Example #10
int Interpreter::SingleStepInner(void)
{
	static UGeckoInstruction instCode;
	u32 function = m_EndBlock ? HLE::GetFunctionIndex(PC) : 0; // Check for HLE functions after branches
	if (function != 0)
	{
		int type = HLE::GetFunctionTypeByIndex(function);
		if (type == HLE::HLE_HOOK_START || type == HLE::HLE_HOOK_REPLACE)
		{
			int flags = HLE::GetFunctionFlagsByIndex(function);
			if (HLE::IsEnabled(flags))
			{
				HLEFunction(function);
				if (type == HLE::HLE_HOOK_START)
				{
					// Run the original.
					function = 0;
				}
			}
			else
			{
				function = 0;
			}
		}
	}

	if (function == 0)
	{
		#ifdef USE_GDBSTUB
		if (gdb_active() && gdb_bp_x(PC)) {

			Host_UpdateDisasmDialog();

			gdb_signal(SIGTRAP);
			gdb_handle_exception();
		}
		#endif

		NPC = PC + sizeof(UGeckoInstruction);
		instCode.hex = Memory::Read_Opcode(PC);

		// Uncomment to trace the interpreter
		//if ((PC & 0xffffff)>=0x0ab54c && (PC & 0xffffff)<=0x0ab624)
		//	startTrace = 1;
		//else
		//	startTrace = 0;

		if (startTrace)
		{
			Trace(instCode);
		}

		if (instCode.hex != 0)
		{
			UReg_MSR& msr = (UReg_MSR&)MSR;
			if (msr.FP)  //If FPU is enabled, just execute
			{
				m_opTable[instCode.OPCD](instCode);
				if (PowerPC::ppcState.Exceptions & EXCEPTION_DSI)
				{
					PowerPC::CheckExceptions();
					m_EndBlock = true;
				}
			}
			else
			{
				// check if we have to generate a FPU unavailable exception
				if (!PPCTables::UsesFPU(instCode))
				{
					m_opTable[instCode.OPCD](instCode);
					if (PowerPC::ppcState.Exceptions & EXCEPTION_DSI)
					{
						PowerPC::CheckExceptions();
						m_EndBlock = true;
					}
				}
				else
				{
					Common::AtomicOr(PowerPC::ppcState.Exceptions, EXCEPTION_FPU_UNAVAILABLE);
					PowerPC::CheckExceptions();
					m_EndBlock = true;
				}
			}
		}
		else
		{
			// Memory exception on instruction fetch
			PowerPC::CheckExceptions();
			m_EndBlock = true;
		}
	}
	last_pc = PC;
	PC = NPC;

#if defined(_DEBUG) || defined(DEBUGFAST)
	if (PowerPC::ppcState.gpr[1] == 0)
	{
		WARN_LOG(POWERPC, "%i Corrupt stack", PowerPC::ppcState.DebugCount);
	}
	PowerPC::ppcState.DebugCount++;
#endif
	patches();

	GekkoOPInfo *opinfo = GetOpInfo(instCode);
	return opinfo->numCyclesMinusOne + 1;
}
Example #11
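// Decodes up to blockSize instructions starting at 'address' into the code buffer,
// optionally following simple branches, then runs a backward pass for flag/register
// liveness and a forward pass for single/duplicated float state. GetOpInfo supplies the
// per-opcode metadata (cycles, flags, type, mnemonic) used throughout.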
u32 PPCAnalyzer::Analyze(u32 address, CodeBlock *block, CodeBuffer *buffer, u32 blockSize)
{
	// Clear block stats
	memset(block->m_stats, 0, sizeof(BlockStats));

	// Clear register stats
	block->m_gpa->any = true;
	block->m_fpa->any = false;

	block->m_gpa->Clear();
	block->m_fpa->Clear();

	// Set the blocks start address
	block->m_address = address;

	// Reset our block state
	block->m_broken = false;
	block->m_memory_exception = false;
	block->m_num_instructions = 0;

	if (address == 0)
	{
		// Memory exception occurred during instruction fetch
		block->m_memory_exception = true;
		return address;
	}

	if (SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU && (address & JIT_ICACHE_VMEM_BIT))
	{
		if (!Memory::TranslateAddress(address, Memory::FLAG_NO_EXCEPTION))
		{
			// Memory exception occurred during instruction fetch
			block->m_memory_exception = true;
			return address;
		}
	}

	CodeOp *code = buffer->codebuffer;

	bool found_exit = false;
	u32 return_address = 0;
	u32 numFollows = 0;
	u32 num_inst = 0;

	for (u32 i = 0; i < blockSize; ++i)
	{
		UGeckoInstruction inst = JitInterface::ReadOpcodeJIT(address);

		if (inst.hex != 0)
		{
			num_inst++;
			memset(&code[i], 0, sizeof(CodeOp));
			GekkoOPInfo *opinfo = GetOpInfo(inst);

			code[i].opinfo = opinfo;
			code[i].address = address;
			code[i].inst = inst;
			code[i].branchTo = -1;
			code[i].branchToIndex = -1;
			code[i].skip = false;
			block->m_stats->numCycles += opinfo->numCycles;

			SetInstructionStats(block, &code[i], opinfo, i);

			bool follow = false;
			u32 destination = 0;

			bool conditional_continue = false;

			// Do we inline leaf functions?
			if (HasOption(OPTION_LEAF_INLINE))
			{
				if (inst.OPCD == 18 && blockSize > 1)
				{
					//Is bx - should we inline? yes!
					if (inst.AA)
						destination = SignExt26(inst.LI << 2);
					else
						destination = address + SignExt26(inst.LI << 2);
					if (destination != block->m_address)
						follow = true;
				}
				else if (inst.OPCD == 19 && inst.SUBOP10 == 16 &&
					(inst.BO & (1 << 4)) && (inst.BO & (1 << 2)) &&
					return_address != 0)
				{
					// bclrx with unconditional branch = return
					follow = true;
					destination = return_address;
					return_address = 0;

					if (inst.LK)
						return_address = address + 4;
				}
				else if (inst.OPCD == 31 && inst.SUBOP10 == 467)
				{
					// mtspr
					const u32 index = (inst.SPRU << 5) | (inst.SPRL & 0x1F);
					if (index == SPR_LR)
					{
						// We give up on following the return address here
						// because we would have to check the register usage.
						return_address = 0;
					}
				}

				// TODO: Find the optimal value for FUNCTION_FOLLOWING_THRESHOLD.
				//       If it is small, the performance will be down.
				//       If it is big, the size of generated code will be big and
				//       cache clearing will happen many times.
				// TODO: Investigate the reason why
				//       "0" is fastest in some games, MP2 for example.
				if (numFollows > FUNCTION_FOLLOWING_THRESHOLD)
					follow = false;
			}

			if (HasOption(OPTION_CONDITIONAL_CONTINUE))
			{
				if (inst.OPCD == 16 &&
				   ((inst.BO & BO_DONT_DECREMENT_FLAG) == 0 || (inst.BO & BO_DONT_CHECK_CONDITION) == 0))
				{
					// bcx with conditional branch
					conditional_continue = true;
				}
				else if (inst.OPCD == 19 && inst.SUBOP10 == 16 &&
				        ((inst.BO & BO_DONT_DECREMENT_FLAG) == 0 || (inst.BO & BO_DONT_CHECK_CONDITION) == 0))
				{
					// bclrx with conditional branch
					conditional_continue = true;
				}
				else if (inst.OPCD == 3 ||
					  (inst.OPCD == 31 && inst.SUBOP10 == 4))
				{
					// tw/twi tests and raises an exception
					conditional_continue = true;
				}
				else if (inst.OPCD == 19 && inst.SUBOP10 == 528 &&
				        (inst.BO_2 & BO_DONT_CHECK_CONDITION) == 0)
				{
					// Rare bcctrx with conditional branch
					// Seen in NES games
					conditional_continue = true;
				}
			}

			if (!follow)
			{
				address += 4;
				if (!conditional_continue && opinfo->flags & FL_ENDBLOCK) //right now we stop early
				{
					found_exit = true;
					break;
				}
			}
			// XXX: We don't support inlining yet.
#if 0
			else
			{
				numFollows++;
				// We don't "code[i].skip = true" here
				// because bx may store a certain value to the link register.
				// Instead, we skip a part of bx in Jit**::bx().
				address = destination;
				merged_addresses[size_of_merged_addresses++] = address;
			}
#endif
		}
		else
		{
			// ISI exception or other critical memory exception occurred (game over)
			ERROR_LOG(DYNA_REC, "Instruction hex was 0!");
			break;
		}
	}

	block->m_num_instructions = num_inst;

	if (block->m_num_instructions > 1)
		ReorderInstructions(block->m_num_instructions, code);

	if ((!found_exit && num_inst > 0) || blockSize == 1)
	{
		// We couldn't find an exit
		block->m_broken = true;
	}

	// Scan for flag dependencies; assume the next block (or any branch that can leave the block)
	// wants flags, to be safe.
	bool wantsCR0 = true, wantsCR1 = true, wantsFPRF = true, wantsCA = true;
	BitSet32 fprInUse, gprInUse, gprInReg, fprInXmm;
	for (int i = block->m_num_instructions - 1; i >= 0; i--)
	{
		bool opWantsCR0 = code[i].wantsCR0;
		bool opWantsCR1 = code[i].wantsCR1;
		bool opWantsFPRF = code[i].wantsFPRF;
		bool opWantsCA = code[i].wantsCA;
		code[i].wantsCR0 = wantsCR0 || code[i].canEndBlock;
		code[i].wantsCR1 = wantsCR1 || code[i].canEndBlock;
		code[i].wantsFPRF = wantsFPRF || code[i].canEndBlock;
		code[i].wantsCA = wantsCA || code[i].canEndBlock;
		wantsCR0 |= opWantsCR0 || code[i].canEndBlock;
		wantsCR1 |= opWantsCR1 || code[i].canEndBlock;
		wantsFPRF |= opWantsFPRF || code[i].canEndBlock;
		wantsCA |= opWantsCA || code[i].canEndBlock;
		wantsCR0 &= !code[i].outputCR0 || opWantsCR0;
		wantsCR1 &= !code[i].outputCR1 || opWantsCR1;
		wantsFPRF &= !code[i].outputFPRF || opWantsFPRF;
		wantsCA &= !code[i].outputCA || opWantsCA;
		code[i].gprInUse = gprInUse;
		code[i].fprInUse = fprInUse;
		code[i].gprInReg = gprInReg;
		code[i].fprInXmm = fprInXmm;
		// TODO: if there's no possible endblocks or exceptions in between, tell the regcache
		// we can throw away a register if it's going to be overwritten later.
		gprInUse |= code[i].regsIn;
		gprInReg |= code[i].regsIn;
		fprInUse |= code[i].fregsIn;
		if (strncmp(code[i].opinfo->opname, "stfd", 4))
			fprInXmm |= code[i].fregsIn;
		// For now, we need to count output registers as "used" though; otherwise the flush
		// will result in a redundant store (e.g. store to regcache, then store again to
		// the same location later).
		gprInUse |= code[i].regsOut;
		if (code[i].fregOut >= 0)
			fprInUse[code[i].fregOut] = true;
	}

	// Forward scan, for flags that need the other direction for calculation.
	BitSet32 fprIsSingle, fprIsDuplicated, fprIsStoreSafe;
	for (u32 i = 0; i < block->m_num_instructions; i++)
	{
		code[i].fprIsSingle = fprIsSingle;
		code[i].fprIsDuplicated = fprIsDuplicated;
		code[i].fprIsStoreSafe = fprIsStoreSafe;
		if (code[i].fregOut >= 0)
		{
			fprIsSingle[code[i].fregOut] = false;
			fprIsDuplicated[code[i].fregOut] = false;
			fprIsStoreSafe[code[i].fregOut] = false;
			// Single, duplicated, and doesn't need PPC_FP.
			if (code[i].opinfo->type == OPTYPE_SINGLEFP)
			{
				fprIsSingle[code[i].fregOut] = true;
				fprIsDuplicated[code[i].fregOut] = true;
				fprIsStoreSafe[code[i].fregOut] = true;
			}
			// Single and duplicated, but might be a denormal (not safe to skip PPC_FP).
			// TODO: if we go directly from a load to store, skip conversion entirely?
			// TODO: if we go directly from a load to a float instruction, and the value isn't used
			// for anything else, we can skip PPC_FP on a load too.
			if (!strncmp(code[i].opinfo->opname, "lfs", 3))
			{
				fprIsSingle[code[i].fregOut] = true;
				fprIsDuplicated[code[i].fregOut] = true;
			}
			// Paired are still floats, but the top/bottom halves may differ.
			if (code[i].opinfo->type == OPTYPE_PS || code[i].opinfo->type == OPTYPE_LOADPS)
			{
				fprIsSingle[code[i].fregOut] = true;
				fprIsStoreSafe[code[i].fregOut] = true;
			}
			// Careful: changing the float mode in a block breaks this optimization, since
			// a previous float op might have had FTZ off while the later store has FTZ
			// on. So, discard all information we have.
			if (!strncmp(code[i].opinfo->opname, "mtfs", 4))
				fprIsStoreSafe = BitSet32(0);
		}
	}
	return address;
}