Example #1
s32 sys_spu_thread_switch_system_module(SPUThread & spu, u32 status)
{
    if (spu.get_ch_count(SPU_RdInMbox))
    {
        return CELL_EBUSY;
    }

    // Cancel any pending status update requests
    spu.set_ch_value(MFC_WrTagUpdate, 0);
    while (spu.get_ch_count(MFC_RdTagStat) != 1);
    spu.get_ch_value(MFC_RdTagStat);

    // Wait for all pending DMA operations to complete
    spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
    spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
    spu.get_ch_value(MFC_RdTagStat);

    s32 result;

    // Re-post the status until the reply from the inbound mailbox is no longer CELL_EBUSY
    do
    {
        spu.set_ch_value(SPU_WrOutMbox, status);
        spu.stop_and_signal(0x120);
    }
    while ((result = spu.get_ch_value(SPU_RdInMbox)) == CELL_EBUSY);

    return result;
}
Example #2
s32 sys_spu_thread_send_event(SPUThread & spu, u8 spup, u32 data0, u32 data1)
{
    if (spup > 0x3F)
    {
        return CELL_EINVAL;
    }

    if (spu.get_ch_count(SPU_RdInMbox))
    {
        return CELL_EBUSY;
    }

    // data1 goes out through the ordinary mailbox; the interrupt mailbox word
    // carries the target event port in its top byte and the low 24 bits of data0
    spu.set_ch_value(SPU_WrOutMbox, data1);
    spu.set_ch_value(SPU_WrOutIntrMbox, (spup << 24) | (data0 & 0x00FFFFFF));

    // The result of the event send comes back through the inbound mailbox
    return spu.get_ch_value(SPU_RdInMbox);
}
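
A quick note on the mailbox word built in Example #2: the target event port (spup, at most 0x3F) occupies the top byte and the low 24 bits of data0 fill the rest. The standalone sketch below is not part of the original source; the helper name pack_spup_word is made up purely to illustrate that packing.

#include <cstdint>
#include <cassert>

// Hypothetical helper mirroring the expression used in Example #2:
// (spup << 24) | (data0 & 0x00FFFFFF)
static uint32_t pack_spup_word(uint8_t spup, uint32_t data0)
{
    return (static_cast<uint32_t>(spup) << 24) | (data0 & 0x00FFFFFF);
}

int main()
{
    const uint32_t word = pack_spup_word(0x3F, 0x12ABCDEF);
    assert(word == 0x3FABCDEF); // port in bits 24..31, 24-bit payload below
}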
Example #3
void spu_interpreter::BI(SPUThread& CPU, spu_opcode_t op)
{
	// The interrupt enable/disable (d/e) bits are not supported by this interpreter
	if (op.d || op.e)
	{
		throw __FUNCTION__;
	}

	// Branch indirect: jump to the address in the preferred word of ra
	CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
}
Example #4
void spu_interpreter::BISL(SPUThread& CPU, spu_opcode_t op)
{
	// The interrupt enable/disable (d/e) bits are not supported by this interpreter
	if (op.d || op.e)
	{
		throw __FUNCTION__;
	}

	// Branch indirect and set link: save the return address in rt, then jump
	const u32 target = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0);
	CPU.GPR[op.rt] = u128::from32r(CPU.PC + 4);
	CPU.SetBranch(target);
}
Example #5
void sys_spu_thread_group_exit(SPUThread & spu, s32 status)
{
    // Cancel any pending status update requests
    spu.set_ch_value(MFC_WrTagUpdate, 0);
    while (spu.get_ch_count(MFC_RdTagStat) != 1);
    spu.get_ch_value(MFC_RdTagStat);

    // Wait for all pending DMA operations to complete
    spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
    spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
    spu.get_ch_value(MFC_RdTagStat);

    spu.set_ch_value(SPU_WrOutMbox, status);
    spu.stop_and_signal(0x101);
}
Example #6
void spu_interpreter::RCHCNT(SPUThread& CPU, spu_opcode_t op)
{
	// Read the count of channel ra into the preferred word of rt
	CPU.GPR[op.rt] = u128::from32r(CPU.get_ch_count(op.ra));
}
Example #7
void spu_interpreter::RDCH(SPUThread& CPU, spu_opcode_t op)
{
	// Read a value from channel ra into the preferred word of rt
	CPU.GPR[op.rt] = u128::from32r(CPU.get_ch_value(op.ra));
}
Example #8
void spu_interpreter::STOP(SPUThread& CPU, spu_opcode_t op)
{
	// Stop and signal with the 14-bit code encoded in the opcode
	CPU.stop_and_signal(op.opcode & 0x3fff);
}
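
Example #8 is where the stop-and-signal codes used elsewhere on this page (0x101 in Example #5, 0x120 in Example #1) enter the pipeline: they sit in the low 14 bits of the STOP instruction word. A minimal, self-contained sketch of that extraction, not taken from the emulator source and assuming the reserved bits of the instruction are zero:

#include <cstdint>
#include <cassert>

int main()
{
    // A STOP instruction word carrying code 0x101 (the code Example #5 signals)
    const uint32_t opcode = 0x00000101;
    assert((opcode & 0x3fff) == 0x101); // same mask as in Example #8
}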
Example #9
void spu_interpreter::LQX(SPUThread& CPU, spu_opcode_t op)
{
	// Load quadword (x-form): ra + rb, wrapped into local store and aligned to 16 bytes
	CPU.GPR[op.rt] = CPU.read128((CPU.GPR[op.ra]._u32[3] + CPU.GPR[op.rb]._u32[3]) & 0x3fff0);
}
Example #10
void spu_interpreter::STQX(SPUThread& CPU, spu_opcode_t op)
{
	// Store quadword (x-form): ra + rb, wrapped into local store and aligned to 16 bytes
	CPU.write128((CPU.GPR[op.ra]._u32[3] + CPU.GPR[op.rb]._u32[3]) & 0x3fff0, CPU.GPR[op.rt]);
}
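
Examples #9 and #10 compute the local-store address the same way: the sum of the two preferred words is masked with 0x3fff0, which both wraps the address into the 256 KB local store and forces 16-byte alignment. A small standalone check of that arithmetic, for illustration only and not part of the emulator code:

#include <cstdint>
#include <cassert>

int main()
{
    const uint32_t ra  = 0x00010008;
    const uint32_t rb  = 0x0000000c;
    const uint32_t lsa = (ra + rb) & 0x3fff0;   // same mask as LQX/STQX
    assert(lsa == 0x10010);                     // quadword-aligned, inside the 0x40000-byte LS
    assert((0x50000u & 0x3fff0) == 0x10000);    // addresses beyond LS wrap around
}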
Example #11
void spu_interpreter::WRCH(SPUThread& CPU, spu_opcode_t op)
{
	// Write the preferred word of rt to channel ra
	CPU.set_ch_value(op.ra, CPU.GPR[op.rt]._u32[3]);
}
Example #12
void SPUThread::ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size)
{
	// Barrier/fence flags are approximated with a full memory fence
	if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK)) _mm_mfence();

	if (ea >= SYS_SPU_THREAD_BASE_LOW)
	{
		if (ea >= 0x100000000)
		{
			LOG_DMAC(LOG_ERROR, "Invalid external address");
			Emu.Pause();
			return;
		}
		else if (group)
		{
			// SPU Thread Group MMIO (LS and SNR)
			u32 num = (ea & SYS_SPU_THREAD_BASE_MASK) / SYS_SPU_THREAD_OFFSET; // thread number in group
			if (num >= group->list.size() || !group->list[num])
			{
				LOG_DMAC(LOG_ERROR, "Invalid thread (SPU Thread Group MMIO)");
				Emu.Pause();
				return;
			}

			SPUThread* spu = (SPUThread*)Emu.GetCPU().GetThread(group->list[num]);

			u32 addr = (ea & SYS_SPU_THREAD_BASE_MASK) % SYS_SPU_THREAD_OFFSET;
			if ((addr <= 0x3ffff) && (addr + size <= 0x40000))
			{
				// LS access
				ea = spu->dmac.ls_offset + addr;
			}
			else if ((cmd & MFC_PUT_CMD) && size == 4 && (addr == SYS_SPU_THREAD_SNR1 || addr == SYS_SPU_THREAD_SNR2))
			{
				spu->WriteSNR(SYS_SPU_THREAD_SNR2 == addr, vm::read32(dmac.ls_offset + lsa));
				return;
			}
			else
			{
				LOG_DMAC(LOG_ERROR, "Invalid register (SPU Thread Group MMIO)");
				Emu.Pause();
				return;
			}
		}
		else
		{
			LOG_DMAC(LOG_ERROR, "Thread group not set (SPU Thread Group MMIO)");
			Emu.Pause();
			return;
		}
	}
	// 32-bit access to the memory-mapped area of a raw SPU
	else if (ea >= RAW_SPU_BASE_ADDR && size == 4)
	{
		switch (cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK | MFC_LIST_MASK | MFC_RESULT_MASK))
		{
		case MFC_PUT_CMD:
		{
			vm::write32(ea, ReadLS32(lsa));
			return;
		}

		case MFC_GET_CMD:
		{
			WriteLS32(lsa, vm::read32(ea));
			return;
		}

		default:
		{
			LOG_DMAC(LOG_ERROR, "Unknown DMA command");
			Emu.Pause();
			return;
		}
		}
	}

	// Ordinary DMA transfer between main memory and local store
	switch (cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK | MFC_LIST_MASK | MFC_RESULT_MASK))
	{
	case MFC_PUT_CMD:
	{
		memcpy(vm::get_ptr<void>(ea), vm::get_ptr<void>(dmac.ls_offset + lsa), size);
		return;
	}

	case MFC_GET_CMD:
	{
		memcpy(vm::get_ptr<void>(dmac.ls_offset + lsa), vm::get_ptr<void>(ea), size);
		return;
	}

	default:
	{
		LOG_DMAC(LOG_ERROR, "Unknown DMA command");
		Emu.Pause();
		return;
	}
	}
}
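
The interesting part of Example #12 is how a thread-group MMIO effective address is split into a thread index and a per-thread register offset. The sketch below reproduces that arithmetic with the constants written out explicitly; the values (1 MiB stride per thread, 256 KB local store, 0xF0000000 base) are assumed from the CELL SDK headers rather than quoted from this source, so treat them as illustrative.

#include <cstdint>
#include <cstdio>

// Assumed values, not shown in the excerpt above:
static const uint32_t SYS_SPU_THREAD_OFFSET    = 0x00100000; // 1 MiB stride per thread
static const uint32_t SYS_SPU_THREAD_BASE_MASK = 0x0FFFFFFF; // strips the 0xF... base
static const uint32_t LS_SIZE                  = 0x40000;    // 256 KB local store

int main()
{
    // An address inside the third thread's block, assuming base 0xF0000000
    const uint64_t ea   = 0xF0000000ull + 2 * SYS_SPU_THREAD_OFFSET + 0x1234;
    const uint32_t num  = (uint32_t(ea) & SYS_SPU_THREAD_BASE_MASK) / SYS_SPU_THREAD_OFFSET;
    const uint32_t addr = (uint32_t(ea) & SYS_SPU_THREAD_BASE_MASK) % SYS_SPU_THREAD_OFFSET;

    std::printf("thread %u, offset 0x%x (LS access: %s)\n",
                num, addr, addr < LS_SIZE ? "yes" : "no");
    return 0;
}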