// Executes an MFC DMA list command: walks the list of 8-byte transfer
// elements stored in local storage at (args.ea & 0x3ffff) and issues one
// plain DMA transfer per non-empty element. If an element has the
// Stall-and-Notify bit set, the remainder of the list is queued on
// mfc_queue and the matching bit is raised in ch_stall_stat.
//
// cmd  - MFC command code; must have MFC_LIST_MASK set.
// args - MFC arguments: ea holds the LS address of the list (low 18 bits),
//        size is the list size in bytes (8 bytes per element).
void SPUThread::do_dma_list_cmd(u32 cmd, spu_mfc_arg_t args)
{
	if (!(cmd & MFC_LIST_MASK))
	{
		throw EXCEPTION("Invalid command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, args.lsa, args.ea, args.tag, args.size);
	}

	const u32 list_addr = args.ea & 0x3ffff; // list lives in local storage (18-bit LS address)
	const u32 list_size = args.size / 8;     // each list element is 8 bytes

	args.lsa &= 0x3fff0; // LSA is 16-byte aligned for list transfers

	struct list_element
	{
		be_t<u16> sb; // Stall-and-Notify bit (0x8000)
		be_t<u16> ts; // List Transfer Size
		be_t<u32> ea; // External Address Low
	};

	for (u32 i = 0; i < list_size; i++)
	{
		auto rec = vm::ptr<list_element>::make(offset + list_addr + i * 8);

		const u32 size = rec->ts;
		const u32 addr = rec->ea;

		if (size)
		{
			spu_mfc_arg_t transfer;
			transfer.ea = addr;
			transfer.lsa = args.lsa | (addr & 0xf); // LS address inherits the EA's sub-16-byte offset
			transfer.tag = args.tag;
			transfer.size = size;

			do_dma_transfer(cmd & ~MFC_LIST_MASK, transfer);

			// LS consumption is rounded up to a 16-byte slot per element
			args.lsa += std::max<u32>(size, 16);
		}

		if (rec->sb & 0x8000) // Stall-and-Notify requested for this element
		{
			// 1u avoids signed-overflow UB when tag == 31 (tags are 5-bit)
			ch_stall_stat.set_value((1u << args.tag) | ch_stall_stat.get_value());

			// Queue the unprocessed tail of the list so it can resume after
			// the stall is acknowledged.
			spu_mfc_arg_t stalled;
			// FIX: the mask must be a 64-bit literal. `~0xffffffff` is
			// ~0xffffffffu == 0u, which zeroed the whole EA and discarded the
			// upper 32 bits instead of preserving them alongside the new
			// list offset.
			stalled.ea = (args.ea & ~0xffffffffull) | (list_addr + (i + 1) * 8);
			stalled.lsa = args.lsa;
			stalled.tag = args.tag;
			stalled.size = (list_size - i - 1) * 8;

			mfc_queue.emplace_back(cmd, stalled);
			return;
		}
	}
}
// Dispatches the MFC command currently staged in ch_mfc_args.
// Plain PUT/GET variants go straight to do_dma_transfer, list variants to
// do_dma_list_cmd, and the atomic commands (GETLLAR/PUTLLC/PUTLLUC) are
// implemented on top of the vm:: reservation API with last_raddr tracking
// the currently-held reservation address (0 = none).
// Throws EXCEPTION for unknown commands and for atomic commands whose size
// is not exactly one 128-byte reservation granule.
void SPUThread::process_mfc_cmd(u32 cmd)
{
	if (Ini.HLELogging.GetValue())
	{
		LOG_NOTICE(SPU, "DMA %s: cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
	}

	switch (cmd)
	{
	// Plain single-transfer commands: execute immediately
	case MFC_PUT_CMD:
	case MFC_PUTB_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTR_CMD:
	case MFC_PUTRB_CMD:
	case MFC_PUTRF_CMD:
	case MFC_GET_CMD:
	case MFC_GETB_CMD:
	case MFC_GETF_CMD:
	{
		return do_dma_transfer(cmd, ch_mfc_args);
	}
	// List-transfer commands: walk the element list in local storage
	case MFC_PUTL_CMD:
	case MFC_PUTLB_CMD:
	case MFC_PUTLF_CMD:
	case MFC_PUTRL_CMD:
	case MFC_PUTRLB_CMD:
	case MFC_PUTRLF_CMD:
	case MFC_GETL_CMD:
	case MFC_GETLB_CMD:
	case MFC_GETLF_CMD:
	{
		return do_dma_list_cmd(cmd, ch_mfc_args);
	}
	case MFC_GETLLAR_CMD: // acquire reservation
	{
		if (ch_mfc_args.size != 128)
		{
			break; // falls through to the "Unknown command" exception below
		}

		const u32 raddr = VM_CAST(ch_mfc_args.ea);

		// Copy the 128-byte line into LS while taking the reservation
		vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), raddr, 128);

		// Acquiring a new reservation while one is held implicitly loses the
		// old one: raise the lost-reservation event
		if (last_raddr)
		{
			ch_event_stat |= SPU_EVENT_LR;
		}

		last_raddr = raddr;

		return ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS);
	}
	case MFC_PUTLLC_CMD: // store conditionally
	{
		if (ch_mfc_args.size != 128)
		{
			break;
		}

		if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
		{
			// A successful conditional store without a prior GETLLAR should
			// be impossible; treat it as an internal error
			if (last_raddr == 0)
			{
				throw EXCEPTION("Unexpected: PUTLLC command succeeded, but GETLLAR command not detected");
			}
			last_raddr = 0;
			return ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS);
		}
		else
		{
			// Store failed: if we still believed we held a reservation, it
			// was broken by someone else — raise the lost-reservation event
			if (last_raddr != 0)
			{
				ch_event_stat |= SPU_EVENT_LR;
				last_raddr = 0;
			}
			return ch_atomic_stat.set_value(MFC_PUTLLC_FAILURE);
		}
	}
	case MFC_PUTLLUC_CMD: // store unconditionally
	case MFC_PUTQLLUC_CMD:
	{
		if (ch_mfc_args.size != 128)
		{
			break;
		}

		// Write the 128-byte line atomically with respect to reservations
		vm::reservation_op(VM_CAST(ch_mfc_args.ea), 128, [this]()
		{
			std::memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
		});

		// If the store broke our own reservation, report the loss
		if (last_raddr != 0 && vm::g_tls_did_break_reservation)
		{
			ch_event_stat |= SPU_EVENT_LR;
			last_raddr = 0;
		}

		// Only the non-queued variant reports completion via the atomic
		// status channel
		if (cmd == MFC_PUTLLUC_CMD)
		{
			ch_atomic_stat.set_value(MFC_PUTLLUC_SUCCESS);
		}
		return;
	}
	}

	throw EXCEPTION("Unknown command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
}
// Handles a 32-bit write to this Raw SPU's problem-state register area
// (offset computed relative to RAW_SPU_PROB_OFFSET). MFC argument registers
// are staged per-thread in g_tls_mfc[index] until a command write triggers
// the transfer. Returns true if the write was accepted, false for an
// unknown or illegal offset/value.
bool spu_thread::write_reg(const u32 addr, const u32 value)
{
	// Atomically transition status to RUNNING; only the winning writer
	// clears the stop flag and wakes the SPU thread.
	auto try_start = [this]()
	{
		if (status.atomic_op([](u32& status)
		{
			if (status & SPU_STATUS_RUNNING)
			{
				return false;
			}
			status = SPU_STATUS_RUNNING;
			return true;
		}))
		{
			state -= cpu_flag::stop;
			thread_ctrl::notify(static_cast<named_thread<spu_thread>&>(*this));
		}
	};

	// Register offset within this SPU's problem-state area
	const u32 offset = addr - RAW_SPU_BASE_ADDR - index * RAW_SPU_OFFSET - RAW_SPU_PROB_OFFSET;

	switch (offset)
	{
	case MFC_LSA_offs:
	{
		// Local storage addresses are limited to 256 KiB
		if (value >= 0x40000)
		{
			break;
		}
		g_tls_mfc[index].lsa = value;
		return true;
	}
	case MFC_EAH_offs:
	{
		g_tls_mfc[index].eah = value;
		return true;
	}
	case MFC_EAL_offs:
	{
		g_tls_mfc[index].eal = value;
		return true;
	}
	case MFC_Size_Tag_offs:
	{
		// Low half: 5-bit tag; high half: 15-bit transfer size
		g_tls_mfc[index].tag = value & 0x1f;
		g_tls_mfc[index].size = (value >> 16) & 0x7fff;
		return true;
	}
	case MFC_Class_CMD_offs:
	{
		g_tls_mfc[index].cmd = MFC(value & 0xff);

		switch (value & 0xff)
		{
		case MFC_SNDSIG_CMD:
		case MFC_SNDSIGB_CMD:
		case MFC_SNDSIGF_CMD:
		{
			// Signal-notification commands always transfer exactly 4 bytes
			g_tls_mfc[index].size = 4;
			// Fallthrough
		}
		case MFC_PUT_CMD:
		case MFC_PUTB_CMD:
		case MFC_PUTF_CMD:
		case MFC_PUTS_CMD:
		case MFC_PUTBS_CMD:
		case MFC_PUTFS_CMD:
		case MFC_GET_CMD:
		case MFC_GETB_CMD:
		case MFC_GETF_CMD:
		case MFC_GETS_CMD:
		case MFC_GETBS_CMD:
		case MFC_GETFS_CMD:
		{
			if (g_tls_mfc[index].size)
			{
				// Perform transfer immediately
				do_dma_transfer(g_tls_mfc[index]);
			}

			// .cmd should be zero, which is equal to MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL
			g_tls_mfc[index] = {};

			if (value & MFC_START_MASK)
			{
				try_start();
			}

			return true;
		}
		case MFC_BARRIER_CMD:
		case MFC_EIEIO_CMD:
		case MFC_SYNC_CMD:
		{
			// Ordering commands: transfers are synchronous here, so a host
			// memory fence is the only required effect
			g_tls_mfc[index] = {};
			_mm_mfence();
			return true;
		}
		}

		break; // unknown MFC command byte -> reported below
	}
	case Prxy_QueryType_offs:
	{
		// TODO
		// 0 - no query requested; cancel previous request
		// 1 - set (interrupt) status upon completion of any enabled tag groups
		// 2 - set (interrupt) status upon completion of all enabled tag groups
		if (value > 2)
		{
			break;
		}

		if (value)
		{
			int_ctrl[2].set(SPU_INT2_STAT_DMA_TAG_GROUP_COMPLETION_INT); // TODO
		}
		return true;
	}
	case Prxy_QueryMask_offs:
	{
		mfc_prxy_mask = value;
		return true;
	}
	case SPU_In_MBox_offs:
	{
		ch_in_mbox.push(*this, value);
		return true;
	}
	case SPU_RunCntl_offs:
	{
		if (value == SPU_RUNCNTL_RUN_REQUEST)
		{
			try_start();
		}
		else if (value == SPU_RUNCNTL_STOP_REQUEST)
		{
			status &= ~SPU_STATUS_RUNNING;
			state += cpu_flag::stop;
		}
		else
		{
			break; // any other run-control value is illegal
		}
		run_ctrl = value;
		return true;
	}
	case SPU_NPC_offs:
	{
		// NPC must be word-aligned (bit 1 clear) and within 256 KiB LS;
		// bit 0 presumably carries the interrupt-enable flag — confirm
		if ((value & 2) || value >= 0x40000)
		{
			break;
		}
		npc = value;
		return true;
	}
	case SPU_RdSigNotify1_offs:
	{
		push_snr(0, value);
		return true;
	}
	case SPU_RdSigNotify2_offs:
	{
		push_snr(1, value);
		return true;
	}
	}

	LOG_ERROR(SPU, "RawSPUThread[%d]: Write32(0x%x, value=0x%x): unknown/illegal offset (0x%x)", index, addr, value, offset);
	return false;
}