void SPUThread::init_regs() { gpr = {}; fpscr.Reset(); ch_mfc_args = {}; mfc_queue.clear(); ch_tag_mask = 0; ch_tag_stat.data.store({}); ch_stall_stat.data.store({}); ch_atomic_stat.data.store({}); ch_in_mbox.clear(); ch_out_mbox.data.store({}); ch_out_intr_mbox.data.store({}); snr_config = 0; ch_snr1.data.store({}); ch_snr2.data.store({}); ch_event_mask = 0; ch_event_stat = 0; last_raddr = 0; ch_dec_start_timestamp = get_timebased_time(); // ??? ch_dec_value = 0; run_ctrl = 0; status = 0; npc = 0; int_ctrl[0].clear(); int_ctrl[1].clear(); int_ctrl[2].clear(); gpr[1]._u32[3] = 0x3FFF0; // initial stack frame pointer }
// Write `value` to SPU channel `ch` (wrch side).
// Blocking channels (SPU_WrOutIntrMbox on raw SPU, SPU_WrOutMbox) spin under
// the thread mutex/condvar until the push succeeds. For non-raw SPUs,
// SPU_WrOutIntrMbox decodes HLE syscalls from the top byte of `value`.
// Throws EXCEPTION on unknown channels or invalid channel data.
void SPUThread::set_ch_value(u32 ch, u32 value)
{
	if (Ini.HLELogging.GetValue())
	{
		LOG_NOTICE(SPU, "set_ch_value(ch=%d [%s], value=0x%x)", ch, ch < 128 ? spu_ch_name[ch] : "???", value);
	}

	switch (ch)
	{
	//case SPU_WrSRR0:
	//	SRR0 = value & 0x3FFFC; //LSLR & ~3
	//	break;

	case SPU_WrOutIntrMbox:
	{
		if (m_type == CPU_THREAD_RAW_SPU)
		{
			// Raw SPU: deliver through the interrupt mailbox, raising a
			// class 2 mailbox interrupt once the value is pushed.
			std::unique_lock<std::mutex> lock(mutex, std::defer_lock);

			// Lazy-lock pattern: first failed push acquires the mutex and
			// retries; subsequent failures wait on the condvar.
			while (!ch_out_intr_mbox.try_push(value))
			{
				CHECK_EMU_STATUS;

				if (is_stopped()) throw CPUThreadStop{};

				if (!lock)
				{
					lock.lock();
					continue;
				}

				cv.wait(lock);
			}

			int_ctrl[2].set(SPU_INT2_STAT_MAILBOX_INT);
			return;
		}
		else
		{
			// HLE syscall dispatch: the top byte of the written value
			// selects the emulated lv2 call; the payload comes from Out_MBox.
			const u8 code = value >> 24;

			if (code < 64)
			{
				/* ===== sys_spu_thread_send_event (used by spu_printf) ===== */

				LV2_LOCK;

				const u8 spup = code & 63;

				// Out_MBox must already hold the second data word.
				if (!ch_out_mbox.get_count())
				{
					throw EXCEPTION("sys_spu_thread_send_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup);
				}

				// In_MBox must be free to receive the result code.
				if (u32 count = ch_in_mbox.get_count())
				{
					throw EXCEPTION("sys_spu_thread_send_event(value=0x%x, spup=%d): In_MBox is not empty (count=%d)", value, spup, count);
				}

				// Consume the Out_MBox value (set count to 0).
				const u32 data = ch_out_mbox.get_value();
				ch_out_mbox.set_value(data, 0);

				if (Ini.HLELogging.GetValue())
				{
					LOG_NOTICE(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
				}

				// Look up the event queue bound to this SPU port.
				const auto queue = this->spup[spup].lock();

				if (!queue)
				{
					LOG_WARNING(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data);
					return ch_in_mbox.set_values(1, CELL_ENOTCONN); // TODO: check error passing
				}

				// Report EBUSY instead of pushing when the queue is full.
				if (queue->events.size() >= queue->size)
				{
					return ch_in_mbox.set_values(1, CELL_EBUSY);
				}

				queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, m_id, ((u64)spup << 32) | (value & 0x00ffffff), data);

				// Result code goes back through In_MBox.
				return ch_in_mbox.set_values(1, CELL_OK);
			}
			else if (code < 128)
			{
				/* ===== sys_spu_thread_throw_event ===== */

				LV2_LOCK;

				const u8 spup = code & 63;

				if (!ch_out_mbox.get_count())
				{
					throw EXCEPTION("sys_spu_thread_throw_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup);
				}

				const u32 data = ch_out_mbox.get_value();
				ch_out_mbox.set_value(data, 0);

				if (Ini.HLELogging.GetValue())
				{
					LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
				}

				const auto queue = this->spup[spup].lock();

				if (!queue)
				{
					// Unlike send_event, throw_event reports no status back.
					LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data);
					return;
				}

				// TODO: check passing spup value
				if (queue->events.size() >= queue->size)
				{
					LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (queue is full)", spup, (value & 0x00ffffff), data);
					return;
				}

				queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, m_id, ((u64)spup << 32) | (value & 0x00ffffff), data);
				return;
			}
			else if (code == 128)
			{
				/* ===== sys_event_flag_set_bit ===== */

				LV2_LOCK;

				const u32 flag = value & 0xffffff;

				if (!ch_out_mbox.get_count())
				{
					throw EXCEPTION("sys_event_flag_set_bit(value=0x%x (flag=%d)): Out_MBox is empty", value, flag);
				}

				if (u32 count = ch_in_mbox.get_count())
				{
					throw EXCEPTION("sys_event_flag_set_bit(value=0x%x (flag=%d)): In_MBox is not empty (%d)", value, flag, count);
				}

				// Out_MBox holds the event flag id.
				const u32 data = ch_out_mbox.get_value();
				ch_out_mbox.set_value(data, 0);

				if (flag > 63)
				{
					throw EXCEPTION("sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d)): Invalid flag", data, value, flag);
				}

				if (Ini.HLELogging.GetValue())
				{
					LOG_WARNING(SPU, "sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d))", data, value, flag);
				}

				const auto eflag = idm::get<lv2_event_flag_t>(data);

				if (!eflag)
				{
					return ch_in_mbox.set_values(1, CELL_ESRCH);
				}

				const u64 bitptn = 1ull << flag;

				if (~eflag->pattern.fetch_or(bitptn) & bitptn)
				{
					// notify only when this call actually set the bit
					// (it was previously clear in the pattern)
					eflag->notify_all(lv2_lock);
				}

				return ch_in_mbox.set_values(1, CELL_OK);
			}
			else if (code == 192)
			{
				/* ===== sys_event_flag_set_bit_impatient ===== */

				LV2_LOCK;

				const u32 flag = value & 0xffffff;

				if (!ch_out_mbox.get_count())
				{
					throw EXCEPTION("sys_event_flag_set_bit_impatient(value=0x%x (flag=%d)): Out_MBox is empty", value, flag);
				}

				const u32 data = ch_out_mbox.get_value();
				ch_out_mbox.set_value(data, 0);

				if (flag > 63)
				{
					throw EXCEPTION("sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d)): Invalid flag", data, value, flag);
				}

				if (Ini.HLELogging.GetValue())
				{
					LOG_WARNING(SPU, "sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d))", data, value, flag);
				}

				const auto eflag = idm::get<lv2_event_flag_t>(data);

				// "Impatient" variant ignores all failures silently.
				if (!eflag)
				{
					return;
				}

				const u64 bitptn = 1ull << flag;

				if (~eflag->pattern.fetch_or(bitptn) & bitptn)
				{
					// notify only when this call actually set the bit
					eflag->notify_all(lv2_lock);
				}

				return;
			}
			else
			{
				if (ch_out_mbox.get_count())
				{
					throw EXCEPTION("SPU_WrOutIntrMbox: unknown data (value=0x%x); Out_MBox = 0x%x", value, ch_out_mbox.get_value());
				}
				else
				{
					throw EXCEPTION("SPU_WrOutIntrMbox: unknown data (value=0x%x)", value);
				}
			}
		}
	}

	case SPU_WrOutMbox:
	{
		// Blocking push to the outbound mailbox (same lazy-lock pattern as
		// the interrupt mailbox above).
		std::unique_lock<std::mutex> lock(mutex, std::defer_lock);

		while (!ch_out_mbox.try_push(value))
		{
			CHECK_EMU_STATUS;

			if (is_stopped()) throw CPUThreadStop{};

			if (!lock)
			{
				lock.lock();
				continue;
			}

			cv.wait(lock);
		}

		return;
	}

	case MFC_WrTagMask:
	{
		ch_tag_mask = value;
		return;
	}

	case MFC_WrTagUpdate:
	{
		// Immediately report all masked tag groups as complete.
		ch_tag_stat.set_value(ch_tag_mask); // hack
		return;
	}

	case MFC_LSA:
	{
		// Local storage address must be within the 256 KiB LS.
		if (value >= 0x40000)
		{
			break;
		}

		ch_mfc_args.lsa = value;
		return;
	}

	case MFC_EAH:
	{
		ch_mfc_args.eah = value;
		return;
	}

	case MFC_EAL:
	{
		ch_mfc_args.eal = value;
		return;
	}

	case MFC_Size:
	{
		// Single MFC transfers are limited to 16 KiB.
		if (value > 16 * 1024)
		{
			break;
		}

		ch_mfc_args.size = (u16)value;
		return;
	}

	case MFC_TagID:
	{
		// Only 32 tag groups exist.
		if (value >= 32)
		{
			break;
		}

		ch_mfc_args.tag = (u16)value;
		return;
	}

	case MFC_Cmd:
	{
		process_mfc_cmd(value);
		ch_mfc_args = {}; // clear non-persistent data
		return;
	}

	case MFC_WrListStallAck:
	{
		// `value` is the tag group being acknowledged.
		if (value >= 32)
		{
			break;
		}

		// Resume every stalled list command with this tag, then remove the
		// completed entries. Entries are tombstoned with tag 0xdead first so
		// the queue is not mutated while it is being iterated.
		size_t processed = 0;

		for (size_t i = 0; i < mfc_queue.size(); i++)
		{
			if (mfc_queue[i].second.tag == value)
			{
				do_dma_list_cmd(mfc_queue[i].first, mfc_queue[i].second);
				mfc_queue[i].second.tag = 0xdead;
				processed++;
			}
		}

		while (processed)
		{
			for (size_t i = 0; i < mfc_queue.size(); i++)
			{
				if (mfc_queue[i].second.tag == 0xdead)
				{
					mfc_queue.erase(mfc_queue.begin() + i);
					processed--;
					break;
				}
			}
		}

		return;
	}

	case SPU_WrDec:
	{
		// Reload the decrementer: remember when it started counting.
		ch_dec_start_timestamp = get_timebased_time();
		ch_dec_value = value;
		return;
	}

	case SPU_WrEventMask:
	{
		// detect masking events with enabled interrupt status
		if (value && ch_event_stat.load() & SPU_EVENT_INTR_ENABLED)
		{
			throw EXCEPTION("SPU Interrupts not implemented (mask=0x%x)", value);
		}

		// detect masking unimplemented events
		if (value & ~SPU_EVENT_IMPLEMENTED)
		{
			break;
		}

		ch_event_mask.store(value);
		return;
	}

	case SPU_WrEventAck:
	{
		if (value & ~SPU_EVENT_IMPLEMENTED)
		{
			break;
		}

		// Acknowledge (clear) the given event bits.
		ch_event_stat &= ~value;
		return;
	}
	}

	// Reached via `break` on invalid channel data, or an unhandled channel.
	throw EXCEPTION("Unknown/illegal channel (ch=%d [%s], value=0x%x)", ch, ch < 128 ? spu_ch_name[ch] : "???", value);
}
// Read from SPU channel `ch` (rdch side) and return the value.
// Blocking channels spin under the thread mutex/condvar until data arrives;
// CHECK_EMU_STATUS and is_stopped() provide emulator-stop escape hatches.
// Throws EXCEPTION on unknown channels.
u32 SPUThread::get_ch_value(u32 ch)
{
	if (Ini.HLELogging.GetValue())
	{
		LOG_NOTICE(SPU, "get_ch_value(ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
	}

	// Shared blocking-read helper: pop from the channel, or wait on the
	// condvar (lazily acquiring the mutex on the first failure) until a
	// value becomes available.
	auto read_channel = [this](spu_channel_t& channel) -> u32
	{
		std::unique_lock<std::mutex> lock(mutex, std::defer_lock);

		while (true)
		{
			bool result;
			u32 value;

			std::tie(result, value) = channel.try_pop();

			if (result)
			{
				return value;
			}

			CHECK_EMU_STATUS;

			if (is_stopped()) throw CPUThreadStop{};

			if (!lock)
			{
				lock.lock();
				continue;
			}

			cv.wait(lock);
		}
	};

	switch (ch)
	{
	//case SPU_RdSRR0:
	//	value = SRR0;
	//	break;

	case SPU_RdInMbox:
	{
		// Like read_channel, but In_MBox pops also report the remaining
		// count so the mailbox-threshold interrupt can be raised.
		std::unique_lock<std::mutex> lock(mutex, std::defer_lock);

		while (true)
		{
			bool result;
			u32 value;
			u32 count;

			std::tie(result, value, count) = ch_in_mbox.try_pop();

			if (result)
			{
				// Raise the class 2 threshold interrupt when the mailbox
				// was full before this pop.
				if (count + 1 == 4 /* SPU_IN_MBOX_THRESHOLD */) // TODO: check this
				{
					int_ctrl[2].set(SPU_INT2_STAT_SPU_MAILBOX_THRESHOLD_INT);
				}

				return value;
			}

			CHECK_EMU_STATUS;

			if (is_stopped()) throw CPUThreadStop{};

			if (!lock)
			{
				lock.lock();
				continue;
			}

			cv.wait(lock);
		}
	}

	case MFC_RdTagStat:
	{
		return read_channel(ch_tag_stat);
	}

	case MFC_RdTagMask:
	{
		// Non-blocking: just report the current tag mask.
		return ch_tag_mask;
	}

	case SPU_RdSigNotify1:
	{
		return read_channel(ch_snr1);
	}

	case SPU_RdSigNotify2:
	{
		return read_channel(ch_snr2);
	}

	case MFC_RdAtomicStat:
	{
		return read_channel(ch_atomic_stat);
	}

	case MFC_RdListStallStat:
	{
		return read_channel(ch_stall_stat);
	}

	case SPU_RdDec:
	{
		// Decrementer counts down from the value written at
		// ch_dec_start_timestamp; unsigned wraparound is possible.
		return ch_dec_value - (u32)(get_timebased_time() - ch_dec_start_timestamp);
	}

	case SPU_RdEventMask:
	{
		return ch_event_mask.load();
	}

	case SPU_RdEventStat:
	{
		std::unique_lock<std::mutex> lock(mutex, std::defer_lock);

		// start waiting or return immediately
		if (u32 res = get_events(true))
		{
			return res;
		}

		if (ch_event_mask.load() & SPU_EVENT_LR)
		{
			// register waiter if polling reservation status is required
			vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || is_stopped()));
		}
		else
		{
			lock.lock();

			// simple waiting loop otherwise
			while (!get_events(true) && !is_stopped())
			{
				CHECK_EMU_STATUS;

				cv.wait(lock);
			}
		}

		// Clear the waiting flag set while blocked above.
		ch_event_stat &= ~SPU_EVENT_WAITING;

		if (is_stopped()) throw CPUThreadStop{};

		return get_events();
	}

	case SPU_RdMachStat:
	{
		// HACK: "Not isolated" status
		// Return SPU Interrupt status in LSB
		return (ch_event_stat.load() & SPU_EVENT_INTR_ENABLED) != 0;
	}
	}

	throw EXCEPTION("Unknown/illegal channel (ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
}