void _sys_ppu_thread_exit(PPUThread& ppu, u64 errorcode)
{
    sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);

    LV2_LOCK;

    // get all sys_mutex objects
    for (auto& mutex : idm::get_all<lv2_mutex_t>())
    {
        // unlock mutex if locked by this thread
        if (mutex->owner.get() == &ppu)
        {
            mutex->unlock(lv2_lock);
        }
    }

    if (!ppu.is_joinable)
    {
        idm::remove<PPUThread>(ppu.get_id());
    }
    else
    {
        ppu.exit();
    }

    // Throw if this syscall was not called directly by the SC instruction
    if (~ppu.hle_code != 41)
    {
        throw CPUThreadExit{};
    }
}
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
{
    sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

    LV2_LOCK;

    const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (rwlock->writer == CPU.GetId())
    {
        return CELL_EDEADLK;
    }

    if (rwlock->readers || rwlock->writer || rwlock->wwaiters)
    {
        return CELL_EBUSY;
    }

    rwlock->writer = CPU.GetId();

    return CELL_OK;
}
void sys_interrupt_thread_eoi(PPUThread& CPU)
{
    sys_interrupt.Log("sys_interrupt_thread_eoi()");

    // TODO: maybe it should actually unwind the stack (ensure that all the automatic objects are finalized)?
    CPU.GPR[1] = align(CPU.GetStackAddr() + CPU.GetStackSize(), 0x200) - 0x200; // supercrutch (just to hide error messages)

    CPU.FastStop();
}
s32 sys_ppu_thread_get_stack_information(PPUThread& CPU, u32 info_addr)
{
    sys_ppu_thread.Log("sys_ppu_thread_get_stack_information(info_addr=0x%x)", info_addr);

    vm::write32(info_addr, (u32)CPU.GetStackAddr());
    vm::write32(info_addr + 4, CPU.GetStackSize());

    return CELL_OK;
}
void ppu_thread_exit(PPUThread& CPU, u64 errorcode)
{
    if (CPU.owned_mutexes)
    {
        sys_ppu_thread.Error("Owned mutexes found (%d)", CPU.owned_mutexes);
        CPU.owned_mutexes = 0;
    }

    CPU.SetExitStatus(errorcode);
    CPU.Stop();
}
s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
    sysPrxForUser.Log("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);

    // check to prevent recursive locking in the next call
    if (lwmutex->owner.read_relaxed() == CPU.GetId())
    {
        return CELL_EBUSY;
    }

    // attempt to lock the mutex
    if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
    {
        return res;
    }

    // call the syscall
    if (s32 res = _sys_lwmutex_destroy(lwmutex->sleep_queue))
    {
        // unlock the mutex if failed
        sys_lwmutex_unlock(CPU, lwmutex);

        return res;
    }

    // deleting succeeded
    lwmutex->owner.exchange(lwmutex::dead);

    return CELL_OK;
}
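/*
 * Illustrative sketch (not RPCS3 code): the destroy handshake above -- refuse if
 * already owned, lock to exclude other owners, destroy the kernel object, then
 * publish a "dead" marker -- can be modelled with one atomic owner word. All
 * names here (sketch::lw_mutex, OWNER_FREE, OWNER_DEAD, try_lock, destroy) are
 * hypothetical; the real sys_lwmutex_t layout is more involved.
 */
#include <atomic>
#include <cstdint>

namespace sketch
{
    constexpr std::uint32_t OWNER_FREE = 0;          // unlocked
    constexpr std::uint32_t OWNER_DEAD = 0xffffffff; // destroyed marker (like lwmutex::dead)

    struct lw_mutex
    {
        std::atomic<std::uint32_t> owner{OWNER_FREE};

        bool try_lock(std::uint32_t tid)
        {
            std::uint32_t expected = OWNER_FREE;
            return owner.compare_exchange_strong(expected, tid);
        }

        // mirrors the sys_lwmutex_destroy ordering: lock first so nobody else
        // holds the mutex, then publish the dead value so later lock attempts
        // can fail fast instead of sleeping on a vanished kernel object
        bool destroy(std::uint32_t tid)
        {
            if (owner.load() == tid) return false; // caller already owns it (CELL_EBUSY analogue)
            if (!try_lock(tid))      return false; // somebody else owns it
            owner.exchange(OWNER_DEAD);            // like lwmutex->owner.exchange(lwmutex::dead)
            return true;
        }
    };
}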
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<be_t<u64>> thread_id)
{
    sys_ppu_thread.Log("sys_ppu_thread_get_id(thread_id_addr=0x%x)", thread_id.addr());

    *thread_id = CPU.GetId();

    return CELL_OK;
}
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
{
    sys_rwlock.Log("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

    LV2_LOCK;

    const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (rwlock->writer != CPU.GetId())
    {
        return CELL_EPERM;
    }

    rwlock->writer = 0;

    if (rwlock->wwaiters)
    {
        rwlock->wcv.notify_one();
    }
    else if (rwlock->rwaiters)
    {
        rwlock->rcv.notify_all();
    }

    return CELL_OK;
}
s32 sys_rwlock_trywlock(PPUThread& ppu, u32 rw_lock_id)
{
    sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

    LV2_LOCK;

    const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (rwlock->writer.get() == &ppu)
    {
        return CELL_EDEADLK;
    }

    if (rwlock->readers || rwlock->writer || rwlock->wsq.size())
    {
        return CELL_EBUSY;
    }

    rwlock->writer = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

    return CELL_OK;
}
s32 _sys_lwcond_queue_wait(PPUThread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
    sys_lwcond.Log("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto cond = idm::get<lv2_lwcond_t>(lwcond_id);
    const auto mutex = idm::get<lv2_lwmutex_t>(lwmutex_id);

    if (!cond || !mutex)
    {
        return CELL_ESRCH;
    }

    // finalize unlocking the mutex
    mutex->unlock(lv2_lock);

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, cond->sq);

    // potential mutex waiter (not added immediately); must queue on the mutex's queue, not the condition's
    sleep_queue_entry_t mutex_waiter(ppu, mutex->sq, defer_sleep);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                // try to reown the mutex if timed out
                if (mutex->signaled)
                {
                    mutex->signaled--;

                    return CELL_EDEADLK;
                }
                else
                {
                    return CELL_ETIMEDOUT;
                }
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    // return cause
    return ppu.GPR[3] ? CELL_EBUSY : CELL_OK;
}
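/*
 * Illustrative sketch (not RPCS3 code): the wait loop above -- repeated in the
 * lwmutex, semaphore, rwlock, mutex and event-queue syscalls below -- re-arms
 * the condition variable with the *remaining* time, recomputing the elapsed
 * microseconds on every spurious wakeup. Standalone analogue; `done`, `cv`,
 * `mtx` and wait_with_timeout are hypothetical names.
 */
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

namespace sketch
{
    // returns false on timeout, true once `done` becomes true under `mtx`;
    // timeout_us == 0 means "wait forever", matching the syscalls above
    inline bool wait_with_timeout(std::mutex& mtx, std::condition_variable& cv, bool& done, std::uint64_t timeout_us)
    {
        const auto start = std::chrono::steady_clock::now();

        std::unique_lock<std::mutex> lock(mtx);

        while (!done)
        {
            if (timeout_us)
            {
                // recompute elapsed time each iteration (like get_system_time() - start_time)
                const auto passed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start).count();

                if (static_cast<std::uint64_t>(passed) >= timeout_us)
                {
                    return false; // CELL_ETIMEDOUT analogue
                }

                cv.wait_for(lock, std::chrono::microseconds(timeout_us - passed));
            }
            else
            {
                cv.wait(lock); // infinite wait
            }
        }

        return true;
    }
}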
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<u64> thread_id)
{
    sysPrxForUser.Log("sys_ppu_thread_get_id(thread_id=*0x%x)", thread_id);

    *thread_id = CPU.GetId();

    return CELL_OK;
}
s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
{
    sysPrxForUser.Log("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=0x%x)", lwcond, ppu_thread_id);

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
    {
        // TODO (protocol ignored)
        //return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
    }

    if (lwmutex->owner.read_relaxed() == CPU.GetId())
    {
        // if owns the mutex
        lwmutex->all_info++;

        // call the syscall
        if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
        {
            lwmutex->all_info--;

            return res;
        }

        return CELL_OK;
    }

    if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
    {
        // if locking failed
        if (res != CELL_EBUSY)
        {
            return CELL_ESRCH;
        }

        // call the syscall
        return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
    }

    // if locking succeeded
    lwmutex->all_info++;

    // call the syscall
    if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
    {
        lwmutex->all_info--;

        // unlock the lightweight mutex
        sys_lwmutex_unlock(CPU, lwmutex);

        return res;
    }

    return CELL_OK;
}
s32 sys_lwcond_signal(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
{
    sysPrxForUser.trace("sys_lwcond_signal(lwcond=*0x%x)", lwcond);

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
    {
        // TODO (protocol ignored)
        //return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
    }

    if (lwmutex->vars.owner.load() == ppu.get_id())
    {
        // if owns the mutex
        lwmutex->all_info++;

        // call the syscall
        if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
        {
            lwmutex->all_info--;

            return res == CELL_EPERM ? CELL_OK : res;
        }

        return CELL_OK;
    }

    if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
    {
        // if locking failed
        if (res != CELL_EBUSY)
        {
            return CELL_ESRCH;
        }

        // call the syscall
        return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
    }

    // if locking succeeded
    lwmutex->all_info++;

    // call the syscall
    if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
    {
        lwmutex->all_info--;

        // unlock the lightweight mutex
        sys_lwmutex_unlock(ppu, lwmutex);

        return res == CELL_ENOENT ? CELL_OK : res;
    }

    return CELL_OK;
}
s32 sys_lwcond_signal_all(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
{
    sysPrxForUser.trace("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
    {
        // TODO (protocol ignored)
        //return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
    }

    if (lwmutex->vars.owner.load() == ppu.get_id())
    {
        // if owns the mutex, call the syscall
        const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

        if (res <= 0)
        {
            // return error or CELL_OK
            return res;
        }

        lwmutex->all_info += res;

        return CELL_OK;
    }

    if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
    {
        // if locking failed
        if (res != CELL_EBUSY)
        {
            return CELL_ESRCH;
        }

        // call the syscall
        return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
    }

    // if locking succeeded, call the syscall
    s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

    if (res > 0)
    {
        lwmutex->all_info += res;

        res = CELL_OK;
    }

    // unlock mutex
    sys_lwmutex_unlock(ppu, lwmutex);

    return res;
}
s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
{
    sysPrxForUser.Log("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
    {
        // TODO (protocol ignored)
        //return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
    }

    if (lwmutex->owner.read_relaxed() == CPU.GetId())
    {
        // if owns the mutex, call the syscall
        const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

        if (res <= 0)
        {
            // return error or CELL_OK
            return res;
        }

        lwmutex->all_info += res;

        return CELL_OK;
    }

    if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
    {
        // if locking failed
        if (res != CELL_EBUSY)
        {
            return CELL_ESRCH;
        }

        // call the syscall
        return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
    }

    // if locking succeeded, call the syscall
    s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

    if (res > 0)
    {
        lwmutex->all_info += res;

        res = CELL_OK;
    }

    // unlock mutex
    sys_lwmutex_unlock(CPU, lwmutex);

    return res;
}
s32 sys_rwlock_rlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
{
    sys_rwlock.Log("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (!rwlock->writer && rwlock->wsq.empty())
    {
        if (!++rwlock->readers)
        {
            throw EXCEPTION("Too many readers");
        }

        return CELL_OK;
    }

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, rwlock->rsq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    if (rwlock->writer || !rwlock->readers)
    {
        throw EXCEPTION("Unexpected");
    }

    return CELL_OK;
}
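/*
 * Illustrative analogue (not RPCS3 code): the reader fast path above admits a
 * reader only when there is no writer *and* no queued writer, i.e. it prefers
 * writers. A rough standard-library approximation uses a timed shared lock --
 * note the standard does not guarantee writer preference, so this only models
 * the timeout behaviour. `rlock_with_timeout` is a hypothetical name.
 */
#include <chrono>
#include <cstdint>
#include <shared_mutex>

namespace sketch
{
    inline bool rlock_with_timeout(std::shared_timed_mutex& rwlock, std::uint64_t timeout_us)
    {
        if (timeout_us == 0)
        {
            rwlock.lock_shared(); // timeout == 0 means "wait forever" in the syscall above
            return true;
        }

        // false maps to CELL_ETIMEDOUT
        return rwlock.try_lock_shared_for(std::chrono::microseconds(timeout_us));
    }
}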
void sys_interrupt_thread_eoi(PPUThread& CPU)
{
    sys_interrupt.Log("sys_interrupt_thread_eoi()");

    // TODO: maybe it should actually unwind the stack of PPU thread?
    CPU.GPR[1] = align(CPU.stack_addr + CPU.stack_size, 0x200) - 0x200; // supercrutch to bypass stack check

    CPU.FastStop();
}
void sys_interrupt_thread_eoi(PPUThread& ppu)
{
    sys_interrupt.Log("sys_interrupt_thread_eoi()");

    // TODO: maybe it should actually unwind the stack of PPU thread?
    ppu.GPR[1] = align(ppu.stack_addr + ppu.stack_size, 0x200) - 0x200; // supercrutch to bypass stack check

    ppu.fast_stop();
}
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout)
{
    sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (rwlock->writer == CPU.GetId())
    {
        return CELL_EDEADLK;
    }

    // protocol is ignored in current implementation
    rwlock->wwaiters++;

    while (rwlock->readers || rwlock->writer)
    {
        CHECK_EMU_STATUS;

        if (timeout && get_system_time() - start_time > timeout)
        {
            rwlock->wwaiters--;

            return CELL_ETIMEDOUT;
        }

        rwlock->wcv.wait_for(lv2_lock, std::chrono::milliseconds(1));
    }

    rwlock->writer = CPU.GetId();
    rwlock->wwaiters--;

    return CELL_OK;
}
s32 _sys_lwmutex_lock(PPUThread& ppu, u32 lwmutex_id, u64 timeout)
{
    sys_lwmutex.Log("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)", lwmutex_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto mutex = idm::get<lv2_lwmutex_t>(lwmutex_id);

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (mutex->signaled)
    {
        mutex->signaled--;

        return CELL_OK;
    }

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, mutex->sq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    return CELL_OK;
}
s32 sys_semaphore_wait(PPUThread& ppu, u32 sem_id, u64 timeout)
{
    sys_semaphore.Log("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto sem = idm::get<lv2_sema_t>(sem_id);

    if (!sem)
    {
        return CELL_ESRCH;
    }

    if (sem->value > 0)
    {
        sem->value--;

        return CELL_OK;
    }

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, sem->sq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    return CELL_OK;
}
s32 sys_lwmutex_unlock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
    sysPrxForUser.Log("sys_lwmutex_unlock(lwmutex=*0x%x)", lwmutex);

    const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

    // check owner
    if (lwmutex->owner.read_relaxed() != tid)
    {
        return CELL_EPERM;
    }

    if (lwmutex->recursive_count.data())
    {
        // recursive unlocking succeeded
        lwmutex->recursive_count--;

        return CELL_OK;
    }

    // ensure that waiter is zero
    if (lwmutex->lock_var.compare_and_swap_test({ tid, lwmutex::zero }, { lwmutex::free, lwmutex::zero }))
    {
        // unlocking succeeded
        return CELL_OK;
    }

    if (lwmutex->attribute.data() & se32(SYS_SYNC_RETRY))
    {
        // TODO (protocol is ignored in current implementation)
    }

    // set special value
    lwmutex->owner.exchange(lwmutex::reserved);

    // call the syscall
    if (_sys_lwmutex_unlock(lwmutex->sleep_queue) == CELL_ESRCH)
    {
        return CELL_ESRCH;
    }

    return CELL_OK;
}
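/*
 * Illustrative sketch (not RPCS3 code): the unlock fast path above is a single
 * compare-and-swap over the packed {owner, waiter} pair -- it only falls back
 * to the syscall when a waiter may exist. Hypothetical standalone model; the
 * real lock_var is a big-endian pair in guest memory, not a host atomic.
 */
#include <atomic>
#include <cstdint>

namespace sketch
{
    constexpr std::uint32_t LW_FREE = 0;

    struct lock_var_t
    {
        std::atomic<std::uint64_t> packed{0}; // high 32 bits: owner tid, low 32 bits: waiter word

        static std::uint64_t make(std::uint32_t owner, std::uint32_t waiter)
        {
            return (std::uint64_t{owner} << 32) | waiter;
        }

        // succeeds only when owner == tid AND waiter == 0, exactly like the
        // compare_and_swap_test({tid, zero}, {free, zero}) fast path above
        bool try_fast_unlock(std::uint32_t tid)
        {
            std::uint64_t expected = make(tid, 0);
            return packed.compare_exchange_strong(expected, make(LW_FREE, 0));
        }
    };
}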
s32 sys_mutex_trylock(PPUThread& ppu, u32 mutex_id)
{
    sys_mutex.Log("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);

    LV2_LOCK;

    const auto mutex = idm::get<lv2_mutex_t>(mutex_id);

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    // check current ownership
    if (mutex->owner.get() == &ppu)
    {
        if (mutex->recursive)
        {
            if (mutex->recursive_count == 0xffffffffu)
            {
                return CELL_EKRESOURCE;
            }

            mutex->recursive_count++;

            return CELL_OK;
        }

        return CELL_EDEADLK;
    }

    if (mutex->owner)
    {
        return CELL_EBUSY;
    }

    // own the mutex if free
    mutex->owner = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

    return CELL_OK;
}
s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
    sysPrxForUser.Log("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

    const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if (lwmutex->owner.read_relaxed() != tid)
    {
        // if not owner of the mutex
        return CELL_EPERM;
    }

    // save old recursive value
    const be_t<u32> recursive_value = lwmutex->recursive_count;

    // set special value
    lwmutex->owner = { lwmutex::reserved };
    lwmutex->recursive_count = 0;

    // call the syscall
    s32 res = _sys_lwcond_queue_wait(CPU, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

    if (res == CELL_OK || res == CELL_ESRCH)
    {
        if (res == CELL_OK)
        {
            lwmutex->all_info--;
        }

        // restore owner and recursive value
        const auto old = lwmutex->owner.exchange(tid);
        lwmutex->recursive_count = recursive_value;

        if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
        {
            sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed (lwmutex->owner=0x%x)", lwcond, old);
        }

        return res;
    }

    if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
    {
        const s32 res2 = sys_lwmutex_lock(CPU, lwmutex, 0);

        if (res2 == CELL_OK)
        {
            // if successfully locked, restore recursive value
            lwmutex->recursive_count = recursive_value;

            return res == CELL_EBUSY ? CELL_OK : res;
        }

        return res2;
    }

    if (res == CELL_EDEADLK)
    {
        // restore owner and recursive value
        const auto old = lwmutex->owner.exchange(tid);
        lwmutex->recursive_count = recursive_value;

        if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
        {
            sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed after timeout (lwmutex->owner=0x%x)", lwcond, old);
        }

        return CELL_ETIMEDOUT;
    }

    sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): unexpected syscall result (0x%x)", lwcond, res);

    return res;
}
s32 sys_rwlock_wlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
{
    sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

    if (!rwlock)
    {
        return CELL_ESRCH;
    }

    if (rwlock->writer.get() == &ppu)
    {
        return CELL_EDEADLK;
    }

    if (!rwlock->readers && !rwlock->writer)
    {
        rwlock->writer = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

        return CELL_OK;
    }

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, rwlock->wsq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                // if the last waiter quit the writer sleep queue, readers must acquire the lock
                if (!rwlock->writer && rwlock->wsq.size() == 1)
                {
                    if (rwlock->wsq.front().get() != &ppu)
                    {
                        throw EXCEPTION("Unexpected");
                    }

                    rwlock->wsq.clear();
                    rwlock->notify_all(lv2_lock);
                }

                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    if (rwlock->readers || rwlock->writer.get() != &ppu)
    {
        throw EXCEPTION("Unexpected");
    }

    return CELL_OK;
}
s32 sys_event_queue_receive(PPUThread& ppu, u32 equeue_id, vm::ptr<sys_event_t> dummy_event, u64 timeout)
{
    sys_event.Log("sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx)", equeue_id, dummy_event, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto queue = idm::get<lv2_event_queue_t>(equeue_id);

    if (!queue)
    {
        return CELL_ESRCH;
    }

    if (queue->type != SYS_PPU_QUEUE)
    {
        return CELL_EINVAL;
    }

    if (queue->events.size())
    {
        // event data is returned in registers (dummy_event is not used)
        std::tie(ppu.GPR[4], ppu.GPR[5], ppu.GPR[6], ppu.GPR[7]) = queue->events.front();
        queue->events.pop_front();

        return CELL_OK;
    }

    // cause (if cancelled) will be returned in r3
    ppu.GPR[3] = 0;

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, queue->sq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    if (ppu.GPR[3])
    {
        // the queue should have been destroyed if the wait was cancelled
        if (idm::check<lv2_event_queue_t>(equeue_id))
        {
            throw EXCEPTION("Unexpected");
        }

        return CELL_ECANCELED;
    }

    // r4-r7 registers must be set by push()
    return CELL_OK;
}
std::string ps3_fmt(PPUThread& context, vm::cptr<char> fmt, u32 g_count, u32 f_count, u32 v_count)
{
    std::string result;

    for (char c = *fmt++; c; c = *fmt++)
    {
        switch (c)
        {
        case '%':
        {
            const auto start = fmt - 1;

            // read flags
            const bool plus_sign = *fmt == '+' ? fmt++, true : false;
            const bool minus_sign = *fmt == '-' ? fmt++, true : false;
            const bool space_sign = *fmt == ' ' ? fmt++, true : false;
            const bool number_sign = *fmt == '#' ? fmt++, true : false;
            const bool zero_padding = *fmt == '0' ? fmt++, true : false;

            // read width
            const u32 width = [&]() -> u32
            {
                u32 width = 0;

                if (*fmt == '*')
                {
                    fmt++;

                    return context.get_next_gpr_arg(g_count, f_count, v_count);
                }

                // explicit digit range check: `*fmt - '0' < 10` is wrong for signed char
                // (non-digits below '0' compare less than 10 and would be consumed)
                while (*fmt >= '0' && *fmt <= '9')
                {
                    width = width * 10 + (*fmt++ - '0');
                }

                return width;
            }();

            // read precision
            const u32 prec = [&]() -> u32
            {
                u32 prec = 0;

                if (*fmt != '.')
                {
                    return 0;
                }

                if (*++fmt == '*')
                {
                    fmt++;

                    return context.get_next_gpr_arg(g_count, f_count, v_count);
                }

                while (*fmt >= '0' && *fmt <= '9')
                {
                    prec = prec * 10 + (*fmt++ - '0');
                }

                return prec;
            }();

            switch (char cf = *fmt++)
            {
            case '%':
            {
                if (plus_sign || minus_sign || space_sign || number_sign || zero_padding || width || prec) break;

                result += '%';
                continue;
            }
            case 'd':
            case 'i':
            {
                // signed decimal
                const s64 value = context.get_next_gpr_arg(g_count, f_count, v_count);

                if (plus_sign || minus_sign || space_sign || number_sign || zero_padding || width || prec) break;

                result += fmt::to_sdec(value);
                continue;
            }
            case 'x':
            case 'X':
            {
                // hexadecimal
                const u64 value = context.get_next_gpr_arg(g_count, f_count, v_count);

                if (plus_sign || minus_sign || space_sign || prec) break;

                if (number_sign && value)
                {
                    result += cf == 'x' ? "0x" : "0X";
                }

                const std::string& hex = cf == 'x' ? fmt::to_hex(value) : fmt::toupper(fmt::to_hex(value));

                if (hex.length() >= width)
                {
                    result += hex;
                }
                else if (zero_padding)
                {
                    result += std::string(width - hex.length(), '0') + hex;
                }
                else
                {
                    result += hex + std::string(width - hex.length(), ' ');
                }

                continue;
            }
            case 's':
            {
                // string
                auto string = vm::cptr<char, u64>::make(context.get_next_gpr_arg(g_count, f_count, v_count));

                if (plus_sign || minus_sign || space_sign || number_sign || zero_padding || width || prec) break;

                result += string.get_ptr();
                continue;
            }
            case 'u':
            {
                // unsigned decimal
                const u64 value = context.get_next_gpr_arg(g_count, f_count, v_count);

                if (plus_sign || minus_sign || space_sign || number_sign || zero_padding || width || prec) break;

                result += fmt::to_udec(value);
                continue;
            }
            }

            throw EXCEPTION("Unknown formatting: '%s'", start.get_ptr());
        }
        }

        result += c;
    }

    return result;
}
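/*
 * Illustrative sketch (not RPCS3 code): the flag reader above leans on the
 * comma operator -- `*fmt == '+' ? fmt++, true : false` advances the cursor
 * and yields true in one expression. Minimal standalone demonstration; the
 * names sketch::fmt_flags and read_flags are hypothetical.
 */
namespace sketch
{
    struct fmt_flags
    {
        bool plus, minus, space, hash, zero;
    };

    inline fmt_flags read_flags(const char*& fmt)
    {
        fmt_flags f{};

        // each test consumes the flag character only if it matches
        f.plus  = *fmt == '+' ? fmt++, true : false;
        f.minus = *fmt == '-' ? fmt++, true : false;
        f.space = *fmt == ' ' ? fmt++, true : false;
        f.hash  = *fmt == '#' ? fmt++, true : false;
        f.zero  = *fmt == '0' ? fmt++, true : false;

        return f;
    }
}

// usage: const char* p = "08x"; auto f = sketch::read_flags(p);
// afterwards f.zero is true and p points at '8' (the width digits)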
s32 sys_lwcond_wait(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
    sysPrxForUser.trace("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

    const be_t<u32> tid = ppu.get_id();

    const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

    if (lwmutex->vars.owner.load() != tid)
    {
        // if not owner of the mutex
        return CELL_EPERM;
    }

    // save old recursive value
    const be_t<u32> recursive_value = lwmutex->recursive_count;

    // set special value
    lwmutex->vars.owner = lwmutex_reserved;
    lwmutex->recursive_count = 0;

    // call the syscall
    s32 res = _sys_lwcond_queue_wait(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

    if (res == CELL_OK || res == CELL_ESRCH)
    {
        if (res == CELL_OK)
        {
            lwmutex->all_info--;
        }

        // restore owner and recursive value
        const auto old = lwmutex->vars.owner.exchange(tid);
        lwmutex->recursive_count = recursive_value;

        if (old != lwmutex_reserved)
        {
            throw EXCEPTION("Locking failed (lwmutex=*0x%x, owner=0x%x)", lwmutex, old);
        }

        return res;
    }

    if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
    {
        const s32 res2 = sys_lwmutex_lock(ppu, lwmutex, 0);

        if (res2 == CELL_OK)
        {
            // if successfully locked, restore recursive value
            lwmutex->recursive_count = recursive_value;

            return res == CELL_EBUSY ? CELL_OK : res;
        }

        return res2;
    }

    if (res == CELL_EDEADLK)
    {
        // restore owner and recursive value
        const auto old = lwmutex->vars.owner.exchange(tid);
        lwmutex->recursive_count = recursive_value;

        if (old != lwmutex_reserved)
        {
            throw EXCEPTION("Locking failed (lwmutex=*0x%x, owner=0x%x)", lwmutex, old);
        }

        return CELL_ETIMEDOUT;
    }

    throw EXCEPTION("Unexpected syscall result (lwcond=*0x%x, result=0x%x)", lwcond, res);
}
s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
{
    sys_mutex.Log("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto mutex = idm::get<lv2_mutex_t>(mutex_id);

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    // check current ownership
    if (mutex->owner.get() == &ppu)
    {
        if (mutex->recursive)
        {
            if (mutex->recursive_count == 0xffffffffu)
            {
                return CELL_EKRESOURCE;
            }

            mutex->recursive_count++;

            return CELL_OK;
        }

        return CELL_EDEADLK;
    }

    // lock immediately if not locked
    if (!mutex->owner)
    {
        mutex->owner = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

        return CELL_OK;
    }

    // add waiter; protocol is ignored in current implementation
    sleep_queue_entry_t waiter(ppu, mutex->sq);

    while (!ppu.unsignal())
    {
        CHECK_EMU_STATUS;

        if (timeout)
        {
            const u64 passed = get_system_time() - start_time;

            if (passed >= timeout)
            {
                return CELL_ETIMEDOUT;
            }

            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
        else
        {
            ppu.cv.wait(lv2_lock);
        }
    }

    // new owner must be set when unlocked
    if (mutex->owner.get() != &ppu)
    {
        throw EXCEPTION("Unexpected mutex owner");
    }

    return CELL_OK;
}
s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
{
    sys_cond.Log("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);

    const u64 start_time = get_system_time();

    LV2_LOCK;

    const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);

    if (!cond)
    {
        return CELL_ESRCH;
    }

    const auto thread = Emu.GetCPU().GetThread(CPU.GetId());

    if (cond->mutex->owner.owner_before(thread) || thread.owner_before(cond->mutex->owner)) // check equality
    {
        return CELL_EPERM;
    }

    // add waiter; protocol is ignored in current implementation
    cond->waiters.emplace(CPU.GetId());

    // unlock mutex
    cond->mutex->owner.reset();

    if (cond->mutex->waiters)
    {
        cond->mutex->cv.notify_one();
    }

    // save recursive value
    const u32 recursive_value = cond->mutex->recursive_count.exchange(0);

    while (!cond->mutex->owner.expired() || !cond->signaled || cond->waiters.count(CPU.GetId()))
    {
        const bool is_timedout = timeout && get_system_time() - start_time > timeout;

        // check timeout
        if (is_timedout && cond->mutex->owner.expired())
        {
            // cancel waiting if the mutex is free, restore its owner and recursive value
            cond->mutex->owner = thread;
            cond->mutex->recursive_count = recursive_value;

            if (!cond->waiters.erase(CPU.GetId()))
            {
                throw __FUNCTION__;
            }

            return CELL_ETIMEDOUT;
        }

        if (Emu.IsStopped())
        {
            sys_cond.Warning("sys_cond_wait(id=0x%x) aborted", cond_id);

            return CELL_OK;
        }

        // wait on appropriate condition variable
        (cond->signaled || is_timedout ? cond->mutex->cv : cond->cv).wait_for(lv2_lock, std::chrono::milliseconds(1));
    }

    // reown the mutex and restore its recursive value
    cond->mutex->owner = thread;
    cond->mutex->recursive_count = recursive_value;

    cond->signaled--;

    return CELL_OK;
}
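/*
 * Illustrative note (not RPCS3 code): the "check equality" test above uses the
 * standard owner_before idiom -- std::weak_ptr has no operator==, so two smart
 * pointers share a control block exactly when neither orders before the other.
 * Standalone sketch; sketch::same_owner is a hypothetical helper.
 */
#include <memory>

namespace sketch
{
    template <typename T>
    bool same_owner(const std::weak_ptr<T>& a, const std::shared_ptr<T>& b)
    {
        // neither orders before the other => equivalent owners
        return !a.owner_before(b) && !b.owner_before(a);
    }
}

// the syscall's `if (x.owner_before(y) || y.owner_before(x)) return CELL_EPERM;`
// is the negation: reject the caller when it does NOT own the bound mutex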