// Attempt to take the writer lock without blocking.
// Returns CELL_ESRCH for an unknown id, CELL_EDEADLK if the caller already
// holds the writer lock, CELL_EBUSY if the lock is held or contended.
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

	LV2_LOCK;

	const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	const auto self = CPU.GetId();

	// Taking the writer lock twice from the same thread would deadlock
	if (rwlock->writer == self)
	{
		return CELL_EDEADLK;
	}

	// Fail immediately if any reader, an active writer, or a queued writer exists
	if (rwlock->readers || rwlock->writer || rwlock->wwaiters)
	{
		return CELL_EBUSY;
	}

	rwlock->writer = self;

	return CELL_OK;
}
// Release the writer lock held by the calling thread and wake waiters.
// A single waiting writer is preferred over the set of waiting readers.
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

	LV2_LOCK;

	const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	// Only the owning thread may release the writer lock
	if (rwlock->writer != CPU.GetId())
	{
		return CELL_EPERM;
	}

	rwlock->writer = 0;

	// Writers first: wake one queued writer; otherwise wake all queued readers
	if (rwlock->wwaiters)
	{
		rwlock->wcv.notify_one();
	}
	else if (rwlock->rwaiters)
	{
		rwlock->rcv.notify_all();
	}

	return CELL_OK;
}
// Destroy a lightweight mutex. The mutex must be lockable by the caller:
// it is first acquired via trylock, then the kernel-side sleep queue is
// destroyed, and finally the user-space owner word is marked dead.
s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
	sysPrxForUser.Log("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);

	// check to prevent recursive locking in the next call
	if (lwmutex->owner.read_relaxed() == CPU.GetId())
	{
		return CELL_EBUSY;
	}

	// attempt to lock the mutex; any failure (e.g. busy/invalid) aborts destruction
	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
	{
		return res;
	}

	// call the syscall to destroy the kernel-side sleep queue
	if (s32 res = _sys_lwmutex_destroy(lwmutex->sleep_queue))
	{
		// unlock the mutex if failed, leaving it usable again
		sys_lwmutex_unlock(CPU, lwmutex);

		return res;
	}

	// deleting succeeded: mark the owner word so later lock attempts see a dead mutex
	lwmutex->owner.exchange(lwmutex::dead);

	return CELL_OK;
}
// Syscall variant: store the calling PPU thread's id into guest memory.
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<be_t<u64>> thread_id)
{
	sys_ppu_thread.Log("sys_ppu_thread_get_id(thread_id_addr=0x%x)", thread_id.addr());

	const auto id = CPU.GetId();
	*thread_id = id;

	return CELL_OK;
}
// sysPrxForUser variant: store the calling PPU thread's id into guest memory.
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<u64> thread_id)
{
	sysPrxForUser.Log("sys_ppu_thread_get_id(thread_id=*0x%x)", thread_id);

	const auto id = CPU.GetId();
	*thread_id = id;

	return CELL_OK;
}
// Signal a specific PPU thread waiting on a lightweight condition variable.
// Three paths exist depending on whether the caller owns the associated
// lwmutex: owned, trylock-succeeded, or not lockable; the last argument to
// _sys_lwcond_signal (1/2/3) selects the corresponding kernel-side mode.
s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
{
	sysPrxForUser.Log("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=0x%x)", lwcond, ppu_thread_id);

	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

	if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
	{
		// TODO (protocol ignored)
		//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
	}

	if (lwmutex->owner.read_relaxed() == CPU.GetId())
	{
		// if owns the mutex
		lwmutex->all_info++;

		// call the syscall (mode 1)
		if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
		{
			// roll back the counter increment on failure
			lwmutex->all_info--;

			return res;
		}

		return CELL_OK;
	}

	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
	{
		// if locking failed

		if (res != CELL_EBUSY)
		{
			// any failure other than contention is reported as an invalid lwcond
			return CELL_ESRCH;
		}

		// call the syscall (mode 2, without holding the mutex)
		return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
	}

	// if locking succeeded
	lwmutex->all_info++;

	// call the syscall (mode 3, mutex acquired by this function)
	if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
	{
		lwmutex->all_info--;

		// unlock the lightweight mutex
		sys_lwmutex_unlock(CPU, lwmutex);

		return res;
	}

	return CELL_OK;
}
// Signal every thread waiting on a lightweight condition variable.
// _sys_lwcond_signal_all returns the number of woken threads (positive),
// which is added to the mutex waiter counter (all_info), or an error (<= 0).
s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
{
	sysPrxForUser.Log("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

	if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
	{
		// TODO (protocol ignored)
		//return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
	}

	if (lwmutex->owner.read_relaxed() == CPU.GetId())
	{
		// if owns the mutex, call the syscall
		const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

		if (res <= 0)
		{
			// return error or CELL_OK
			return res;
		}

		// account for the woken threads now queued on the mutex
		lwmutex->all_info += res;

		return CELL_OK;
	}

	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
	{
		// if locking failed

		if (res != CELL_EBUSY)
		{
			// any failure other than contention is reported as an invalid lwcond
			return CELL_ESRCH;
		}

		// call the syscall (mode 2, without holding the mutex)
		return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
	}

	// if locking succeeded, call the syscall
	s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

	if (res > 0)
	{
		lwmutex->all_info += res;

		res = CELL_OK;
	}

	// unlock mutex acquired above (res from the signal call is preserved)
	sys_lwmutex_unlock(CPU, lwmutex);

	return res;
}
// Acquire the writer lock, blocking until it is free or the timeout elapses.
// timeout is in microseconds; 0 means wait forever.
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout)
{
	sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	const auto self = CPU.GetId();

	// Re-locking from the owning thread would never complete
	if (rwlock->writer == self)
	{
		return CELL_EDEADLK;
	}

	// protocol is ignored in current implementation
	rwlock->wwaiters++;

	// Sleep until no reader and no writer holds the lock
	while (rwlock->readers || rwlock->writer)
	{
		CHECK_EMU_STATUS;

		const bool expired = timeout && get_system_time() - start_time > timeout;

		if (expired)
		{
			// Give up: remove ourselves from the writer-waiter count
			rwlock->wwaiters--;

			return CELL_ETIMEDOUT;
		}

		rwlock->wcv.wait_for(lv2_lock, std::chrono::milliseconds(1));
	}

	rwlock->writer = self;
	rwlock->wwaiters--;

	return CELL_OK;
}
// Unlock a lightweight mutex. Fast path: if there are no waiters, a single
// CAS releases the user-space lock word. Otherwise the owner word is set to
// lwmutex::reserved (syscall-managed state) and the kernel hands the mutex
// to a sleeping waiter via _sys_lwmutex_unlock.
s32 sys_lwmutex_unlock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
	sysPrxForUser.Log("sys_lwmutex_unlock(lwmutex=*0x%x)", lwmutex);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	// check owner
	if (lwmutex->owner.read_relaxed() != tid)
	{
		return CELL_EPERM;
	}

	if (lwmutex->recursive_count.data())
	{
		// recursive unlocking succeeded (outermost lock still held)
		lwmutex->recursive_count--;

		return CELL_OK;
	}

	// ensure that waiter is zero: release only if {owner=tid, waiter=0} still holds
	if (lwmutex->lock_var.compare_and_swap_test({ tid, lwmutex::zero }, { lwmutex::free, lwmutex::zero }))
	{
		// unlocking succeeded
		return CELL_OK;
	}

	if (lwmutex->attribute.data() & se32(SYS_SYNC_RETRY))
	{
		// TODO (protocol is ignored in current implementation)
	}

	// set special value so concurrent lockers defer to the syscall path
	lwmutex->owner.exchange(lwmutex::reserved);

	// call the syscall; only ESRCH (deleted mutex) is propagated to the caller
	if (_sys_lwmutex_unlock(lwmutex->sleep_queue) == CELL_ESRCH)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Wait on a condition variable: atomically releases the bound mutex, sleeps
// until signaled (or until the timeout, in microseconds, expires), then
// re-acquires the mutex with its saved recursion depth before returning.
s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
{
	sys_cond.Log("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);

	if (!cond)
	{
		return CELL_ESRCH;
	}

	const auto thread = Emu.GetCPU().GetThread(CPU.GetId());

	// owner_before both ways tests shared_ptr equivalence without comparing raw pointers
	if (cond->mutex->owner.owner_before(thread) || thread.owner_before(cond->mutex->owner)) // check equality
	{
		// the caller must hold the mutex bound to this condition variable
		return CELL_EPERM;
	}

	// add waiter; protocol is ignored in current implementation
	cond->waiters.emplace(CPU.GetId());

	// unlock mutex
	cond->mutex->owner.reset();

	if (cond->mutex->waiters)
	{
		cond->mutex->cv.notify_one();
	}

	// save recursive value (restored when the mutex is re-acquired below)
	const u32 recursive_value = cond->mutex->recursive_count.exchange(0);

	// loop until: mutex is free AND we have been signaled AND removed from the waiter set
	while (!cond->mutex->owner.expired() || !cond->signaled || cond->waiters.count(CPU.GetId()))
	{
		const bool is_timedout = timeout && get_system_time() - start_time > timeout;

		// check timeout
		if (is_timedout && cond->mutex->owner.expired())
		{
			// cancel waiting if the mutex is free, restore its owner and recursive value
			cond->mutex->owner = thread;
			cond->mutex->recursive_count = recursive_value;

			if (!cond->waiters.erase(CPU.GetId()))
			{
				// inconsistent waiter bookkeeping: this thread should still be registered
				throw __FUNCTION__;
			}

			return CELL_ETIMEDOUT;
		}

		if (Emu.IsStopped())
		{
			sys_cond.Warning("sys_cond_wait(id=0x%x) aborted", cond_id);
			return CELL_OK;
		}

		// wait on appropriate condition variable: the mutex cv once signaled/timed out
		// (waiting to re-acquire), otherwise the condition's own cv
		(cond->signaled || is_timedout ? cond->mutex->cv : cond->cv).wait_for(lv2_lock, std::chrono::milliseconds(1));
	}

	// reown the mutex and restore its recursive value
	cond->mutex->owner = thread;
	cond->mutex->recursive_count = recursive_value;

	// consume one pending signal
	cond->signaled--;

	return CELL_OK;
}
// Wait on a lightweight condition variable. The owned lwmutex is handed to
// the kernel (owner set to lwmutex::reserved, recursion depth saved), the
// thread sleeps in _sys_lwcond_queue_wait, and ownership plus the recursive
// count are restored on wake-up according to the syscall's result code.
s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
	sysPrxForUser.Log("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

	if (lwmutex->owner.read_relaxed() != tid)
	{
		// if not owner of the mutex
		return CELL_EPERM;
	}

	// save old recursive value
	const be_t<u32> recursive_value = lwmutex->recursive_count;

	// set special value: the mutex is now managed by the syscall path
	lwmutex->owner = { lwmutex::reserved };
	lwmutex->recursive_count = 0;

	// call the syscall
	s32 res = _sys_lwcond_queue_wait(CPU, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

	if (res == CELL_OK || res == CELL_ESRCH)
	{
		if (res == CELL_OK)
		{
			// successfully signaled: one fewer waiter recorded on the mutex
			lwmutex->all_info--;
		}

		// restore owner and recursive value
		const auto old = lwmutex->owner.exchange(tid);
		lwmutex->recursive_count = recursive_value;

		// the kernel should have left the owner word as lwmutex::reserved
		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed (lwmutex->owner=0x%x)", lwcond, old);
		}

		return res;
	}

	if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
	{
		// the mutex was not returned to us: re-lock it explicitly
		const s32 res2 = sys_lwmutex_lock(CPU, lwmutex, 0);

		if (res2 == CELL_OK)
		{
			// if successfully locked, restore recursive value
			lwmutex->recursive_count = recursive_value;

			// EBUSY here means "signaled but mutex contended", reported as success
			return res == CELL_EBUSY ? CELL_OK : res;
		}

		return res2;
	}

	if (res == CELL_EDEADLK)
	{
		// restore owner and recursive value
		const auto old = lwmutex->owner.exchange(tid);
		lwmutex->recursive_count = recursive_value;

		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed after timeout (lwmutex->owner=0x%x)", lwcond, old);
		}

		// EDEADLK from the kernel maps to a timeout for the caller
		return CELL_ETIMEDOUT;
	}

	sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): unexpected syscall result (0x%x)", lwcond, res);
	return res;
}
// Try to lock a lightweight mutex without blocking. Fast path is a single
// CAS on the user-space owner word; the previous owner value then selects
// recursion, dead-mutex, syscall-managed (reserved), or busy handling.
s32 sys_lwmutex_trylock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
	sysPrxForUser.Log("sys_lwmutex_trylock(lwmutex=*0x%x)", lwmutex);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	// try to lock lightweight mutex
	const be_t<u32> old_owner = lwmutex->owner.compare_and_swap(lwmutex::free, tid);

	if (old_owner.data() == se32(lwmutex_free))
	{
		// locking succeeded
		return CELL_OK;
	}

	if (old_owner.data() == tid.data())
	{
		// recursive locking

		if ((lwmutex->attribute.data() & se32(SYS_SYNC_RECURSIVE)) == 0)
		{
			// if not recursive
			return CELL_EDEADLK;
		}

		if (lwmutex->recursive_count.data() == -1)
		{
			// if recursion limit reached
			return CELL_EKRESOURCE;
		}

		// recursive locking succeeded
		lwmutex->recursive_count++;
		lwmutex->lock_var.read_sync();

		return CELL_OK;
	}

	if (old_owner.data() == se32(lwmutex_dead))
	{
		// invalid or deleted mutex
		return CELL_EINVAL;
	}

	if (old_owner.data() == se32(lwmutex_reserved))
	{
		// should be locked by the syscall
		const s32 res = _sys_lwmutex_trylock(lwmutex->sleep_queue);

		if (res == CELL_OK)
		{
			// locking succeeded: take over ownership from the reserved state
			auto old = lwmutex->owner.exchange(tid);

			if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
			{
				sysPrxForUser.Fatal("sys_lwmutex_trylock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, old);
			}
		}

		return res;
	}

	// locked by another thread
	return CELL_EBUSY;
}
// Lock a lightweight mutex, blocking if necessary. Tries a CAS fast path,
// handles recursion and dead mutexes, spins briefly, then registers as a
// waiter (all_info) and falls back to the _sys_lwmutex_lock syscall with the
// given timeout (0 = wait forever).
s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
{
	sysPrxForUser.Log("sys_lwmutex_lock(lwmutex=*0x%x, timeout=0x%llx)", lwmutex, timeout);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	// try to lock lightweight mutex
	const be_t<u32> old_owner = lwmutex->owner.compare_and_swap(lwmutex::free, tid);

	if (old_owner.data() == se32(lwmutex_free))
	{
		// locking succeeded
		return CELL_OK;
	}

	if (old_owner.data() == tid.data())
	{
		// recursive locking

		if ((lwmutex->attribute.data() & se32(SYS_SYNC_RECURSIVE)) == 0)
		{
			// if not recursive
			return CELL_EDEADLK;
		}

		if (lwmutex->recursive_count.data() == -1)
		{
			// if recursion limit reached
			return CELL_EKRESOURCE;
		}

		// recursive locking succeeded
		lwmutex->recursive_count++;
		lwmutex->lock_var.read_sync();

		return CELL_OK;
	}

	if (old_owner.data() == se32(lwmutex_dead))
	{
		// invalid or deleted mutex
		return CELL_EINVAL;
	}

	// busy-wait: retry the CAS for up to 300 iterations before sleeping
	for (u32 i = 0; i < 300; i++)
	{
		if (lwmutex->owner.read_relaxed().data() == se32(lwmutex_free))
		{
			if (lwmutex->owner.compare_and_swap_test(lwmutex::free, tid))
			{
				// locking succeeded
				return CELL_OK;
			}
		}
	}

	// atomically increment waiter value using 64 bit op
	lwmutex->all_info++;

	// last-chance CAS after announcing ourselves as a waiter
	if (lwmutex->owner.compare_and_swap_test(lwmutex::free, tid))
	{
		// locking succeeded
		lwmutex->all_info--;

		return CELL_OK;
	}

	// lock using the syscall
	const s32 res = _sys_lwmutex_lock(lwmutex->sleep_queue, timeout);

	lwmutex->all_info--;

	if (res == CELL_OK)
	{
		// locking succeeded: take over ownership from the reserved state
		auto old = lwmutex->owner.exchange(tid);

		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwmutex_lock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, old);
		}

		return CELL_OK;
	}

	if (res == CELL_EBUSY && lwmutex->attribute.data() & se32(SYS_SYNC_RETRY))
	{
		// TODO (protocol is ignored in current implementation)
		throw __FUNCTION__;
	}

	return res;
}