// Stub for _sys_printf: the variadic arguments are not decoded yet.
s32 _sys_printf(vm::ptr<const char> fmt) // va_args...
{
	sysPrxForUser.Todo("_sys_printf(fmt=*0x%x, ...)", fmt);

	// Reaching this function most likely means a guest-side assertion
	// fired; dump the raw format string and halt emulation.
	const auto text = fmt.get_ptr();
	sysPrxForUser.Fatal("_sys_printf: \n%s", text);
	Emu.Pause();

	return CELL_OK;
}
// Userspace half of sys_lwcond_wait: releases the associated lightweight
// mutex (which the caller must own) and blocks on the condition queue via
// syscall, then restores mutex ownership on wakeup.
// lwcond  - lightweight condition variable in guest memory
// timeout - wait timeout (0 presumably means infinite -- TODO confirm)
// Returns CELL_OK, CELL_EPERM, CELL_ETIMEDOUT, CELL_ESRCH or a syscall error.
s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
	sysPrxForUser.Log("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

	if (lwmutex->owner.read_relaxed() != tid)
	{
		// if not owner of the mutex
		return CELL_EPERM;
	}

	// save old recursive value (restored after the wait completes)
	const be_t<u32> recursive_value = lwmutex->recursive_count;

	// set special value: hand the mutex over to the kernel while we sleep
	lwmutex->owner = { lwmutex::reserved };
	lwmutex->recursive_count = 0;

	// call the syscall (atomically enqueues this thread and sleeps)
	s32 res = _sys_lwcond_queue_wait(CPU, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

	if (res == CELL_OK || res == CELL_ESRCH)
	{
		if (res == CELL_OK)
		{
			// woken with the mutex granted: drop the waiter count
			lwmutex->all_info--;
		}

		// restore owner and recursive value
		const auto old = lwmutex->owner.exchange(tid);
		lwmutex->recursive_count = recursive_value;

		// the kernel should have left the mutex in the reserved state;
		// anything else indicates a protocol violation
		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed (lwmutex->owner=0x%x)", lwcond, old);
		}

		return res;
	}

	if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
	{
		// mutex was not handed back to us; re-acquire it the normal way
		const s32 res2 = sys_lwmutex_lock(CPU, lwmutex, 0);

		if (res2 == CELL_OK)
		{
			// if successfully locked, restore recursive value
			lwmutex->recursive_count = recursive_value;

			// CELL_EBUSY from the syscall is presumably a spurious/signalled
			// wakeup and is reported as success -- TODO confirm
			return res == CELL_EBUSY ? CELL_OK : res;
		}

		return res2;
	}

	if (res == CELL_EDEADLK)
	{
		// timed out but the kernel still granted the mutex:
		// restore owner and recursive value
		const auto old = lwmutex->owner.exchange(tid);
		lwmutex->recursive_count = recursive_value;

		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed after timeout (lwmutex->owner=0x%x)", lwcond, old);
		}

		// report the internal CELL_EDEADLK result as a plain timeout
		return CELL_ETIMEDOUT;
	}

	sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): unexpected syscall result (0x%x)", lwcond, res);
	return res;
}
// Non-blocking acquire of a lightweight mutex (userspace fast path,
// falling back to the trylock syscall only in the reserved state).
s32 sys_lwmutex_trylock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
	sysPrxForUser.Log("sys_lwmutex_trylock(lwmutex=*0x%x)", lwmutex);

	const be_t<u32> self = be_t<u32>::make(CPU.GetId());

	// single CAS attempt: free -> current thread id
	const be_t<u32> prev = lwmutex->owner.compare_and_swap(lwmutex::free, self);

	if (prev.data() == se32(lwmutex_free))
	{
		// fast path: the mutex was free and is now ours
		return CELL_OK;
	}

	if (prev.data() == self.data())
	{
		// we already hold the mutex: recursive locking attempt
		if ((lwmutex->attribute.data() & se32(SYS_SYNC_RECURSIVE)) == 0)
		{
			// the mutex was not created recursive
			return CELL_EDEADLK;
		}

		if (lwmutex->recursive_count.data() == -1)
		{
			// recursion limit reached
			return CELL_EKRESOURCE;
		}

		// recursive lock granted
		lwmutex->recursive_count++;
		lwmutex->lock_var.read_sync();

		return CELL_OK;
	}

	if (prev.data() == se32(lwmutex_dead))
	{
		// mutex object was destroyed or is invalid
		return CELL_EINVAL;
	}

	if (prev.data() == se32(lwmutex_reserved))
	{
		// contended state held by the kernel: defer to the syscall
		const s32 res = _sys_lwmutex_trylock(lwmutex->sleep_queue);

		if (res == CELL_OK)
		{
			// claim the ownership the syscall granted us
			auto granted = lwmutex->owner.exchange(self);

			if (granted.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
			{
				sysPrxForUser.Fatal("sys_lwmutex_trylock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, granted);
			}
		}

		return res;
	}

	// held by some other thread
	return CELL_EBUSY;
}
// Blocking acquire of a lightweight mutex: userspace CAS fast path, short
// spin, then a kernel-side wait via syscall.
// lwmutex - lightweight mutex in guest memory
// timeout - wait timeout forwarded to the syscall (0 presumably means
//           infinite -- TODO confirm)
// Returns CELL_OK, CELL_EDEADLK, CELL_EKRESOURCE, CELL_EINVAL or a
// syscall error.
s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
{
	sysPrxForUser.Log("sys_lwmutex_lock(lwmutex=*0x%x, timeout=0x%llx)", lwmutex, timeout);

	const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

	// try to lock lightweight mutex: CAS free -> current thread id
	const be_t<u32> old_owner = lwmutex->owner.compare_and_swap(lwmutex::free, tid);

	if (old_owner.data() == se32(lwmutex_free))
	{
		// locking succeeded on the fast path
		return CELL_OK;
	}

	if (old_owner.data() == tid.data())
	{
		// recursive locking attempt (we already own the mutex)
		if ((lwmutex->attribute.data() & se32(SYS_SYNC_RECURSIVE)) == 0)
		{
			// if not recursive
			return CELL_EDEADLK;
		}

		if (lwmutex->recursive_count.data() == -1)
		{
			// if recursion limit reached
			return CELL_EKRESOURCE;
		}

		// recursive locking succeeded
		lwmutex->recursive_count++;
		lwmutex->lock_var.read_sync();

		return CELL_OK;
	}

	if (old_owner.data() == se32(lwmutex_dead))
	{
		// invalid or deleted mutex
		return CELL_EINVAL;
	}

	// short spin before falling back to the kernel (300 iterations,
	// presumably an empirically chosen constant -- TODO confirm)
	for (u32 i = 0; i < 300; i++)
	{
		if (lwmutex->owner.read_relaxed().data() == se32(lwmutex_free))
		{
			if (lwmutex->owner.compare_and_swap_test(lwmutex::free, tid))
			{
				// locking succeeded during the spin
				return CELL_OK;
			}
		}
	}

	// atomically increment waiter value using 64 bit op
	lwmutex->all_info++;

	// last chance: the owner may have released the mutex while we were
	// registering as a waiter
	if (lwmutex->owner.compare_and_swap_test(lwmutex::free, tid))
	{
		// locking succeeded; undo the waiter increment
		lwmutex->all_info--;
		return CELL_OK;
	}

	// lock using the syscall (sleeps on the kernel-side queue)
	const s32 res = _sys_lwmutex_lock(lwmutex->sleep_queue, timeout);

	lwmutex->all_info--;

	if (res == CELL_OK)
	{
		// locking succeeded: the kernel hands the mutex over in the
		// reserved state; claim it for this thread
		auto old = lwmutex->owner.exchange(tid);

		if (old.data() != se32(lwmutex_reserved) && !Emu.IsStopped())
		{
			sysPrxForUser.Fatal("sys_lwmutex_lock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, old);
		}

		return CELL_OK;
	}

	if (res == CELL_EBUSY && lwmutex->attribute.data() & se32(SYS_SYNC_RETRY))
	{
		// TODO (protocol is ignored in current implementation)
		throw __FUNCTION__;
	}

	return res;
}