s32 _sys_lwcond_queue_wait(PPUThread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	sys_lwcond.Log("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto cond = idm::get<lv2_lwcond_t>(lwcond_id);
	const auto mutex = idm::get<lv2_lwmutex_t>(lwmutex_id);

	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}

	// finalize unlocking the mutex
	mutex->unlock(lv2_lock);

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, cond->sq);

	// potential mutex waiter (not added immediately)
	sleep_queue_entry_t mutex_waiter(ppu, mutex->sq, defer_sleep);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout && waiter)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				// try to reown the mutex if timed out
				if (mutex->signaled)
				{
					mutex->signaled--;

					return CELL_EDEADLK;
				}
				else
				{
					return CELL_ETIMEDOUT;
				}
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	// return cause
	return ppu.GPR[3] ? CELL_EBUSY : CELL_OK;
}
s32 sys_rwlock_rlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
{
	sys_rwlock.Log("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (!rwlock->writer && rwlock->wsq.empty())
	{
		if (!++rwlock->readers)
		{
			throw EXCEPTION("Too many readers");
		}

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, rwlock->rsq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	if (rwlock->writer || !rwlock->readers)
	{
		throw EXCEPTION("Unexpected");
	}

	return CELL_OK;
}
s32 _sys_lwmutex_lock(PPUThread& ppu, u32 lwmutex_id, u64 timeout)
{
	sys_lwmutex.Log("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)", lwmutex_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto mutex = idm::get<lv2_lwmutex_t>(lwmutex_id);

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (mutex->signaled)
	{
		mutex->signaled--;

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, mutex->sq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	return CELL_OK;
}
s32 sys_semaphore_wait(PPUThread& ppu, u32 sem_id, u64 timeout)
{
	sys_semaphore.Log("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto sem = idm::get<lv2_sema_t>(sem_id);

	if (!sem)
	{
		return CELL_ESRCH;
	}

	if (sem->value > 0)
	{
		sem->value--;

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, sem->sq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	return CELL_OK;
}
s32 sys_rwlock_wlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
{
	sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock->writer.get() == &ppu)
	{
		return CELL_EDEADLK;
	}

	if (!rwlock->readers && !rwlock->writer)
	{
		rwlock->writer = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, rwlock->wsq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				// if the last waiter quit the writer sleep queue, readers must acquire the lock
				if (!rwlock->writer && rwlock->wsq.size() == 1)
				{
					if (rwlock->wsq.front().get() != &ppu)
					{
						throw EXCEPTION("Unexpected");
					}

					rwlock->wsq.clear();

					rwlock->notify_all(lv2_lock);
				}

				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	if (rwlock->readers || rwlock->writer.get() != &ppu)
	{
		throw EXCEPTION("Unexpected");
	}

	return CELL_OK;
}
s32 sys_event_queue_receive(PPUThread& ppu, u32 equeue_id, vm::ptr<sys_event_t> dummy_event, u64 timeout)
{
	sys_event.Log("sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx)", equeue_id, dummy_event, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto queue = idm::get<lv2_event_queue_t>(equeue_id);

	if (!queue)
	{
		return CELL_ESRCH;
	}

	if (queue->type != SYS_PPU_QUEUE)
	{
		return CELL_EINVAL;
	}

	if (queue->events.size())
	{
		// event data is returned in registers (dummy_event is not used)
		std::tie(ppu.GPR[4], ppu.GPR[5], ppu.GPR[6], ppu.GPR[7]) = queue->events.front();

		queue->events.pop_front();

		return CELL_OK;
	}

	// cause (if cancelled) will be returned in r3
	ppu.GPR[3] = 0;

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, queue->sq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	if (ppu.GPR[3])
	{
		if (idm::check<lv2_event_queue_t>(equeue_id))
		{
			throw EXCEPTION("Unexpected");
		}

		return CELL_ECANCELED;
	}

	// r4-r7 registers must be set by push()
	return CELL_OK;
}
s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
{
	sys_mutex.Log("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto mutex = idm::get<lv2_mutex_t>(mutex_id);

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	// check current ownership
	if (mutex->owner.get() == &ppu)
	{
		if (mutex->recursive)
		{
			if (mutex->recursive_count == 0xffffffffu)
			{
				return CELL_EKRESOURCE;
			}

			mutex->recursive_count++;

			return CELL_OK;
		}

		return CELL_EDEADLK;
	}

	// lock immediately if not locked
	if (!mutex->owner)
	{
		mutex->owner = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, mutex->sq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	// new owner must be set when unlocked
	if (mutex->owner.get() != &ppu)
	{
		throw EXCEPTION("Unexpected mutex owner");
	}

	return CELL_OK;
}
s32 sys_event_flag_wait(PPUThread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout)
{
	sys_event_flag.Log("sys_event_flag_wait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x, timeout=0x%llx)", id, bitptn, mode, result, timeout);

	const u64 start_time = get_system_time();

	// If this syscall is called through the SC instruction, these registers must already contain the corresponding values.
	// But let's fix them up anyway (in case of an explicit function call or similar), because these values are used externally.
	ppu.GPR[4] = bitptn;
	ppu.GPR[5] = mode;

	LV2_LOCK;

	if (result) *result = 0; // This is very annoying.

	if (!lv2_event_flag_t::check_mode(mode))
	{
		sys_event_flag.Error("sys_event_flag_wait(): unknown mode (0x%x)", mode);
		return CELL_EINVAL;
	}

	const auto eflag = idm::get<lv2_event_flag_t>(id);

	if (!eflag)
	{
		return CELL_ESRCH;
	}

	if (eflag->type == SYS_SYNC_WAITER_SINGLE && eflag->sq.size() > 0)
	{
		return CELL_EPERM;
	}

	if (eflag->check_pattern(bitptn, mode))
	{
		const u64 pattern = eflag->clear_pattern(bitptn, mode);

		if (result) *result = pattern;

		return CELL_OK;
	}

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, eflag->sq);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				if (result) *result = eflag->pattern;

				return CELL_ETIMEDOUT;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	// load pattern saved upon signaling
	if (result)
	{
		*result = ppu.GPR[4];
	}

	// check cause
	if (ppu.GPR[5] == 0)
	{
		return CELL_ECANCELED;
	}

	return CELL_OK;
}
s32 sys_cond_wait(PPUThread& ppu, u32 cond_id, u64 timeout)
{
	sys_cond.Log("sys_cond_wait(cond_id=0x%x, timeout=0x%llx)", cond_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto cond = idm::get<lv2_cond_t>(cond_id);

	if (!cond)
	{
		return CELL_ESRCH;
	}

	// check current ownership
	if (cond->mutex->owner.get() != &ppu)
	{
		return CELL_EPERM;
	}

	// save the recursive value
	const u32 recursive_value = cond->mutex->recursive_count.exchange(0);

	// unlock the mutex
	cond->mutex->unlock(lv2_lock);

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, cond->sq);

	// potential mutex waiter (not added immediately)
	sleep_queue_entry_t mutex_waiter(ppu, cond->mutex->sq, defer_sleep);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		// timeout is ignored if waiting on the cond var has already been dropped
		if (timeout && waiter)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				// try to reown the mutex and exit if timed out
				if (!cond->mutex->owner)
				{
					cond->mutex->owner = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

					break;
				}

				// drop the condition variable and start waiting on the mutex queue
				mutex_waiter.enter();

				waiter.leave();

				continue;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	// mutex owner is restored after notification or unlocking
	if (cond->mutex->owner.get() != &ppu)
	{
		throw EXCEPTION("Unexpected mutex owner");
	}

	// restore the recursive value
	cond->mutex->recursive_count = recursive_value;

	// check timeout (unclear)
	if (timeout && get_system_time() - start_time > timeout)
	{
		return CELL_ETIMEDOUT;
	}

	return CELL_OK;
}
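// All of the syscalls above share the same timed-wait skeleton: take LV2_LOCK, look up the
// kernel object, try a fast path, enqueue a sleep_queue_entry_t, then block on ppu.cv until
// the thread is signaled or the timeout expires. The helper below is a minimal sketch of that
// shared loop, factored out purely for illustration; the name lv2_timed_wait and the
// on_timeout callback are hypothetical and do not exist in the original code.

// Returns true if the thread was signaled, false if the timeout expired.
// on_timeout runs under lv2_lock right before giving up, so a caller can perform
// per-primitive cleanup (re-owning a mutex, clearing a sleep queue, etc.).
template<typename Lock, typename F>
bool lv2_timed_wait(PPUThread& ppu, Lock& lv2_lock, u64 start_time, u64 timeout, F on_timeout)
{
	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				on_timeout();

				return false;
			}

			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			ppu.cv.wait(lv2_lock);
		}
	}

	return true;
}

// With such a helper, the wait loop of e.g. sys_semaphore_wait would reduce to a single call:
//   if (!lv2_timed_wait(ppu, lv2_lock, start_time, timeout, []{})) return CELL_ETIMEDOUT;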