// Cancels a mutex: wakes every waiting thread with SCE_KERNEL_ERROR_WAIT_CANCEL
// and forces the lock state to the given count (<= 0 means fully unlocked).
// numWaitThreadsPtr, if a valid guest address, receives the number of threads
// that were still waiting at cancel time.  Returns 0 on success, or an error
// code for a bad UID / invalid count.
int sceKernelCancelMutex(SceUID uid, int count, u32 numWaitThreadsPtr) {
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(uid, error);
	if (mutex) {
		// count <= 0 always means "reset to unlocked", so it needs no lock check.
		bool lockable = count <= 0 || __KernelLockMutexCheck(mutex, count, error);
		if (!lockable) {
			// May still be okay.  As long as the count/etc. are valid.
			// Overflow / already-locked are tolerated here: cancel overrides the
			// current lock state anyway.  Other errors (e.g. bad count) bail out.
			if (error != 0 && error != PSP_MUTEX_ERROR_LOCK_OVERFLOW && error != PSP_MUTEX_ERROR_ALREADY_LOCKED) {
				DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x): invalid count", uid, count, numWaitThreadsPtr);
				return error;
			}
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);

		// Remove threads no longer waiting on this first (so the numWaitThreads value is correct.)
		HLEKernel::CleanupWaitingThreads(WAITTYPE_MUTEX, uid, mutex->waitingThreads);

		// Report the waiter count before we wake them all below.
		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32((u32)mutex->waitingThreads.size(), numWaitThreadsPtr);

		// Wake every waiter with the cancel error; track whether anyone actually woke.
		bool wokeThreads = false;
		for (auto iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_CANCEL);

		// Drop any bookkeeping tying the old owner to this mutex.
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		// Apply the requested final lock state: unlocked, or locked by the
		// current thread at the given count.
		if (count <= 0) {
			mutex->nm.lockLevel = 0;
			mutex->nm.lockThread = -1;
		} else
			__KernelMutexAcquireLock(mutex, count);

		// If anyone woke up, a higher-priority thread may now be runnable.
		if (wokeThreads)
			hleReSchedule("mutex canceled");

		return 0;
	} else {
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);
		return error;
	}
}
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error) { if (!__KernelLockMutexCheck(mutex, count, error)) return false; if (mutex->nm.lockLevel == 0) { __KernelMutexAcquireLock(mutex, count); // Nobody had it locked - no need to block return true; } if (mutex->nm.lockThread == __KernelGetCurThread()) { // __KernelLockMutexCheck() would've returned an error, so this must be recursive. mutex->nm.lockLevel += count; return true; } return false; }
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout) int sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr) { DEBUG_LOG(HLE, "sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr); u32 error; Mutex *mutex = kernelObjects.Get<Mutex>(id, error); if (!__KernelLockMutexCheck(mutex, count, error)) { if (error) return error; SceUID threadID = __KernelGetCurThread(); // May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates. if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end()) mutex->waitingThreads.push_back(threadID); __KernelWaitMutex(mutex, timeoutPtr); __KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true, "mutex waited"); // Return value will be overwritten by wait. return 0; } else { if (__KernelCurHasReadyCallbacks()) { // Might actually end up having to wait, so set the timeout. __KernelWaitMutex(mutex, timeoutPtr); __KernelWaitCallbacksCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr); // Return value will be written to callback's v0, but... that's probably fine? } else __KernelLockMutex(mutex, count, error); return 0; } }