// Called when a thread terminates: detach it from any mutex it was blocked on,
// then force-release every mutex it still held so other waiters can proceed.
void __KernelMutexThreadEnd(SceUID threadID)
{
	u32 error;

	// If the dying thread was blocked on a mutex, drop it from that wait list now.
	SceUID waitingMutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	if (waitingMutexID)
	{
		Mutex *waitMutex = kernelObjects.Get<Mutex>(waitingMutexID, error);
		if (waitMutex)
		{
			// Erase-remove: delete every occurrence of threadID from the wait list.
			waitMutex->waitingThreads.erase(
				std::remove(waitMutex->waitingThreads.begin(), waitMutex->waitingThreads.end(), threadID),
				waitMutex->waitingThreads.end());
		}
	}

	// Walk every mutex recorded as held by this thread and unlock it.
	std::pair<MutexMap::iterator, MutexMap::iterator> held = mutexHeldLocks.equal_range(threadID);
	MutexMap::iterator iter = held.first;
	while (iter != held.second)
	{
		// Advance before unlocking — __KernelUnlockMutex() erases the current
		// entry from mutexHeldLocks, which would invalidate this iterator.
		SceUID heldMutexID = (iter++)->second;
		Mutex *heldMutex = kernelObjects.Get<Mutex>(heldMutexID, error);
		if (heldMutex)
		{
			// Clear any recursive lock depth so the unlock fully releases it.
			heldMutex->nm.lockLevel = 0;
			__KernelUnlockMutex(heldMutex, error);
		}
	}
}
// int sceKernelUnlockMutex(SceUID id, int count) int sceKernelUnlockMutex(SceUID id, int count) { DEBUG_LOG(HLE, "sceKernelUnlockMutex(%i, %i)", id, count); u32 error; Mutex *mutex = kernelObjects.Get<Mutex>(id, error); if (error) return error; if (count <= 0) return SCE_KERNEL_ERROR_ILLEGAL_COUNT; if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1) return SCE_KERNEL_ERROR_ILLEGAL_COUNT; if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread()) return PSP_MUTEX_ERROR_NOT_LOCKED; if (mutex->nm.lockLevel < count) return PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW; mutex->nm.lockLevel -= count; if (mutex->nm.lockLevel == 0) { if (__KernelUnlockMutex(mutex, error)) hleReSchedule("mutex unlocked"); } return 0; }
// int sceKernelUnlockMutex(SceUID id, int count) // void because it changes threads. void sceKernelUnlockMutex(SceUID id, int count) { DEBUG_LOG(HLE,"sceKernelUnlockMutex(%i, %i)", id, count); u32 error; Mutex *mutex = kernelObjects.Get<Mutex>(id, error); if (!error) { if (count <= 0) error = SCE_KERNEL_ERROR_ILLEGAL_COUNT; else if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1) error = SCE_KERNEL_ERROR_ILLEGAL_COUNT; else if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread()) error = PSP_MUTEX_ERROR_NOT_LOCKED; else if (mutex->nm.lockLevel < count) error = PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW; } if (error) { RETURN(error); return; } mutex->nm.lockLevel -= count; RETURN(0); if (mutex->nm.lockLevel == 0) { __KernelUnlockMutex(mutex, error); __KernelReSchedule("mutex unlocked"); } }