int sceKernelDeleteMutex(SceUID id) {
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex) {
		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteMutex(%i)", id);

		// Wake every waiter with a "wait deleted" error before destroying the object.
		bool wokeThreads = false;
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_DELETE);

		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		if (wokeThreads)
			hleReSchedule("mutex deleted");

		return kernelObjects.Destroy<Mutex>(id);
	} else {
		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteMutex(%i): invalid mutex", id);
		return error;
	}
}
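// __KernelMutexEraseLock is called above but not shown in this section. A plausible
// sketch, assuming a file-level multimap from thread ID to held mutex UIDs (named
// mutexHeldLocks here, needing <map>); the real implementation may differ.
typedef std::multimap<SceUID, SceUID> MutexMap;
static MutexMap mutexHeldLocks;

void __KernelMutexEraseLock(Mutex *mutex) {
	if (mutex->nm.lockThread != -1) {
		// Drop this mutex from the holding thread's bookkeeping.
		SceUID id = mutex->GetUID();
		auto locked = mutexHeldLocks.equal_range(mutex->nm.lockThread);
		for (auto iter = locked.first; iter != locked.second; ++iter) {
			if (iter->second == id) {
				mutexHeldLocks.erase(iter);
				break;
			}
		}
	}
	mutex->nm.lockThread = -1;
}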
int sceKernelCancelMutex(SceUID uid, int count, u32 numWaitThreadsPtr) {
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(uid, error);
	if (mutex) {
		bool lockable = count <= 0 || __KernelLockMutexCheck(mutex, count, error);
		if (!lockable) {
			// May still be okay.  As long as the count/etc. are valid.
			if (error != 0 && error != PSP_MUTEX_ERROR_LOCK_OVERFLOW && error != PSP_MUTEX_ERROR_ALREADY_LOCKED) {
				DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x): invalid count", uid, count, numWaitThreadsPtr);
				return error;
			}
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);

		// Remove threads no longer waiting on this first (so the numWaitThreads value is correct.)
		HLEKernel::CleanupWaitingThreads(WAITTYPE_MUTEX, uid, mutex->waitingThreads);

		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32((u32)mutex->waitingThreads.size(), numWaitThreadsPtr);

		// Wake all remaining waiters with a "wait canceled" error.
		bool wokeThreads = false;
		for (auto iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_CANCEL);

		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		// count <= 0 resets the mutex to unlocked; otherwise the caller takes the lock.
		if (count <= 0) {
			mutex->nm.lockLevel = 0;
			mutex->nm.lockThread = -1;
		} else
			__KernelMutexAcquireLock(mutex, count);

		if (wokeThreads)
			hleReSchedule("mutex canceled");

		return 0;
	} else {
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x): invalid mutex", uid, count, numWaitThreadsPtr);
		return error;
	}
}
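// __KernelMutexAcquireLock is likewise not shown here. A minimal sketch of what it
// presumably does, given the call sites above: record the new lock level and owner.
// The two-argument overload defaults the owner to the current thread; mutexHeldLocks
// is the same assumed thread->mutex bookkeeping map sketched above.
void __KernelMutexAcquireLock(Mutex *mutex, int count, SceUID thread) {
	mutexHeldLocks.insert(std::make_pair(thread, mutex->GetUID()));
	mutex->nm.lockLevel = count;
	mutex->nm.lockThread = thread;
}

void __KernelMutexAcquireLock(Mutex *mutex, int count) {
	__KernelMutexAcquireLock(mutex, count, __KernelGetCurThread());
}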
void __KernelMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue) {
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Mutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<Mutex>(mutexID, error);
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end()) {
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock.
	if (mutex->nm.lockThread == -1 && __KernelUnlockMutexForThread(mutex, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0) {
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	} else {
		// Reschedule the remaining timeout and put the thread back in the wait list.
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, mutexWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(SCEKERNEL, "sceKernelLockMutexCB: Resuming lock wait for callback");
	}
}
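// For context: pausedWaitTimeouts is presumably populated by the matching "begin
// callback" hook. A hedged sketch of that counterpart, assuming CoreTiming's
// UnscheduleEvent returns the cycles remaining on the pending timeout event and
// that <algorithm> is included for std::remove; the real function likely handles
// more edge cases (e.g. nested callbacks).
void __KernelMutexBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Mutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<Mutex>(mutexID, error);
	if (!mutex)
		return;

	// Stash the absolute deadline so the end-callback hook can restore the wait.
	if (timeoutPtr != 0 && mutexWaitTimer != -1) {
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		mutex->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
	} else {
		mutex->pausedWaitTimeouts[pauseKey] = 0;
	}

	// The thread is not a waiter while its callback runs.
	mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
}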
bool __KernelUnlockMutex(Mutex *mutex, u32 &error) {
	__KernelMutexEraseLock(mutex);

	// Hand the lock to the first waiter that can actually be resumed.
	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter;
	while (!wokeThreads && !mutex->waitingThreads.empty()) {
		if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0)
			iter = __KernelMutexFindPriority(mutex->waitingThreads);
		else
			iter = mutex->waitingThreads.begin();

		wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, 0);
		mutex->waitingThreads.erase(iter);
	}

	// No one was waiting: the mutex is simply unlocked now.
	if (!wokeThreads)
		mutex->nm.lockThread = -1;

	return wokeThreads;
}
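// __KernelMutexFindPriority is not shown in this section. A plausible sketch: scan
// the waiting list for the thread with the best (numerically lowest) priority,
// assuming a __KernelGetThreadPrio(SceUID) helper exists; ties go to the earliest
// waiter, which preserves FIFO order within a priority level.
std::vector<SceUID>::iterator __KernelMutexFindPriority(std::vector<SceUID> &waiting) {
	std::vector<SceUID>::iterator iter, end, best = waiting.end();
	u32 bestPrio = 0xFFFFFFFF;
	for (iter = waiting.begin(), end = waiting.end(); iter != end; ++iter) {
		u32 prio = __KernelGetThreadPrio(*iter);
		if (prio < bestPrio) {
			best = iter;
			bestPrio = prio;
		}
	}
	return best;
}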
// Returns true only if the thread actually took the lock; used when resuming waits.
bool __KernelUnlockMutexForThreadCheck(Mutex *mutex, SceUID threadID, u32 &error, int result, bool &wokeThreads) {
	if (mutex->nm.lockThread == -1 && __KernelUnlockMutexForThread(mutex, threadID, error, 0))
		return true;
	return false;
}
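// Everything above funnels through __KernelUnlockMutexForThread. A hedged sketch of
// its likely shape, inferred from the call sites: verify the thread really is waiting
// on this mutex, hand it the lock on success (result == 0), write back the remaining
// timeout, and resume it with the given result code. HLEKernel::VerifyWait and
// cyclesToUs are assumed helpers; details of the real implementation may differ.
bool __KernelUnlockMutexForThread(Mutex *mutex, SceUID threadID, u32 &error, int result) {
	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_MUTEX, mutex->GetUID()))
		return false;

	// On a successful wakeup the waiter becomes the new owner with lock level 1.
	if (result == 0)
		__KernelMutexAcquireLock(mutex, 1, threadID);

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0 && mutexWaitTimer != -1) {
		// Cancel the pending timeout event and report how much time was left.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}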