// Deletes an lwmutex: wakes every waiter with WAIT_DELETE, wipes the
// userland workarea, and destroys the kernel-side object.
int sceKernelDeleteLwMutex(u32 workareaPtr) {
	DEBUG_LOG(HLE, "sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (!mutex)
		return error;

	// Kick every waiting thread out with WAIT_DELETE before tearing down.
	bool anyWoken = false;
	for (size_t i = 0; i < mutex->waitingThreads.size(); ++i)
		anyWoken |= __KernelUnlockLwMutexForThread(mutex, workarea, mutex->waitingThreads[i], error, SCE_KERNEL_ERROR_WAIT_DELETE);
	mutex->waitingThreads.clear();
	workarea->clear();

	if (anyWoken)
		hleReSchedule("lwmutex deleted");
	return kernelObjects.Destroy<LwMutex>(mutex->GetUID());
}
bool __KernelUnlockLwMutex(T workarea, u32 &error) { LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error); if (error) { workarea->lockThread = 0; return false; } bool wokeThreads = false; std::vector<SceUID>::iterator iter; while (!wokeThreads && !mutex->waitingThreads.empty()) { if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0) iter = __KernelMutexFindPriority(mutex->waitingThreads); else iter = mutex->waitingThreads.begin(); wokeThreads |= __KernelUnlockLwMutexForThread(mutex, workarea, *iter, error, 0); mutex->waitingThreads.erase(iter); } if (!wokeThreads) workarea->lockThread = 0; return wokeThreads; }
// Resumes an lwmutex lock wait after a callback has finished running on the
// waiting thread.  Either the mutex was deleted (wake with WAIT_DELETE), the
// wait already timed out (wake with WAIT_TIMEOUT), the lock can now be taken,
// or the wait is re-armed and the thread goes back on the waiting list.
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue) {
	// Nested callbacks key the paused-timeout map by the previous callback id.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);

	// The lwmutex was deleted while the callback ran.
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end()) {
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock: if nobody holds it, let this thread grab it now.
	if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, mutex->nm.workarea, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0) {
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	} else {
		// BUGFIX: re-arm the timeout for threadID — the thread whose wait we are
		// resuming — not __KernelGetCurThread().  Every other statement in this
		// function operates on threadID; if the callback machinery ever ends a
		// callback while a different thread is current, the old code would have
		// fired the timeout against the wrong thread.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, lwMutexWaitTimer, threadID);

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Resuming lock wait for callback");
	}
}
bool __KernelUnlockLwMutexForThreadCheck(LwMutex *mutex, SceUID threadID, u32 &error, int result, bool &wokeThreads) { if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, mutex->nm.workarea, threadID, error, 0)) return true; return false; }