// Called when a thread ends: cleans up all mutex state tied to that thread.
// Removes it from any mutex wait list, then force-unlocks every mutex it held.
void __KernelMutexThreadEnd(SceUID threadID)
{
	u32 error;

	// If it was waiting on the mutex, it should finish now.
	SceUID waitingMutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	if (waitingMutexID)
	{
		Mutex *mutex = kernelObjects.Get<Mutex>(waitingMutexID, error);
		if (mutex)
			mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
	}

	// Unlock all mutexes the thread had locked.
	std::pair<MutexMap::iterator, MutexMap::iterator> locked = mutexHeldLocks.equal_range(threadID);
	for (MutexMap::iterator iter = locked.first; iter != locked.second; )
	{
		// Need to increment early so erase() doesn't invalidate.
		SceUID mutexID = (*iter++).second;
		Mutex *mutex = kernelObjects.Get<Mutex>(mutexID, error);
		if (mutex)
		{
			// Zero the lock level first so the unlock fully releases it,
			// even if the dead thread had locked it recursively.
			mutex->nm.lockLevel = 0;
			__KernelUnlockMutex(mutex, error);
		}
	}
}
// CoreTiming callback fired when an event flag wait times out.
// userdata carries the waiting thread's ID.
void __KernelEventFlagTimeout(u64 userdata, int cycleslate)
{
	SceUID threadID = (SceUID)userdata;

	u32 error;
	// Report zero time remaining back to the guest, if it passed a timeout pointer.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	EventFlag *e = kernelObjects.Get<EventFlag>(flagID, error);
	if (e)
	{
		for (size_t i = 0; i < e->waitingThreads.size(); i++)
		{
			EventFlagTh *t = &e->waitingThreads[i];
			if (t->tid == threadID)
			{
				bool wokeThreads;

				// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
				// The reason is, if it times out, but what it was waiting on is DELETED prior to it
				// actually running, it will get a DELETE result instead of a TIMEOUT.
				// So, we need to remember it or we won't be able to mark it DELETE instead later.
				__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
				e->nef.numWaitThreads--;
				break;
			}
		}
	}
}
// Tries to satisfy one thread's VPL allocation wait (or cancel it, when
// result is an error code.)  Returns true if the thread should be removed
// from the wait list, false if it must keep waiting (not enough free space.)
bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads)
{
	const SceUID threadID = threadInfo.threadID;

	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_VPL, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != vpl->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		// The wait value holds the requested allocation size.
		int size = (int) __KernelGetWaitValue(threadID, error);

		// Padding (normally used to track the allocation.)
		u32 allocSize = size + 8;
		u32 addr = vpl->alloc.Alloc(allocSize, true);
		if (addr != (u32) -1)
			Memory::Write_U32(addr, threadInfo.addrPtr);
		else
			// Still no room; leave the thread waiting.
			return false;
	}

	if (timeoutPtr != 0 && vplWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(vplWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
// Copies the semaphore's status into guest memory at infoPtr.
// Returns 0 on success, an error code for a bad ID, -1 for a bad pointer.
int sceKernelReferSemaStatus(SceUID id, u32 infoPtr)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		DEBUG_LOG(HLE, "sceKernelReferSemaStatus(%i, %08x)", id, infoPtr);

		if (!Memory::IsValidAddress(infoPtr))
			return -1;

		// Clean up any threads no longer waiting on this semaphore.
		// BUGFIX: the old erase(iter--) pattern was undefined behavior when the
		// first element was erased (it decremented begin()); use erase()'s
		// returned iterator instead.
		for (auto iter = s->waitingThreads.begin(); iter != s->waitingThreads.end(); )
		{
			SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_SEMA, error);
			// The thread is no longer waiting for this, clean it up.
			if (waitID != id)
				iter = s->waitingThreads.erase(iter);
			else
				++iter;
		}

		s->ns.numWaitThreads = (int) s->waitingThreads.size();
		// Don't write if the guest-provided size field is 0.
		if (Memory::Read_U32(infoPtr) != 0)
			Memory::WriteStruct(infoPtr, &s->ns);
		return 0;
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelReferSemaStatus: error %08x", error);
		return error;
	}
}
//int sceKernelReferEventFlagStatus(SceUID event, SceKernelEventFlagInfo *status); u32 sceKernelReferEventFlagStatus(SceUID id, u32 statusPtr) { u32 error; EventFlag *e = kernelObjects.Get<EventFlag>(id, error); if (e) { DEBUG_LOG(HLE, "sceKernelReferEventFlagStatus(%i, %08x)", id, statusPtr); if (!Memory::IsValidAddress(statusPtr)) return -1; for (auto iter = e->waitingThreads.begin(); iter != e->waitingThreads.end(); ++iter) { SceUID waitID = __KernelGetWaitID(iter->tid, WAITTYPE_EVENTFLAG, error); // The thread is no longer waiting for this, clean it up. if (waitID != id) e->waitingThreads.erase(iter--); } e->nef.numWaitThreads = (int) e->waitingThreads.size(); if (Memory::Read_U32(statusPtr) != 0) Memory::WriteStruct(statusPtr, &e->nef); return 0; } else { ERROR_LOG(HLE, "sceKernelReferEventFlagStatus(%i, %08x): invalid event flag", id, statusPtr); return error; } }
// Copies the VPL's status into guest memory at infoPtr.
int sceKernelReferVplStatus(SceUID uid, u32 infoPtr)
{
	u32 error;
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl)
	{
		DEBUG_LOG(HLE, "sceKernelReferVplStatus(%i, %08x)", uid, infoPtr);

		// Clean up any threads no longer waiting on this VPL.
		// BUGFIX: erase(iter--) was undefined behavior when the first element
		// was erased (it decremented begin()); use erase()'s returned iterator.
		// Also removed an inner `u32 error;` that shadowed the outer one.
		for (auto iter = vpl->waitingThreads.begin(); iter != vpl->waitingThreads.end(); )
		{
			SceUID waitID = __KernelGetWaitID(iter->threadID, WAITTYPE_VPL, error);
			// The thread is no longer waiting for this, clean it up.
			if (waitID != uid)
				iter = vpl->waitingThreads.erase(iter);
			else
				++iter;
		}

		vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
		vpl->nv.freeSize = vpl->alloc.GetTotalFreeBytes();
		// Only write when the pointer is valid and the size field is non-zero.
		if (Memory::IsValidAddress(infoPtr) && Memory::Read_U32(infoPtr))
			Memory::WriteStruct(infoPtr, &vpl->nv);
		return 0;
	}
	else
		return error;
}
static void __CtrlDoSample() { // This samples the ctrl data into the buffers and updates the latch. __CtrlUpdateLatch(); // Wake up a single thread that was waiting for the buffer. retry: if (!waitingThreads.empty() && ctrlBuf != ctrlBufRead) { SceUID threadID = waitingThreads[0]; waitingThreads.erase(waitingThreads.begin()); u32 error; SceUID wVal = __KernelGetWaitID(threadID, WAITTYPE_CTRL, error); // Make sure it didn't get woken or something. if (wVal == 0) goto retry; PSPPointer<_ctrl_data> ctrlDataPtr; ctrlDataPtr = __KernelGetWaitValue(threadID, error); int retVal = __CtrlReadSingleBuffer(ctrlDataPtr, wVal == CTRL_WAIT_NEGATIVE); __KernelResumeThreadFromWait(threadID, retVal); __KernelReSchedule("ctrl buffers updated"); } }
bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error, int result, bool &wokeThreads) { SceUID waitID = __KernelGetWaitID(th.tid, WAITTYPE_EVENTFLAG, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.tid, error); // The waitID may be different after a timeout. if (waitID != e->GetUID()) return true; // If result is an error code, we're just letting it go. if (result == 0) { if (!__KernelEventFlagMatches(&e->nef.currentPattern, th.bits, th.wait, th.outAddr)) return false; e->nef.numWaitThreads--; } else { // Otherwise, we set the current result since we're bailing. if (Memory::IsValidAddress(th.outAddr)) Memory::Write_U32(e->nef.currentPattern, th.outAddr); } if (timeoutPtr != 0 && eventFlagWaitTimer != 0) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, th.tid); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(th.tid, result); wokeThreads = true; return true; }
// Wakes audio-channel waiters whose remaining sample count has drained.
// result == 0 wakes with the thread's stored wait value; any other result is
// passed through as an error (channel released, etc.)
inline void __AudioWakeThreads(AudioChannel &chan, int result, int step)
{
	u32 error;
	bool wokeThreads = false;
	for (size_t w = 0; w < chan.waitingThreads.size(); ++w)
	{
		AudioChannelWaitInfo &waitInfo = chan.waitingThreads[w];
		waitInfo.numSamples -= step;

		// If it's done (there will still be samples on queue) and actually still waiting, wake it up.
		u32 waitID = __KernelGetWaitID(waitInfo.threadID, WAITTYPE_AUDIOCHANNEL, error);
		if (waitInfo.numSamples <= 0 && waitID != 0)
		{
			// DEBUG_LOG(SCEAUDIO, "Woke thread %i for some buffer filling", waitingThread);
			u32 ret = result == 0 ? __KernelGetWaitValue(waitInfo.threadID, error) : SCE_ERROR_AUDIO_CHANNEL_NOT_RESERVED;
			__KernelResumeThreadFromWait(waitInfo.threadID, ret);
			wokeThreads = true;

			// w-- compensates for the erase so the next element isn't skipped.
			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
		}
		// This means the thread stopped waiting, so stop trying to wake it.
		else if (waitID == 0)
			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
	}

	if (wokeThreads) {
		__KernelReSchedule("audio drain");
	}
}
// Copies the lwmutex's status into guest memory at infoPtr, refreshing the
// cached fields from the guest-side workarea first.
int __KernelReferLwMutexStatus(SceUID uid, u32 infoPtr)
{
	u32 error;
	LwMutex *m = kernelObjects.Get<LwMutex>(uid, error);
	if (!m)
		return error;

	// Should we crash the thread somehow?
	if (!Memory::IsValidAddress(infoPtr))
		return -1;

	if (Memory::Read_U32(infoPtr) != 0)
	{
		auto workarea = m->nm.workarea;

		// Clean up any threads no longer waiting on this lwmutex.
		// BUGFIX: erase(iter--) was undefined behavior when the first element
		// was erased (it decremented begin()); use erase()'s returned iterator.
		// Also removed an inner `u32 error;` that shadowed the outer one.
		for (auto iter = m->waitingThreads.begin(); iter != m->waitingThreads.end(); )
		{
			SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_LWMUTEX, error);
			// The thread is no longer waiting for this, clean it up.
			if (waitID != uid)
				iter = m->waitingThreads.erase(iter);
			else
				++iter;
		}

		// Refresh and write
		m->nm.currentCount = workarea->lockLevel;
		m->nm.lockThread = workarea->lockThread == 0 ? -1 : workarea->lockThread;
		m->nm.numWaitThreads = (int) m->waitingThreads.size();

		Memory::WriteStruct(infoPtr, &m->nm);
	}
	return 0;
}
// CoreTiming callback: applies a scheduled UMD activation change, then wakes
// any threads whose awaited state mask now matches the drive state.
void __UmdStatChange(u64 userdata, int cyclesLate)
{
	// TODO: Why not a bool anyway?
	umdActivated = userdata & 0xFF;

	// Wake anyone waiting on this.
	for (size_t i = 0; i < umdWaitingThreads.size(); ++i)
	{
		SceUID threadID = umdWaitingThreads[i];

		u32 error;
		SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_UMD, error);
		// The wait value holds the state mask the thread is waiting for.
		u32 stat = __KernelGetWaitValue(threadID, error);
		bool keep = false;
		// UMD waits use the constant wait ID 1.
		if (waitID == 1)
		{
			if ((stat & __KernelUmdGetState()) != 0)
				__KernelResumeThreadFromWait(threadID, 0);
			// Only if they are still waiting do we keep them in the list.
			else
				keep = true;
		}

		// i-- compensates for the erase so the next element isn't skipped.
		if (!keep)
			umdWaitingThreads.erase(umdWaitingThreads.begin() + i--);
	}
}
// Callback-begin hook for sceKernelWaitSemaCB: suspends the sema wait so the
// callback can run, remembering the remaining timeout to restore afterwards.
void __KernelSemaBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Nested callbacks key the pause by the previous callback's ID.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Semaphore *s = semaID == 0 ? NULL : kernelObjects.Get<Semaphore>(semaID, error);
	if (s)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (s->pausedWaitTimeouts.find(pauseKey) != s->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && semaWaitTimer != -1)
		{
			// Convert the pending timeout into an absolute deadline so it can
			// be re-scheduled when the callback ends.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
			s->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			s->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		s->waitingThreads.erase(std::remove(s->waitingThreads.begin(), s->waitingThreads.end(), threadID), s->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelWaitSemaCB: Suspending sema wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelWaitSemaCB: beginning callback with bad wait id?");
}
// Callback-begin hook for sceUmdWaitDriveStatCB: suspends the UMD wait so the
// callback can run, remembering the remaining timeout to restore afterwards.
void __UmdBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Nested callbacks key the pause by the previous callback's ID.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_UMD, error);
	// UMD waits use the constant wait ID 1.
	if (waitID == 1)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (umdPausedWaitTimeouts.find(pauseKey) != umdPausedWaitTimeouts.end())
			return;

		_dbg_assert_msg_(HLE, umdStatTimeoutEvent != -1, "Must have a umd timer");
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(umdStatTimeoutEvent, threadID);
		if (cyclesLeft != 0)
			umdPausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		else
			umdPausedWaitTimeouts[pauseKey] = 0;

		// Remove this thread (only) from the waiting list.
		// BUGFIX: the old condition was `threadID == threadID` (always true),
		// which erased every waiter, and erase(it--) was undefined behavior
		// when erasing the first element; use erase()'s returned iterator.
		for (auto it = umdWaitingThreads.begin(); it != umdWaitingThreads.end(); )
		{
			if (*it == threadID)
				it = umdWaitingThreads.erase(it);
			else
				++it;
		}

		DEBUG_LOG(HLE, "sceUmdWaitDriveStatCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceUmdWaitDriveStatCB: beginning callback with bad wait id?");
}
// Copies the mutex's status into guest memory at infoAddr.
int sceKernelReferMutexStatus(SceUID id, u32 infoAddr)
{
	u32 error;
	Mutex *m = kernelObjects.Get<Mutex>(id, error);
	if (!m)
	{
		ERROR_LOG(HLE, "sceKernelReferMutexStatus(%i, %08x): invalid mutex id", id, infoAddr);
		return error;
	}

	DEBUG_LOG(HLE, "sceKernelReferMutexStatus(%08x, %08x)", id, infoAddr);

	// Should we crash the thread somehow?
	if (!Memory::IsValidAddress(infoAddr))
		return -1;

	// Don't write if the size is 0. Anything else is A-OK, though, apparently.
	if (Memory::Read_U32(infoAddr) != 0)
	{
		// Clean up any threads no longer waiting on this mutex.
		// BUGFIX: erase(iter--) was undefined behavior when the first element
		// was erased (it decremented begin()); use erase()'s returned iterator.
		// Also removed an inner `u32 error;` that shadowed the outer one.
		for (auto iter = m->waitingThreads.begin(); iter != m->waitingThreads.end(); )
		{
			SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_MUTEX, error);
			// The thread is no longer waiting for this, clean it up.
			if (waitID != id)
				iter = m->waitingThreads.erase(iter);
			else
				++iter;
		}

		m->nm.numWaitThreads = (int) m->waitingThreads.size();
		Memory::WriteStruct(infoAddr, &m->nm);
	}
	return 0;
}
// Callback-begin hook for sceKernelLockLwMutexCB: suspends the lock wait so
// the callback can run, remembering the remaining timeout to restore afterwards.
void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Nested callbacks key the pause by the previous callback's ID.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (mutex)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (mutex->pausedWaitTimeouts.find(pauseKey) != mutex->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
		{
			// Convert the pending timeout into an absolute deadline so it can
			// be re-scheduled when the callback ends.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
			mutex->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			mutex->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelLockLwMutexCB: beginning callback with bad wait id?");
}
// Callback-begin hook for msgpipe waits: suspends either the send or the
// receive wait (chosen by the stored wait value) so the callback can run.
static void __KernelMsgPipeBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	u32 error;
	u32 waitValue = __KernelGetWaitValue(threadID, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	SceUID uid = __KernelGetWaitID(threadID, WAITTYPE_MSGPIPE, error);
	MsgPipe *ko = uid == 0 ? NULL : kernelObjects.Get<MsgPipe>(uid, error);

	switch (waitValue)
	{
	case MSGPIPE_WAIT_VALUE_SEND:
		if (ko)
		{
			auto result = HLEKernel::WaitBeginCallback<MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, ko->sendWaitingThreads, ko->pausedSendWaits, timeoutPtr != 0);
			if (result == HLEKernel::WAIT_CB_SUCCESS)
				DEBUG_LOG(SCEKERNEL, "sceKernelSendMsgPipeCB: Suspending wait for callback");
		}
		break;

	case MSGPIPE_WAIT_VALUE_RECV:
		if (ko)
		{
			auto result = HLEKernel::WaitBeginCallback<MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, ko->receiveWaitingThreads, ko->pausedReceiveWaits, timeoutPtr != 0);
			if (result == HLEKernel::WAIT_CB_SUCCESS)
				DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMsgPipeCB: Suspending wait for callback");
		}
		break;

	default:
		// Not a msgpipe send/recv wait value; nothing to suspend.
		break;
	}
}
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result) { SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); // The waitID may be different after a timeout. if (waitID != mutex->GetUID()) return false; // If result is an error code, we're just letting it go. if (result == 0) { workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error); workarea->lockThread = threadID; } if (timeoutPtr != 0 && lwMutexWaitTimer != -1) { // Remove any event for this thread. s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); return true; }
// Returns whether the thread should be removed. bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads) { SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); // The waitID may be different after a timeout. if (waitID != s->GetUID()) return true; // If result is an error code, we're just letting it go. if (result == 0) { int wVal = (int) __KernelGetWaitValue(threadID, error); if (wVal > s->ns.currentCount) return false; s->ns.currentCount -= wVal; s->ns.numWaitThreads--; } if (timeoutPtr != 0 && semaWaitTimer != -1) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); wokeThreads = true; return true; }
// CoreTiming callback: a sceUmdWaitDriveStat timeout expired.
// userdata carries the waiting thread's ID.
void __UmdStatTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	// UMD waits use the constant wait ID 1; only resume if it's still waiting.
	const SceUID currentWait = __KernelGetWaitID(threadID, WAITTYPE_UMD, error);
	if (currentWait == 1)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// Parameters are an educated guess. int sceKernelFreeTls(SceUID uid) { WARN_LOG(HLE, "UNIMPL sceKernelFreeTls(%08x)", uid); u32 error; TLS *tls = kernelObjects.Get<TLS>(uid, error); if (tls) { SceUID threadID = __KernelGetCurThread(); // Find the current thread's block. int freeBlock = -1; for (size_t i = 0; i < tls->ntls.totalBlocks; ++i) { if (tls->usage[i] == threadID) { freeBlock = (int) i; break; } } if (freeBlock != -1) { u32 error2; while (!tls->waitingThreads.empty()) { // TODO: What order do they wake in? SceUID waitingThreadID = tls->waitingThreads[0]; tls->waitingThreads.erase(tls->waitingThreads.begin()); // This thread must've been woken up. if (__KernelGetWaitID(waitingThreadID, WAITTYPE_TLS, error2) != uid) continue; // Otherwise, if there was a thread waiting, we were full, so this newly freed one is theirs. // TODO: Is the block wiped or anything? tls->usage[freeBlock] = waitingThreadID; __KernelResumeThreadFromWait(waitingThreadID, freeBlock); // No need to continue or free it, we're done. return 0; } // No one was waiting, so now we can really free it. tls->usage[freeBlock] = 0; ++tls->ntls.freeBlocks; return 0; } // TODO: Correct error code. else return -1; } else return error; }
// Callback-end hook for sceKernelWaitSemaCB: re-establishes the suspended
// sema wait, completing it, timing it out, or rejoining the wait queue.
void __KernelSemaEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	// Must match the key used by __KernelSemaBeginCallback.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	// Note: Cancel does not affect suspended semaphore waits.

	u32 error;
	SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Semaphore *s = semaID == 0 ? NULL : kernelObjects.Get<Semaphore>(semaID, error);
	if (!s || s->pausedWaitTimeouts.find(pauseKey) == s->pausedWaitTimeouts.end())
	{
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = s->pausedWaitTimeouts[pauseKey];
	s->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	bool wokeThreads;
	// Attempt to unlock.
	if (__KernelUnlockSemaForThread(s, threadID, error, 0, wokeThreads))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// Reschedule the remaining timeout and rejoin the wait queue.
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, semaWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		s->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelWaitSemaCB: Resuming sema wait for callback");
	}
}
// CoreTiming callback that completes an hleDelayResult(): wakes the delayed
// thread with its 64-bit result.
void hleDelayResultFinish(u64 userdata, int cycleslate)
{
	u32 error;
	const SceUID threadID = (SceUID) userdata;
	const SceUID verify = __KernelGetWaitID(threadID, WAITTYPE_HLEDELAY, error);
	// The top 32 bits of userdata are the top 32 bits of the 64 bit result.
	// We can't just put it all in userdata because we need to know the threadID...
	const u64 result = (userdata & 0xFFFFFFFF00000000ULL) | __KernelGetWaitValue(threadID, error);

	if (error != 0 || verify != 1)
		WARN_LOG(HLE, "Someone else woke up HLE-blocked thread?");
	else
		__KernelResumeThreadFromWait(threadID, result);
}
// Callback-end hook for sceKernelWaitEventFlagCB: re-establishes the
// suspended wait, completing it, timing it out, or rejoining the wait queue.
void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	// Must match the key used by __KernelEventFlagBeginCallback.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *flag = flagID == 0 ? NULL : kernelObjects.Get<EventFlag>(flagID, error);
	if (!flag || flag->pausedWaits.find(pauseKey) == flag->pausedWaits.end())
	{
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	EventFlagTh waitData = flag->pausedWaits[pauseKey];
	u64 waitDeadline = waitData.pausedTimeout;
	flag->pausedWaits.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	bool wokeThreads;
	// Attempt to unlock.
	if (__KernelUnlockEventFlagForThread(flag, waitData, error, 0, wokeThreads))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// Reschedule the remaining timeout and rejoin the wait queue.
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, eventFlagWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		flag->waitingThreads.push_back(waitData);

		DEBUG_LOG(HLE, "sceKernelWaitEventFlagCB: Resuming lock wait for callback");
	}
}
// Callback-begin hook for sceKernelWaitEventFlagCB: suspends the flag wait so
// the callback can run, saving the full wait record (bits/mode/timeout.)
void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Nested callbacks key the pause by the previous callback's ID.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *flag = flagID == 0 ? NULL : kernelObjects.Get<EventFlag>(flagID, error);
	if (flag)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (flag->pausedWaits.find(pauseKey) != flag->pausedWaits.end())
			return;

		// Find and detach this thread's wait record.
		EventFlagTh waitData = {0};
		for (size_t i = 0; i < flag->waitingThreads.size(); i++)
		{
			EventFlagTh *t = &flag->waitingThreads[i];
			if (t->tid == threadID)
			{
				waitData = *t;
				// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
				flag->waitingThreads.erase(flag->waitingThreads.begin() + i);
				break;
			}
		}

		if (waitData.tid != threadID)
		{
			ERROR_LOG_REPORT(HLE, "sceKernelWaitEventFlagCB: wait not found to pause for callback");
			return;
		}

		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
		{
			// Convert the pending timeout into an absolute deadline so it can
			// be re-scheduled when the callback ends.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, threadID);
			waitData.pausedTimeout = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			waitData.pausedTimeout = 0;

		flag->pausedWaits[pauseKey] = waitData;

		DEBUG_LOG(HLE, "sceKernelWaitEventFlagCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelWaitEventFlagCB: beginning callback with bad wait id?");
}
// CoreTiming callback: a sceUmdWaitDriveStat timeout expired.
// Resumes the thread (if still waiting) and drops it from the waiting list.
void __UmdStatTimeout(u64 userdata, int cyclesLate)
{
	SceUID threadID = (SceUID)userdata;

	u32 error;
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_UMD, error);
	// Assuming it's still waiting.
	if (waitID == 1)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);

	// i-- compensates for the erase so the next element isn't skipped.
	for (size_t i = 0; i < umdWaitingThreads.size(); ++i)
	{
		if (umdWaitingThreads[i].threadID == threadID)
			umdWaitingThreads.erase(umdWaitingThreads.begin() + i--);
	}
}
// Callback-end hook for sceKernelLockLwMutexCB: re-establishes the suspended
// lock wait, completing it, timing it out, or rejoining the wait queue.
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	// Must match the key used by __KernelLwMutexBeginCallback.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end())
	{
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock (only possible if no one currently holds it.)
	if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, mutex->nm.workarea, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// Reschedule the remaining timeout and rejoin the wait queue.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, lwMutexWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Resuming lock wait for callback");
	}
}
// CoreTiming callback: a lwmutex lock wait timed out.
// userdata carries the waiting thread's ID.
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	// Report zero time remaining back to the guest, if it asked for it.
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	// Only resume the thread if it's actually still in a lwmutex wait.
	const SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	if (mutexID != 0)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);

	// We intentionally don't remove from waitingThreads here yet.
	// The reason is, if it times out, but what it was waiting on is DELETED prior to it
	// actually running, it will get a DELETE result instead of a TIMEOUT.
	// So, we need to remember it or we won't be able to mark it DELETE instead later.
}
// Wakes audio-channel waiters whose remaining sample count has drained.
// step is how many samples were just consumed.
inline void __AudioWakeThreads(AudioChannel &chan, int step)
{
	u32 error;
	for (size_t w = 0; w < chan.waitingThreads.size(); ++w)
	{
		AudioChannelWaitInfo &waitInfo = chan.waitingThreads[w];
		// BUGFIX: decrement by the step parameter.  The old code subtracted the
		// global hwBlockSize and left `step` entirely unused (compare the
		// overload taking result+step, which uses step.)
		waitInfo.numSamples -= step;

		// If it's done (there will still be samples on queue) and actually still waiting, wake it up.
		if (waitInfo.numSamples <= 0 && __KernelGetWaitID(waitInfo.threadID, WAITTYPE_AUDIOCHANNEL, error) != 0)
		{
			// DEBUG_LOG(HLE, "Woke thread %i for some buffer filling", waitingThread);
			u32 ret = __KernelGetWaitValue(waitInfo.threadID, error);
			__KernelResumeThreadFromWait(waitInfo.threadID, ret);

			// w-- compensates for the erase so the next element isn't skipped.
			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
		}
	}
}
// CoreTiming callback: a lwmutex lock wait timed out (variant that removes
// the thread from the wait list immediately.)
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
	SceUID threadID = (SceUID)userdata;

	u32 error;
	// Report zero time remaining back to the guest, if it asked for it.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	LwMutex *mutex = kernelObjects.Get<LwMutex>(mutexID, error);
	if (mutex)
	{
		// This thread isn't waiting anymore.
		mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
	}

	// NOTE(review): this resumes even when mutexID is 0 — the other variant of
	// this handler checks mutexID != 0 first; confirm which is intended.
	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// CoreTiming callback: a sceKernelWaitSema timeout expired.
// userdata carries the waiting thread's ID.
void __KernelSemaTimeout(u64 userdata, int cycleslate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	// Report zero time remaining back to the guest, if it asked for it.
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	const SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	Semaphore *s = kernelObjects.Get<Semaphore>(semaID, error);
	if (s != NULL)
	{
		// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
		// The reason is, if it times out, but what it was waiting on is DELETED prior to it
		// actually running, it will get a DELETE result instead of a TIMEOUT.
		// So, we need to remember it or we won't be able to mark it DELETE instead later.
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
}