// Timer callback fired when a thread's event-flag wait hits its timeout.
// userdata carries the waiting thread's SceUID; cycleslate is how late the
// timer fired (unused here).
void __KernelEventFlagTimeout(u64 userdata, int cycleslate) {
	SceUID threadID = (SceUID)userdata;

	u32 error;
	// The wait expired, so report zero microseconds remaining.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	// Look up the flag the thread was waiting on and wake it with TIMEOUT.
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	EventFlag *e = kernelObjects.Get<EventFlag>(flagID, error);
	if (e) {
		for (size_t i = 0; i < e->waitingThreads.size(); i++) {
			EventFlagTh *t = &e->waitingThreads[i];
			if (t->tid == threadID) {
				bool wokeThreads;

				// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
				// The reason is, if it times out, but what it was waiting on is DELETED prior to it
				// actually running, it will get a DELETE result instead of a TIMEOUT.
				// So, we need to remember it or we won't be able to mark it DELETE instead later.
				__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
				e->nef.numWaitThreads--;
				break;
			}
		}
	}
}
// Begin-callback handler for msgpipe waits: pauses the thread's wait
// (send or receive side, chosen by the wait value) while its callback runs.
static void __KernelMsgPipeBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	u32 error;
	const u32 waitValue = __KernelGetWaitValue(threadID, error);
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	const SceUID pipeID = __KernelGetWaitID(threadID, WAITTYPE_MSGPIPE, error);
	MsgPipe *pipe = pipeID == 0 ? NULL : kernelObjects.Get<MsgPipe>(pipeID, error);

	if (waitValue == MSGPIPE_WAIT_VALUE_SEND) {
		if (pipe) {
			auto result = HLEKernel::WaitBeginCallback<MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, pipe->sendWaitingThreads, pipe->pausedSendWaits, timeoutPtr != 0);
			if (result == HLEKernel::WAIT_CB_SUCCESS)
				DEBUG_LOG(SCEKERNEL, "sceKernelSendMsgPipeCB: Suspending wait for callback");
		}
	} else if (waitValue == MSGPIPE_WAIT_VALUE_RECV) {
		if (pipe) {
			auto result = HLEKernel::WaitBeginCallback<MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, pipe->receiveWaitingThreads, pipe->pausedReceiveWaits, timeoutPtr != 0);
			if (result == HLEKernel::WAIT_CB_SUCCESS)
				DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMsgPipeCB: Suspending wait for callback");
		}
	}
	// Other wait values: nothing to do (matches the original default case).
}
// Wakes a single thread waiting on event flag e, writing back its remaining
// timeout and the flag's current bit pattern as appropriate.
// result == 0 means a normal match attempt; a non-zero result is an
// error/cancel code passed straight through to the thread.
// Returns whether the thread should be removed from the wait list (true even
// when nothing was done because the thread's wait moved elsewhere).
bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error, int result, bool &wokeThreads)
{
	SceUID waitID = __KernelGetWaitID(th.tid, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.tid, error);

	// The waitID may be different after a timeout.
	if (waitID != e->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		// No match yet: keep the thread waiting.
		if (!__KernelEventFlagMatches(&e->nef.currentPattern, th.bits, th.wait, th.outAddr))
			return false;
		e->nef.numWaitThreads--;
	}
	else
	{
		// Otherwise, we set the current result since we're bailing.
		if (Memory::IsValidAddress(th.outAddr))
			Memory::Write_U32(e->nef.currentPattern, th.outAddr);
	}

	if (timeoutPtr != 0 && eventFlagWaitTimer != 0)
	{
		// Remove any event for this thread, reporting the time left.
		u64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, th.tid);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.tid, result);
	wokeThreads = true;
	return true;
}
// Wakes one thread waiting on an lwmutex, transferring the lock to it when
// result == 0; a non-zero result is an error code passed through unchanged.
// Returns true if the thread was resumed.
// NOTE(review): the bare T parameter suggests this is a template whose
// template<> header lies outside this view — presumably a pointer-like
// wrapper over the lwmutex workarea; confirm against the full file.
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != mutex->GetUID())
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		// Hand the lock to the waking thread at the lock level it requested.
		workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error);
		workarea->lockThread = threadID;
	}

	if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
	{
		// Remove any event for this thread, reporting the time left.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
// Returns whether the thread should be removed. bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads) { SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); // The waitID may be different after a timeout. if (waitID != s->GetUID()) return true; // If result is an error code, we're just letting it go. if (result == 0) { int wVal = (int) __KernelGetWaitValue(threadID, error); if (wVal > s->ns.currentCount) return false; s->ns.currentCount -= wVal; s->ns.numWaitThreads--; } if (timeoutPtr != 0 && semaWaitTimer != -1) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); wokeThreads = true; return true; }
// Begin-callback handler for lwmutex waits: stashes the thread's remaining
// timeout (as an absolute deadline tick) in pausedWaitTimeouts and pulls it
// off the wait list while its callback runs.
void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (mutex)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (mutex->pausedWaitTimeouts.find(pauseKey) != mutex->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
		{
			// Convert the remaining cycles into an absolute deadline so the
			// end-callback can reschedule or detect a timeout.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
			mutex->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			mutex->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelLockLwMutexCB: beginning callback with bad wait id?");
}
// Resume all waiting threads (for delete / cancel.) // Returns true if it woke any threads. bool __KernelClearSemaThreads(Semaphore *s, int reason) { bool wokeThreads = false; // TODO: PSP_SEMA_ATTR_PRIORITY std::vector<SceUID>::iterator iter; for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++) { SceUID threadID = *iter; u32 error; u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (timeoutPtr != 0 && semaWaitTimer != 0) { // Remove any event for this thread. int cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, reason); wokeThreads = true; } s->waitingThreads.empty(); return wokeThreads; }
// Begin-callback handler for semaphore waits: stashes the thread's remaining
// timeout (as an absolute deadline tick) in pausedWaitTimeouts and removes it
// from the wait list while its callback runs.
void __KernelSemaBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Semaphore *s = semaID == 0 ? NULL : kernelObjects.Get<Semaphore>(semaID, error);
	if (s)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (s->pausedWaitTimeouts.find(pauseKey) != s->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && semaWaitTimer != -1)
		{
			// Convert the remaining cycles into an absolute deadline so the
			// end-callback can reschedule or detect a timeout.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
			s->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			s->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		s->waitingThreads.erase(std::remove(s->waitingThreads.begin(), s->waitingThreads.end(), threadID), s->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelWaitSemaCB: Suspending sema wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelWaitSemaCB: beginning callback with bad wait id?");
}
// HLE: sceKernelDeleteMutex — destroys a mutex, waking every waiting thread
// with SCE_KERNEL_ERROR_WAIT_DELETE and releasing any held lock first.
void sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		// Fix: the original called empty(), a const query whose result was
		// discarded — the wait list was never actually cleared.
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<Mutex>(id));
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
bool __KernelUnlockMutex(Mutex *mutex, u32 &error) { __KernelMutexEraseLock(mutex); // TODO: PSP_MUTEX_ATTR_PRIORITY bool wokeThreads = false; std::vector<SceUID>::iterator iter, end; for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter) { SceUID threadID = *iter; int wVal = (int)__KernelGetWaitValue(threadID, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); __KernelMutexAcquireLock(mutex, wVal, threadID); if (timeoutPtr != 0 && mutexWaitTimer != 0) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, 0); wokeThreads = true; mutex->waitingThreads.erase(iter); break; } if (!wokeThreads) mutex->nm.lockThread = -1; return wokeThreads; }
// Attempts to satisfy one VPL wait: allocates the requested size from the
// pool and writes the resulting address to the waiter's out pointer.
// result == 0 attempts a real allocation; non-zero wakes with that error code.
// Returns whether the thread should be removed from the wait list.
bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads)
{
	const SceUID threadID = threadInfo.threadID;

	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_VPL, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != vpl->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int size = (int) __KernelGetWaitValue(threadID, error);

		// Padding (normally used to track the allocation.)
		u32 allocSize = size + 8;
		u32 addr = vpl->alloc.Alloc(allocSize, true);
		if (addr != (u32) -1)
			Memory::Write_U32(addr, threadInfo.addrPtr);
		else
			// Pool still too full: leave the thread waiting.
			return false;
	}

	if (timeoutPtr != 0 && vplWaitTimer != -1)
	{
		// Remove any event for this thread, reporting the time left.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(vplWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
// Returns whether the thread should be removed. static bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads) { if (!HLEKernel::VerifyWait(threadID, WAITTYPE_SEMA, s->GetUID())) return true; // If result is an error code, we're just letting it go. if (result == 0) { int wVal = (int) __KernelGetWaitValue(threadID, error); if (wVal > s->ns.currentCount) return false; s->ns.currentCount -= wVal; } u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (timeoutPtr != 0 && semaWaitTimer != -1) { // Remove any event for this thread. s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); if (cyclesLeft < 0) cyclesLeft = 0; Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); wokeThreads = true; return true; }
// End-callback handler for semaphore waits: retries the acquire, and if it
// still can't be satisfied, either times the wait out or re-arms it using
// the deadline saved by the begin-callback.
void __KernelSemaEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	// Note: Cancel does not affect suspended semaphore waits.

	u32 error;
	SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Semaphore *s = semaID == 0 ? NULL : kernelObjects.Get<Semaphore>(semaID, error);
	if (!s || s->pausedWaitTimeouts.find(pauseKey) == s->pausedWaitTimeouts.end())
	{
		// Semaphore was deleted (or wait never paused) while the callback ran.
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = s->pausedWaitTimeouts[pauseKey];
	s->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	bool wokeThreads;
	// Attempt to unlock.
	if (__KernelUnlockSemaForThread(s, threadID, error, 0, wokeThreads))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// NOTE(review): this schedules the timeout against __KernelGetCurThread();
		// presumably that equals threadID in an end-callback — confirm, else the
		// wrong thread's timeout would be armed.
		if (timeoutPtr != 0 && semaWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, semaWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		s->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelWaitSemaCB: Resuming sema wait for callback");
	}
}
// End-callback handler for event flag waits: retries the bit match, and if
// it still doesn't match, either times the wait out or re-arms it using the
// wait data saved by the begin-callback.
void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *flag = flagID == 0 ? NULL : kernelObjects.Get<EventFlag>(flagID, error);
	if (!flag || flag->pausedWaits.find(pauseKey) == flag->pausedWaits.end())
	{
		// Flag was deleted (or wait never paused) while the callback ran.
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	EventFlagTh waitData = flag->pausedWaits[pauseKey];
	u64 waitDeadline = waitData.pausedTimeout;
	flag->pausedWaits.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	bool wokeThreads;
	// Attempt to unlock.
	if (__KernelUnlockEventFlagForThread(flag, waitData, error, 0, wokeThreads))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// NOTE(review): this schedules the timeout against __KernelGetCurThread();
		// presumably that equals threadID in an end-callback — confirm, else the
		// wrong thread's timeout would be armed.
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, eventFlagWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		flag->waitingThreads.push_back(waitData);

		DEBUG_LOG(HLE, "sceKernelWaitEventFlagCB: Resuming lock wait for callback");
	}
}
// If the thread is still in this wait, cancels its pending timeout event and
// writes the remaining time (in microseconds) back to its timeout pointer.
void WriteCurrentTimeout(SceUID waitID) const
{
	u32 error;
	if (!IsStillWaiting(waitID))
		return;

	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr == 0 || waitTimer == -1)
		return;

	// Remove any event for this thread and report what was left.
	const s64 cyclesLeft = CoreTiming::UnscheduleEvent(waitTimer, threadID);
	Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
}
// Begin-callback handler for event flag waits: copies the thread's wait data
// (including its remaining timeout as an absolute deadline) into pausedWaits
// and removes it from the flag's wait list while the callback runs.
void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *flag = flagID == 0 ? NULL : kernelObjects.Get<EventFlag>(flagID, error);
	if (flag)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (flag->pausedWaits.find(pauseKey) != flag->pausedWaits.end())
			return;

		// Find and detach this thread's entry from the wait list.
		EventFlagTh waitData = {0};
		for (size_t i = 0; i < flag->waitingThreads.size(); i++)
		{
			EventFlagTh *t = &flag->waitingThreads[i];
			if (t->tid == threadID)
			{
				waitData = *t;
				// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
				flag->waitingThreads.erase(flag->waitingThreads.begin() + i);
				break;
			}
		}

		if (waitData.tid != threadID)
		{
			ERROR_LOG_REPORT(HLE, "sceKernelWaitEventFlagCB: wait not found to pause for callback");
			return;
		}

		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
		{
			// Convert the remaining cycles into an absolute deadline so the
			// end-callback can reschedule or detect a timeout.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, threadID);
			waitData.pausedTimeout = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			waitData.pausedTimeout = 0;

		flag->pausedWaits[pauseKey] = waitData;

		DEBUG_LOG(HLE, "sceKernelWaitEventFlagCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelWaitEventFlagCB: beginning callback with bad wait id?");
}
// End-callback handler for lwmutex waits: retries the lock, and if it still
// can't be taken, either times the wait out or re-arms it using the deadline
// saved by the begin-callback.
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end())
	{
		// Mutex was deleted (or wait never paused) while the callback ran.
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock (only possible if nobody holds the lock).
	if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, mutex->nm.workarea, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// NOTE(review): this schedules the timeout against __KernelGetCurThread();
		// presumably that equals threadID in an end-callback — confirm, else the
		// wrong thread's timeout would be armed.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, lwMutexWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Resuming lock wait for callback");
	}
}
static bool __KernelUnlockMbxForThread(Mbx *m, MbxWaitingThread &th, u32 &error, int result, bool &wokeThreads) { if (!HLEKernel::VerifyWait(th.threadID, WAITTYPE_MBX, m->GetUID())) return true; u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.threadID, error); if (timeoutPtr != 0 && mbxWaitTimer != -1) { // Remove any event for this thread. s64 cyclesLeft = CoreTiming::UnscheduleEvent(mbxWaitTimer, th.threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(th.threadID, result); wokeThreads = true; return true; }
// Timer callback for lwmutex wait timeouts: zeroes the reported time left
// and wakes the thread with a TIMEOUT result if it is still waiting.
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;
	u32 error;

	// The wait expired, so no time remains.
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	if (__KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error) != 0)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);

	// We intentionally don't remove from waitingThreads here yet.
	// The reason is, if it times out, but what it was waiting on is DELETED prior to it
	// actually running, it will get a DELETE result instead of a TIMEOUT.
	// So, we need to remember it or we won't be able to mark it DELETE instead later.
}
// HLE: sceKernelDeleteLwMutex — destroys an lwmutex, waking every waiter
// with SCE_KERNEL_ERROR_WAIT_DELETE and clearing the userland workarea.
void sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(HLE,"sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
	{
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_ADDR);
		return;
	}

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		// Fix: the original called empty(), a const query whose result was
		// discarded — the wait list was never actually cleared.
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<LwMutex>(workarea.uid));
		workarea.clear();
		Memory::WriteStruct(workareaPtr, &workarea);
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
// Timer callback for lwmutex wait timeouts: zeroes the reported time left,
// drops the thread from the mutex's wait queue, and wakes it with TIMEOUT.
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	const SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	LwMutex *mutex = kernelObjects.Get<LwMutex>(mutexID, error);
	if (mutex)
	{
		// This thread isn't waiting anymore.
		auto &queue = mutex->waitingThreads;
		queue.erase(std::remove(queue.begin(), queue.end(), threadID), queue.end());
	}

	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// Timer callback for semaphore wait timeouts: zeroes the reported time left
// and wakes the thread with a TIMEOUT result (wait-list cleanup happens later).
void __KernelSemaTimeout(u64 userdata, int cycleslate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	const SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	Semaphore *s = kernelObjects.Get<Semaphore>(semaID, error);
	if (s != NULL)
	{
		// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
		// The reason is, if it times out, but what it was waiting on is DELETED prior to it
		// actually running, it will get a DELETE result instead of a TIMEOUT.
		// So, we need to remember it or we won't be able to mark it DELETE instead later.
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
}
// Timer callback for msgpipe wait timeouts.
// userdata packs the waiting thread's SceUID in its low 32 bits.
void __KernelMsgPipeTimeout(u64 userdata, int cyclesLate) {
	SceUID threadID = (SceUID) (userdata & 0xFFFFFFFF);

	u32 error;
	// The wait expired, so report zero time remaining.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	SceUID uid = __KernelGetWaitID(threadID, WAITTYPE_MSGPIPE, error);
	MsgPipe *m = kernelObjects.Get<MsgPipe>(uid, error);
	if (m) {
		// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
		// The reason is, if it times out, but what it was waiting on is DELETED prior to it
		// actually running, it will get a DELETE result instead of a TIMEOUT.
		// So, we need to remember it or we won't be able to mark it DELETE instead later.
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
}
bool __KernelUnlockMbxForThread(Mbx *m, MbxWaitingThread &th, u32 &error, int result, bool &wokeThreads) { SceUID waitID = __KernelGetWaitID(th.first, WAITTYPE_MBX, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.first, error); // The waitID may be different after a timeout. if (waitID != m->GetUID()) return true; if (timeoutPtr != 0 && mbxWaitTimer != -1) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(mbxWaitTimer, th.first); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(th.first, result); wokeThreads = true; return true; }
// Timer callback for semaphore wait timeouts: zeroes the reported time left,
// drops the thread from the wait list, and wakes it with TIMEOUT.
void __KernelSemaTimeout(u64 userdata, int cycleslate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	const SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	Semaphore *s = kernelObjects.Get<Semaphore>(semaID, error);
	if (s != NULL)
	{
		// This thread isn't waiting anymore.
		auto &waiters = s->waitingThreads;
		waiters.erase(std::remove(waiters.begin(), waiters.end(), threadID), waiters.end());
		s->ns.numWaitThreads--;
	}

	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// End-callback handler for msgpipe waits: resumes the paused send/receive
// wait via the shared HLEKernel helper, retrying the transfer and handling
// a timeout that elapsed during the callback.
static void __KernelMsgPipeEndCallback(SceUID threadID, SceUID prevCallbackId) {
	u32 error;
	u32 waitValue = __KernelGetWaitValue(threadID, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	SceUID uid = __KernelGetWaitID(threadID, WAITTYPE_MSGPIPE, error);
	MsgPipe *ko = uid == 0 ? NULL : kernelObjects.Get<MsgPipe>(uid, error);

	if (ko == NULL)
		return;

	switch (waitValue) {
	case MSGPIPE_WAIT_VALUE_SEND:
		{
			MsgPipeWaitingThread dummy;
			auto result = HLEKernel::WaitEndCallback<MsgPipe, WAITTYPE_MSGPIPE, MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, __KernelCheckResumeMsgPipeSend, dummy, ko->sendWaitingThreads, ko->pausedSendWaits);
			if (result == HLEKernel::WAIT_CB_RESUMED_WAIT) {
				DEBUG_LOG(SCEKERNEL, "sceKernelSendMsgPipeCB: Resuming wait from callback");
			} else if (result == HLEKernel::WAIT_CB_TIMED_OUT) {
				// It was re-added to the waiting threads list, but it timed out. Let's remove it.
				ko->RemoveSendWaitingThread(threadID);
			}
		}
		break;

	case MSGPIPE_WAIT_VALUE_RECV:
		{
			MsgPipeWaitingThread dummy;
			auto result = HLEKernel::WaitEndCallback<MsgPipe, WAITTYPE_MSGPIPE, MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, __KernelCheckResumeMsgPipeReceive, dummy, ko->receiveWaitingThreads, ko->pausedReceiveWaits);
			if (result == HLEKernel::WAIT_CB_RESUMED_WAIT) {
				DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMsgPipeCB: Resuming wait from callback");
			} else if (result == HLEKernel::WAIT_CB_TIMED_OUT) {
				// It was re-added to the waiting threads list, but it timed out. Let's remove it.
				ko->RemoveReceiveWaitingThread(threadID);
			}
		}
		break;

	default:
		// Unknown wait value: nothing to resume.
		break;
	}
}
// Hands the lwmutex to the first waiting thread, updating the userland
// workarea; otherwise marks it unlocked (lockThread = 0).
// Returns true if a thread was woken.
bool __KernelUnlockLwMutex(NativeLwMutexWorkarea &workarea, u32 &error)
{
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (error)
	{
		// Bad uid in the workarea: report unlocked and bail.
		workarea.lockThread = 0;
		return false;
	}

	// TODO: PSP_MUTEX_ATTR_PRIORITY
	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter, end;
	// Note: only the first waiter is ever examined — the loop always breaks
	// at the end of its first iteration.
	for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
	{
		SceUID threadID = *iter;

		int wVal = (int)__KernelGetWaitValue(threadID, error);
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

		// Transfer the lock to the waking thread at its requested level.
		workarea.lockLevel = wVal;
		workarea.lockThread = threadID;

		if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
		{
			// Remove any event for this thread.
			u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
		}

		__KernelResumeThreadFromWait(threadID, 0);
		wokeThreads = true;
		mutex->waitingThreads.erase(iter);
		break;
	}

	if (!wokeThreads)
		workarea.lockThread = 0;

	return wokeThreads;
}
// Timer callback for mailbox wait timeouts: zeroes the reported time left
// and wakes the thread with a TIMEOUT result (wait-list cleanup happens later).
void __KernelMbxTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	const SceUID mbxID = __KernelGetWaitID(threadID, WAITTYPE_MBX, error);
	Mbx *m = kernelObjects.Get<Mbx>(mbxID, error);
	if (m != NULL)
	{
		// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
		// The reason is, if it times out, but what it was waiting on is DELETED prior to it
		// actually running, it will get a DELETE result instead of a TIMEOUT.
		// So, we need to remember it or we won't be able to mark it DELETE instead later.

		// TODO: Should numWaitThreads be decreased yet?
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
}
bool __KernelUnlockMutexForThread(Mutex *mutex, SceUID threadID, u32 &error, int result) { if (!HLEKernel::VerifyWait(threadID, WAITTYPE_MUTEX, mutex->GetUID())) return false; // If result is an error code, we're just letting it go. if (result == 0) { int wVal = (int)__KernelGetWaitValue(threadID, error); __KernelMutexAcquireLock(mutex, wVal, threadID); } u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (timeoutPtr != 0 && mutexWaitTimer != -1) { // Remove any event for this thread. s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); return true; }
//int sceKernelSignalSema(SceUID semaid, int signal); // void because it changes threads. void sceKernelSignalSema(SceUID id, int signal) { //TODO: check that this thing really works :) u32 error; Semaphore *s = kernelObjects.Get<Semaphore>(id, error); if (s) { if (s->ns.currentCount + signal > s->ns.maxCount) { RETURN(SCE_KERNEL_ERROR_SEMA_OVF); return; } int oldval = s->ns.currentCount; s->ns.currentCount += signal; DEBUG_LOG(HLE,"sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount); // We need to set the return value BEFORE processing other threads. RETURN(0); bool wokeThreads = false; retry: // TODO: PSP_SEMA_ATTR_PRIORITY std::vector<SceUID>::iterator iter; for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++) { SceUID threadID = *iter; int wVal = (int)__KernelGetWaitValue(threadID, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (wVal <= s->ns.currentCount) { s->ns.currentCount -= wVal; s->ns.numWaitThreads--; if (timeoutPtr != 0 && semaWaitTimer != 0) { // Remove any event for this thread. int cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, 0); wokeThreads = true; s->waitingThreads.erase(iter); goto retry; } else { break; } } __KernelReSchedule("semaphore signalled"); } else { ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id); RETURN(error;) } }