// Returns whether the thread should be removed. bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads) { SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); // The waitID may be different after a timeout. if (waitID != s->GetUID()) return true; // If result is an error code, we're just letting it go. if (result == 0) { int wVal = (int) __KernelGetWaitValue(threadID, error); if (wVal > s->ns.currentCount) return false; s->ns.currentCount -= wVal; s->ns.numWaitThreads--; } if (timeoutPtr != 0 && semaWaitTimer != 0) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); wokeThreads = true; return true; }
// CoreTiming callback that completes an hleDelayResult(): reassembles the
// 64-bit result and wakes the delayed thread.
void hleDelayResultFinish(u64 userdata, int cycleslate)
{
	u32 error;
	const SceUID threadID = (SceUID) userdata;
	const SceUID verify = __KernelGetWaitID(threadID, WAITTYPE_HLEDELAY, error);

	// The top 32 bits of userdata are the top 32 bits of the 64 bit result.
	// We can't just put it all in userdata because we need to know the threadID...
	const u64 resultHigh = userdata & 0xFFFFFFFF00000000ULL;
	const u64 result = resultHigh | __KernelGetWaitValue(threadID, error);

	if (error != 0 || verify != 1)
		WARN_LOG(HLE, "Someone else woke up HLE-blocked thread?");
	else
		__KernelResumeThreadFromWait(threadID, result);
}
//int sceKernelSignalSema(SceUID semaid, int signal); void sceKernelSignalSema() { //TODO: check that this thing really works :) SceUID id = PARAM(0); u32 signal = PARAM(1); u32 error; Semaphore *s = kernelObjects.Get<Semaphore>(id, error); if (s) { int oldval = s->ns.currentCount; s->ns.currentCount += signal; DEBUG_LOG(HLE,"sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount); bool wokeThreads = false; retry: //TODO: check for threads to wake up - wake them std::vector<SceUID>::iterator iter; for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); s++) { SceUID id = *iter; int wVal = (int)__KernelGetWaitValue(id, error); if (wVal <= s->ns.currentCount) { __KernelResumeThread(id); s->ns.currentCount -= wVal; wokeThreads = true; s->waitingThreads.erase(iter); goto retry; } else { break; } } //pop the thread that were released from waiting // I don't think we should reschedule here //if (wokeThreads) // __KernelReSchedule("semaphore signalled"); RETURN(0); } else { ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id); RETURN(error); } }
// Advances every waiter on the channel by `step` consumed samples and wakes
// threads whose requested sample count has been satisfied.
inline void __AudioWakeThreads(AudioChannel &chan, int step)
{
	u32 error;
	for (size_t w = 0; w < chan.waitingThreads.size(); ++w)
	{
		AudioChannelWaitInfo &waitInfo = chan.waitingThreads[w];
		// BUGFIX: previously subtracted the global hwBlockSize, leaving the
		// `step` parameter unused; the other overload of this helper uses step.
		waitInfo.numSamples -= step;

		// If it's done (there will still be samples on queue) and actually still waiting, wake it up.
		if (waitInfo.numSamples <= 0 && __KernelGetWaitID(waitInfo.threadID, WAITTYPE_AUDIOCHANNEL, error) != 0)
		{
			// DEBUG_LOG(HLE, "Woke thread %i for some buffer filling", waitingThread);
			u32 ret = __KernelGetWaitValue(waitInfo.threadID, error);
			__KernelResumeThreadFromWait(waitInfo.threadID, ret);

			// erase + w-- keeps the index valid for the next element.
			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
		}
	}
}
// Advances every waiter on the channel by `step` consumed samples. Satisfied
// waiters are resumed (with an error code when result != 0); waiters that have
// already stopped waiting are simply dropped from the list.
inline void __AudioWakeThreads(AudioChannel &chan, int result, int step)
{
	u32 error;
	size_t idx = 0;
	while (idx < chan.waitingThreads.size())
	{
		AudioChannelWaitInfo &info = chan.waitingThreads[idx];
		info.numSamples -= step;

		const u32 waitID = __KernelGetWaitID(info.threadID, WAITTYPE_AUDIOCHANNEL, error);
		const bool stillWaiting = waitID != 0;

		// If it's done (there will still be samples on queue) and actually still waiting, wake it up.
		if (info.numSamples <= 0 && stillWaiting)
		{
			// DEBUG_LOG(SCEAUDIO, "Woke thread %i for some buffer filling", waitingThread);
			u32 ret = result == 0 ? __KernelGetWaitValue(info.threadID, error) : SCE_ERROR_AUDIO_CHANNEL_NOT_RESERVED;
			__KernelResumeThreadFromWait(info.threadID, ret);
			chan.waitingThreads.erase(chan.waitingThreads.begin() + idx);
		}
		else if (!stillWaiting)
		{
			// This means the thread stopped waiting, so stop trying to wake it.
			chan.waitingThreads.erase(chan.waitingThreads.begin() + idx);
		}
		else
		{
			++idx;
		}
	}
}
// Callback-end handler for threads that were waiting on a message pipe when a
// callback ran: re-checks whether the send/receive can resume via the shared
// HLEKernel::WaitEndCallback helper, and cleans up waiters that timed out.
static void __KernelMsgPipeEndCallback(SceUID threadID, SceUID prevCallbackId)
{
	u32 error;
	u32 waitValue = __KernelGetWaitValue(threadID, error);
	// NOTE(review): timeoutPtr is fetched but unused here - the timeout is
	// presumably handled inside WaitEndCallback; confirm before removing.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	SceUID uid = __KernelGetWaitID(threadID, WAITTYPE_MSGPIPE, error);
	MsgPipe *ko = uid == 0 ? NULL : kernelObjects.Get<MsgPipe>(uid, error);

	// Pipe is gone (or the thread is no longer waiting on one) - nothing to do.
	if (ko == NULL)
		return;

	// The wait value distinguishes send waits from receive waits.
	switch (waitValue)
	{
	case MSGPIPE_WAIT_VALUE_SEND:
		{
			MsgPipeWaitingThread dummy;
			auto result = HLEKernel::WaitEndCallback<MsgPipe, WAITTYPE_MSGPIPE, MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, __KernelCheckResumeMsgPipeSend, dummy, ko->sendWaitingThreads, ko->pausedSendWaits);
			if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
			{
				DEBUG_LOG(SCEKERNEL, "sceKernelSendMsgPipeCB: Resuming wait from callback");
			}
			else if (result == HLEKernel::WAIT_CB_TIMED_OUT)
			{
				// It was re-added to the waiting threads list, but it timed out. Let's remove it.
				ko->RemoveSendWaitingThread(threadID);
			}
		}
		break;

	case MSGPIPE_WAIT_VALUE_RECV:
		{
			MsgPipeWaitingThread dummy;
			auto result = HLEKernel::WaitEndCallback<MsgPipe, WAITTYPE_MSGPIPE, MsgPipeWaitingThread>(threadID, prevCallbackId, waitTimer, __KernelCheckResumeMsgPipeReceive, dummy, ko->receiveWaitingThreads, ko->pausedReceiveWaits);
			if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
			{
				DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMsgPipeCB: Resuming wait from callback");
			}
			else if (result == HLEKernel::WAIT_CB_TIMED_OUT)
			{
				// It was re-added to the waiting threads list, but it timed out. Let's remove it.
				ko->RemoveReceiveWaitingThread(threadID);
			}
		}
		break;

	default:
		break;
	}
}
bool __KernelUnlockLwMutex(NativeLwMutexWorkarea &workarea, u32 &error) { LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error); if (error) { workarea.lockThread = 0; return false; } // TODO: PSP_MUTEX_ATTR_PRIORITY bool wokeThreads = false; std::vector<SceUID>::iterator iter, end; for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter) { SceUID threadID = *iter; int wVal = (int)__KernelGetWaitValue(threadID, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); workarea.lockLevel = wVal; workarea.lockThread = threadID; if (timeoutPtr != 0 && lwMutexWaitTimer != 0) { // Remove any event for this thread. u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, 0); wokeThreads = true; mutex->waitingThreads.erase(iter); break; } if (!wokeThreads) workarea.lockThread = 0; return wokeThreads; }
// Callback-end handler for a thread that was waiting on UMD drive status:
// either resumes it immediately (state already matches, or the paused-wait
// bookkeeping is missing, or the wait already timed out) or re-arms the stat
// timeout and puts the thread back on the waiting list.
void __UmdEndCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Paused waits are keyed by the previous callback id, or the thread itself
	// when there was none.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_UMD, error);
	u32 stat = __KernelGetWaitValue(threadID, error);
	if (umdPausedWaitTimeouts.find(pauseKey) == umdPausedWaitTimeouts.end())
	{
		// Bookkeeping got out of sync - wake the thread rather than hang it.
		WARN_LOG_REPORT(HLE, "__UmdEndCallback(): UMD paused wait missing");

		__KernelResumeThreadFromWait(threadID, 0);
		return;
	}

	u64 waitDeadline = umdPausedWaitTimeouts[pauseKey];
	umdPausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// The requested state bits already match the drive - wake immediately.
	if ((stat & __KernelUmdGetState()) != 0)
	{
		__KernelResumeThreadFromWait(threadID, 0);
		return;
	}

	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	// waitDeadline == 0 means no timeout was requested, so never time out then.
	if (cyclesLeft < 0 && waitDeadline != 0)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	else
	{
		_dbg_assert_msg_(HLE, umdStatTimeoutEvent != -1, "Must have a umd timer");
		// NOTE(review): schedules the timeout for __KernelGetCurThread() rather
		// than threadID - presumably they are the same thread at callback end;
		// confirm.
		CoreTiming::ScheduleEvent(cyclesLeft, umdStatTimeoutEvent, __KernelGetCurThread());

		umdWaitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceUmdWaitDriveStatCB: Resuming lock wait for callback");
	}
}
void __CtrlDoSample() { // This samples the ctrl data into the buffers and updates the latch. __CtrlUpdateLatch(); // Wake up a single thread that was waiting for the buffer. retry: if (!waitingThreads.empty() && ctrlBuf != ctrlBufRead) { SceUID threadID = waitingThreads[0]; waitingThreads.erase(waitingThreads.begin()); u32 error; SceUID wVal = __KernelGetWaitID(threadID, WAITTYPE_CTRL, error); // Make sure it didn't get woken or something. if (wVal == 0) goto retry; u32 ctrlDataPtr = __KernelGetWaitValue(threadID, error); int retVal = __CtrlReadSingleBuffer(ctrlDataPtr, wVal == CTRL_WAIT_NEGATIVE); __KernelResumeThreadFromWait(threadID, retVal); } }
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result) { if (!HLEKernel::VerifyWait(threadID, WAITTYPE_LWMUTEX, mutex->GetUID())) return false; // If result is an error code, we're just letting it go. if (result == 0) { workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error); workarea->lockThread = threadID; } u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (timeoutPtr != 0 && lwMutexWaitTimer != -1) { // Remove any event for this thread. s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID); Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, result); return true; }
// Attempts to satisfy one thread waiting on a VPL allocation.
// Returns whether the thread should be removed from the wait list (true even
// when the thread was no longer waiting on this vpl); sets wokeThreads only
// when the thread was actually resumed.
// result: value the thread resumes with; non-zero means an error code is being
// delivered and no allocation is attempted.
bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads)
{
	const SceUID threadID = threadInfo.threadID;

	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_VPL, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != vpl->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int size = (int) __KernelGetWaitValue(threadID, error);

		// Padding (normally used to track the allocation.)
		u32 allocSize = size + 8;
		u32 addr = vpl->alloc.Alloc(allocSize, true);
		if (addr != (u32) -1)
			Memory::Write_U32(addr, threadInfo.addrPtr);
		else
			// Still not enough free space - keep the thread waiting.
			return false;

		vpl->nv.numWaitThreads--;
	}

	if (timeoutPtr != 0 && vplWaitTimer != -1)
	{
		// Remove any event for this thread, writing back the remaining time.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(vplWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
// CoreTiming callback fired when the UMD drive state changes: records the new
// activation state and resumes any threads whose requested status bits now
// match the drive state.
void __UmdStatChange(u64 userdata, int cyclesLate)
{
	// TODO: Why not a bool anyway?
	umdActivated = userdata & 0xFF;

	// Wake anyone waiting on this.
	for (size_t i = 0; i < umdWaitingThreads.size(); ++i)
	{
		const SceUID threadID = umdWaitingThreads[i];

		u32 error;
		// The wait value holds the status bitmask the thread asked for.
		u32 stat = __KernelGetWaitValue(threadID, error);
		bool keep = false;
		if (HLEKernel::VerifyWait(threadID, WAITTYPE_UMD, 1))
		{
			if ((stat & __KernelUmdGetState()) != 0)
				__KernelResumeThreadFromWait(threadID, 0);
			// Only if they are still waiting do we keep them in the list.
			else
				keep = true;
		}

		// erase + i-- keeps the index valid for the next element.
		if (!keep)
			umdWaitingThreads.erase(umdWaitingThreads.begin() + i--);
	}
}
// 7. One must call sceIoWaitAsync / sceIoWaitAsyncCB / sceIoPollAsync / possibly sceIoGetAsyncStat. // 8. Finally, the fd is usable (or closed via sceIoCloseAsync.) Presumably the io thread has joined now. // TODO: Closed files are a bit special: until the fd is reused (?), the async result is still available. // Clearly a buffer is used, it doesn't seem like they are actually kernel objects. // TODO: We don't do any of that yet. // For now, let's at least delay the callback mnotification. void __IoAsyncNotify(u64 userdata, int cyclesLate) { SceUID threadID = userdata >> 32; SceUID fd = (SceUID) (userdata & 0xFFFFFFFF); __IoCompleteAsyncIO(fd); u32 error; SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_IO, error); u32 address = __KernelGetWaitValue(threadID, error); if (waitID == fd && error == 0) { __KernelResumeThreadFromWait(threadID, 0); FileNode *f = kernelObjects.Get<FileNode>(fd, error); if (Memory::IsValidAddress(address) && f) { Memory::Write_U64((u64) f->asyncResult, address); } // If this was a sceIoCloseAsync, we should close it at this point. if (f->closePending) { kernelObjects.Destroy<FileNode>(fd); } } }
//int sceKernelSignalSema(SceUID semaid, int signal); // void because it changes threads. void sceKernelSignalSema(SceUID id, int signal) { //TODO: check that this thing really works :) u32 error; Semaphore *s = kernelObjects.Get<Semaphore>(id, error); if (s) { if (s->ns.currentCount + signal > s->ns.maxCount) { RETURN(SCE_KERNEL_ERROR_SEMA_OVF); return; } int oldval = s->ns.currentCount; s->ns.currentCount += signal; DEBUG_LOG(HLE,"sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount); // We need to set the return value BEFORE processing other threads. RETURN(0); bool wokeThreads = false; retry: // TODO: PSP_SEMA_ATTR_PRIORITY std::vector<SceUID>::iterator iter; for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++) { SceUID threadID = *iter; int wVal = (int)__KernelGetWaitValue(threadID, error); u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error); if (wVal <= s->ns.currentCount) { s->ns.currentCount -= wVal; s->ns.numWaitThreads--; if (timeoutPtr != 0 && semaWaitTimer != 0) { // Remove any event for this thread. int cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID); Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr); } __KernelResumeThreadFromWait(threadID, 0); wokeThreads = true; s->waitingThreads.erase(iter); goto retry; } else { break; } } __KernelReSchedule("semaphore signalled"); } else { ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id); RETURN(error;) } }