// Cancels all waits on the given VPL, optionally reporting how many
// threads were waiting through numWaitThreadsPtr.
int sceKernelCancelVpl(SceUID uid, u32 numWaitThreadsPtr)
{
	DEBUG_LOG(HLE, "sceKernelCancelVpl(%i, %08x)", uid, numWaitThreadsPtr);

	u32 error;
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (!vpl)
		return error;

	// Snapshot the wait count before releasing anyone.
	vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
	if (Memory::IsValidAddress(numWaitThreadsPtr))
		Memory::Write_U32(vpl->nv.numWaitThreads, numWaitThreadsPtr);

	// Release every waiter with the cancel status.
	if (__KernelClearVplThreads(vpl, SCE_KERNEL_ERROR_WAIT_CANCEL))
		hleReSchedule("vpl canceled");
	return 0;
}
static bool __KernelCheckResumeMsgPipeReceive(MsgPipe *m, MsgPipeWaitingThread &waitInfo, u32 &error, int result, bool &wokeThreads) { if (!waitInfo.IsStillWaiting(m->GetUID())) return true; bool needsResched = false; bool needsWait = false; result = __KernelReceiveMsgPipe(m, waitInfo.bufAddr, waitInfo.bufSize, waitInfo.waitMode, waitInfo.transferredBytes.ptr, 0, true, false, needsResched, needsWait); if (needsResched) hleReSchedule(true, "msgpipe data received"); if (needsWait) return false; waitInfo.Complete(m->GetUID(), result); wokeThreads = true; return true; }
int sceIoWaitAsyncCB(int id, u32 address) { // Should process callbacks here u32 error; FileNode *f = kernelObjects.Get < FileNode > (id, error); if (f) { u64 res = f->asyncResult; if (defAction) { res = defAction(id, defParam); defAction = 0; } Memory::Write_U64(res, address); DEBUG_LOG(HLE, "%i = sceIoWaitAsyncCB(%i, %08x) (HACK)", (u32) res, id, address); hleCheckCurrentCallbacks(); hleReSchedule(true, "io waited"); return 0; //completed } else { ERROR_LOG(HLE, "ERROR - sceIoWaitAsyncCB waiting for invalid id %i", id); return -1; } }
// Destroys a semaphore, releasing any threads still blocked on it with
// SCE_KERNEL_ERROR_WAIT_DELETE.
int sceKernelDeleteSema(SceUID id)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (!s)
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteSema(%i): invalid semaphore", id);
		return error;
	}

	DEBUG_LOG(SCEKERNEL, "sceKernelDeleteSema(%i)", id);
	// Wake all waiters before the object disappears.
	if (__KernelClearSemaThreads(s, SCE_KERNEL_ERROR_WAIT_DELETE))
		hleReSchedule("semaphore deleted");
	return kernelObjects.Destroy<Semaphore>(id);
}
//int sceKernelSignalSema(SceUID semaid, int signal); int sceKernelSignalSema(SceUID id, int signal) { u32 error; Semaphore *s = kernelObjects.Get<Semaphore>(id, error); if (s) { if (s->ns.currentCount + signal - s->ns.numWaitThreads > s->ns.maxCount) return SCE_KERNEL_ERROR_SEMA_OVF; int oldval = s->ns.currentCount; s->ns.currentCount += signal; DEBUG_LOG(HLE, "sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount); bool wokeThreads = false; std::vector<SceUID>::iterator iter, end, best; retry: for (iter = s->waitingThreads.begin(), end = s->waitingThreads.end(); iter != end; ++iter) { if ((s->ns.attr & PSP_SEMA_ATTR_PRIORITY) != 0) best = __KernelSemaFindPriority(s->waitingThreads, iter); else best = iter; if (__KernelUnlockSemaForThread(s, *best, error, 0, wokeThreads)) { s->waitingThreads.erase(best); goto retry; } } if (wokeThreads) hleReSchedule("semaphore signaled"); return 0; } else { ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id); return error; } }
// Adds |signal| to the semaphore's count and wakes eligible waiters.
// This variant bases the overflow check on the live waiting-thread list
// and sorts waiters by priority up front instead of searching per wake.
int sceKernelSignalSema(SceUID id, int signal)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		// Overflow: each waiting thread will consume one count when woken.
		if (s->ns.currentCount + signal - (int) s->waitingThreads.size() > s->ns.maxCount)
		{
			DEBUG_LOG(HLE, "sceKernelSignalSema(%i, %i): overflow (at %i)", id, signal, s->ns.currentCount);
			return SCE_KERNEL_ERROR_SEMA_OVF;
		}

		int oldval = s->ns.currentCount;
		s->ns.currentCount += signal;
		DEBUG_LOG(HLE, "sceKernelSignalSema(%i, %i) (count: %i -> %i)", id, signal, oldval, s->ns.currentCount);

		// Stable sort keeps FIFO order among waiters of equal priority.
		if ((s->ns.attr & PSP_SEMA_ATTR_PRIORITY) != 0)
			std::stable_sort(s->waitingThreads.begin(), s->waitingThreads.end(), __KernelThreadSortPriority);

		bool wokeThreads = false;
		// erase() invalidates the iterators, so restart the scan after
		// every successful wake.
retry:
		for (auto iter = s->waitingThreads.begin(), end = s->waitingThreads.end(); iter != end; ++iter)
		{
			if (__KernelUnlockSemaForThread(s, *iter, error, 0, wokeThreads))
			{
				s->waitingThreads.erase(iter);
				goto retry;
			}
		}

		if (wokeThreads)
			hleReSchedule("semaphore signaled");
		return 0;
	}
	else
	{
		DEBUG_LOG(HLE, "sceKernelSignalSema(%i, %i): invalid semaphore", id, signal);
		return error;
	}
}
// Blocking/polling wrapper around the low-level send: performs the
// transfer, then reschedules and/or puts the current thread to sleep
// depending on what the low-level call reports.
static int __KernelSendMsgPipe(MsgPipe *m, u32 sendBufAddr, u32 sendSize, int waitMode, u32 resultAddr, u32 timeoutPtr, bool cbEnabled, bool poll)
{
	hleEatCycles(2400);

	bool doResched = false;
	bool doWait = false;
	int result = __KernelSendMsgPipe(m, sendBufAddr, sendSize, waitMode, resultAddr, timeoutPtr, cbEnabled, poll, doResched, doWait);

	if (doResched)
		hleReSchedule(cbEnabled, "msgpipe data sent");

	if (doWait)
	{
		// If the timeout can't be scheduled, fail immediately instead of sleeping.
		if (!__KernelSetMsgPipeTimeout(timeoutPtr))
			result = SCE_KERNEL_ERROR_WAIT_TIMEOUT;
		else
			__KernelWaitCurThread(WAITTYPE_MSGPIPE, m->GetUID(), MSGPIPE_WAIT_VALUE_SEND, timeoutPtr, cbEnabled, "msgpipe send waited");
	}
	return result;
}
static bool __KernelCheckResumeMsgPipeSend(MsgPipe *m, MsgPipeWaitingThread &waitInfo, u32 &error, int result, bool &wokeThreads) { if (!waitInfo.IsStillWaiting(m->GetUID())) return true; bool needsResched = false; bool needsWait = false; result = __KernelSendMsgPipe(m, waitInfo.bufAddr, waitInfo.bufSize, waitInfo.waitMode, waitInfo.transferredBytes.ptr, 0, true, false, needsResched, needsWait); if (needsResched) hleReSchedule(true, "msgpipe data sent"); // Could not wake up. May have sent some stuff. if (needsWait) return false; waitInfo.Complete(m->GetUID(), result); wokeThreads = true; return true; }
// Cancels all waits on an event flag, forcing its pattern to |pattern|
// and optionally reporting the pre-cancel wait count.
u32 sceKernelCancelEventFlag(SceUID uid, u32 pattern, u32 numWaitThreadsPtr)
{
	DEBUG_LOG(HLE, "sceKernelCancelEventFlag(%i, %08X, %08X)", uid, pattern, numWaitThreadsPtr);

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
	if (!e)
		return error;

	// Report how many threads were waiting before the cancel.
	if (Memory::IsValidAddress(numWaitThreadsPtr))
		Memory::Write_U32(e->nef.numWaitThreads, numWaitThreadsPtr);

	// Overwrite the pattern, then release everyone with the cancel status.
	e->nef.currentPattern = pattern;
	e->nef.numWaitThreads = 0;
	if (__KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_CANCEL))
		hleReSchedule("event flag canceled");
	return 0;
}
// Destroys a message box, releasing any waiting threads with
// SCE_KERNEL_ERROR_WAIT_DELETE.
int sceKernelDeleteMbx(SceUID id)
{
	u32 error;
	Mbx *m = kernelObjects.Get<Mbx>(id, error);
	if (m)
	{
		DEBUG_LOG(HLE, "sceKernelDeleteMbx(%i)", id);

		// Release every waiter with the delete status.
		bool wokeThreads = false;
		for (auto &waiting : m->waitingThreads)
			__KernelUnlockMbxForThread(m, waiting, error, SCE_KERNEL_ERROR_WAIT_DELETE, wokeThreads);
		m->waitingThreads.clear();

		if (wokeThreads)
			hleReSchedule("mbx deleted");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelDeleteMbx(%i): invalid mbx id", id);
	}
	// Destroy also yields the proper error code when the id was invalid.
	return kernelObjects.Destroy<Mbx>(id);
}
// Releases the volatile memory lock and hands it to the next FIFO waiter
// (if any) that can successfully re-acquire it.
int sceKernelVolatileMemUnlock(int type)
{
	// Only mode 0 is supported.
	if (type != 0)
	{
		ERROR_LOG_REPORT(HLE, "sceKernelVolatileMemUnlock(%i) - invalid mode", type);
		return SCE_KERNEL_ERROR_INVALID_MODE;
	}
	if (volatileMemLocked)
	{
		volatileMemLocked = false;

		// Wake someone, always fifo.
		bool wokeThreads = false;
		u32 error;
		// Stops as soon as a waiter re-locks (volatileMemLocked flips back
		// to true inside __KernelVolatileMemLock) or the queue is drained.
		while (!volatileWaitingThreads.empty() && !volatileMemLocked)
		{
			VolatileWaitingThread waitInfo = volatileWaitingThreads.front();
			volatileWaitingThreads.erase(volatileWaitingThreads.begin());

			int waitID = __KernelGetWaitID(waitInfo.threadID, WAITTYPE_VMEM, error);
			// If they were force-released, just skip.
			// NOTE(review): 1 is presumably the wait value used when the
			// thread originally blocked on WAITTYPE_VMEM - confirm against
			// the lock path.
			if (waitID == 1 && __KernelVolatileMemLock(0, waitInfo.addrPtr, waitInfo.sizePtr) == 0)
			{
				__KernelResumeThreadFromWait(waitInfo.threadID, 0);
				wokeThreads = true;
			}
		}

		if (wokeThreads)
		{
			INFO_LOG(HLE, "sceKernelVolatileMemUnlock(%i) handed over to another thread", type);
			hleReSchedule("volatile mem unlocked");
		}
		else
		{
			DEBUG_LOG(HLE, "sceKernelVolatileMemUnlock(%i)", type);
		}
	}
	else
	{
		ERROR_LOG_REPORT(HLE, "sceKernelVolatileMemUnlock(%i) FAILED - not locked", type);
		// I guess it must use a sema.
		return SCE_KERNEL_ERROR_SEMA_OVF;
	}
	return 0;
}
// Destroys a mutex, waking all waiters with SCE_KERNEL_ERROR_WAIT_DELETE
// and unregistering the lock if a thread still holds it.
int sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE, "sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (!mutex)
		return error;

	// Wake every waiter with the delete status.
	bool wokeThreads = false;
	for (SceUID threadID : mutex->waitingThreads)
		wokeThreads |= __KernelUnlockMutexForThread(mutex, threadID, error, SCE_KERNEL_ERROR_WAIT_DELETE);

	// If someone still holds it, unregister the lock before destruction.
	if (mutex->nm.lockThread != -1)
		__KernelMutexEraseLock(mutex);
	mutex->waitingThreads.clear();

	if (wokeThreads)
		hleReSchedule("mutex deleted");
	return kernelObjects.Destroy<Mutex>(id);
}
// Posts a message packet to a message box.  If a thread is already
// waiting, the packet is handed to it directly; otherwise it is linked
// into the mbx's in-guest-memory packet list (priority-ordered when
// SCE_KERNEL_MBA_MSPRI is set, appended otherwise).
int sceKernelSendMbx(SceUID id, u32 packetAddr)
{
	u32 error;
	Mbx *m = kernelObjects.Get<Mbx>(id, error);
	if (!m)
	{
		ERROR_LOG(HLE, "sceKernelSendMbx(%i, %08x): invalid mbx id", id, packetAddr);
		return error;
	}

	NativeMbxPacket *addPacket = (NativeMbxPacket*)Memory::GetPointer(packetAddr);
	if (addPacket == 0)
	{
		ERROR_LOG(HLE, "sceKernelSendMbx(%i, %08x): invalid packet address", id, packetAddr);
		return -1;
	}

	// If the queue is empty, maybe someone is waiting.
	// We have to check them first, they might've timed out.
	if (m->nmb.numMessages == 0)
	{
		bool wokeThreads = false;
		std::vector<MbxWaitingThread>::iterator iter;
		while (!wokeThreads && !m->waitingThreads.empty())
		{
			// SCE_KERNEL_MBA_THPRI: hand the packet to the best-priority
			// waiter; otherwise FIFO (front of the list).
			if ((m->nmb.attr & SCE_KERNEL_MBA_THPRI) != 0)
				iter = __KernelMbxFindPriority(m->waitingThreads);
			else
				iter = m->waitingThreads.begin();

			MbxWaitingThread t = *iter;
			__KernelUnlockMbxForThread(m, t, error, 0, wokeThreads);
			m->waitingThreads.erase(iter);

			if (wokeThreads)
			{
				DEBUG_LOG(HLE, "sceKernelSendMbx(%i, %08x): threads waiting, resuming %d", id, packetAddr, t.first);
				Memory::Write_U32(packetAddr, t.second);
				hleReSchedule("mbx sent");
				// We don't need to do anything else, finish here.
				return 0;
			}
		}
	}

	DEBUG_LOG(HLE, "sceKernelSendMbx(%i, %08x): no threads currently waiting, adding message to queue", id, packetAddr);

	if (m->nmb.numMessages == 0)
		m->AddInitialMessage(packetAddr);
	else
	{
		// Walk the packet list: reject duplicates and bad links, and find
		// the tail (prev ends on the last packet).
		u32 next = m->nmb.packetListHead, prev;
		for (int i = 0, n = m->nmb.numMessages; i < n; i++)
		{
			if (next == packetAddr)
				return PSP_MBX_ERROR_DUPLICATE_MSG;
			if (!Memory::IsValidAddress(next))
				return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
			prev = next;
			next = Memory::Read_U32(next);
		}

		bool inserted = false;
		if (m->nmb.attr & SCE_KERNEL_MBA_MSPRI)
		{
			// NOTE(review): this scan continues from |next| as left by the
			// loop above (one past the tail) rather than restarting from
			// packetListHead - this only visits the actual packets if the
			// list is circular (tail links back to the head).  Confirm.
			NativeMbxPacket p;
			for (int i = 0, n = m->nmb.numMessages; i < n; i++)
			{
				Memory::ReadStruct<NativeMbxPacket>(next, &p);
				// Insert before the first packet with a worse priority.
				if (addPacket->priority < p.priority)
				{
					if (i == 0)
						m->AddFirstMessage(prev, packetAddr);
					else
						m->AddMessage(prev, next, packetAddr);
					inserted = true;
					break;
				}
				prev = next;
				next = Memory::Read_U32(next);
			}
		}
		if (!inserted)
			m->AddLastMessage(prev, packetAddr);
	}
	return 0;
}
// Returns the current vertical-sync counter.
static u32 sceDisplayGetVcount()
{
	// Eat some cycles and yield - presumably to tame games that poll
	// the vcount in a tight loop.
	hleEatCycles(150);
	hleReSchedule("get vcount");
	return hleLogSuccessVerboseI(SCEDISPLAY, vCount);
}
// Requests a reschedule, optionally also requesting that callbacks be
// processed after the syscall completes.
void hleReSchedule(bool callbacks, const char *reason)
{
	// Delegate the plain reschedule first; then tack on the callback flag.
	hleReSchedule(reason);
	if (callbacks)
		hleAfterSyscall |= HLE_AFTER_RESCHED_CALLBACKS;
}
// Receives up to |receiveSize| bytes from a message pipe into
// |receiveBufAddr|.  Two paths: a zero-size pipe transfers directly from
// blocked senders; otherwise data comes from the pipe's buffer.  If the
// request can't be satisfied, either polls out with MPP_EMPTY or blocks
// the current thread (subject to timeoutPtr/cbEnabled).
int __KernelReceiveMsgPipe(MsgPipe *m, u32 receiveBufAddr, u32 receiveSize, int waitMode, u32 resultAddr, u32 timeoutPtr, bool cbEnabled, bool poll)
{
	u32 curReceiveAddr = receiveBufAddr;
	SceUID uid = m->GetUID();

	// MsgPipe buffer size is 0, receiving directly from waiting send threads
	if (m->nmp.bufSize == 0)
	{
		m->SortSendThreads();

		// While they're still sending waiting threads (which can send data)
		while (!m->sendWaitingThreads.empty() && receiveSize != 0)
		{
			MsgPipeWaitingThread *thread = &m->sendWaitingThreads.front();

			// For send threads, "freeSize" is "free to be read".
			u32 bytesToReceive = std::min(thread->freeSize, receiveSize);
			if (bytesToReceive > 0)
			{
				thread->ReadBuffer(Memory::GetPointer(curReceiveAddr), bytesToReceive);
				receiveSize -= bytesToReceive;
				curReceiveAddr += bytesToReceive;

				// A sender is done when fully drained, or immediately in
				// ASAP mode (partial transfer completes it).
				if (thread->freeSize == 0 || thread->waitMode == SCE_KERNEL_MPW_ASAP)
				{
					thread->Complete(uid, 0);
					m->sendWaitingThreads.erase(m->sendWaitingThreads.begin());
					hleReSchedule(cbEnabled, "msgpipe data received");
					thread = NULL;
				}
			}
		}

		// All data hasn't been received and (mode isn't ASAP or nothing was received)
		if (receiveSize != 0 && (waitMode != SCE_KERNEL_MPW_ASAP || curReceiveAddr == receiveBufAddr))
		{
			if (poll)
			{
				// Generally, result is not updated in this case. But for a 0 size buffer in ASAP mode, it is.
				if (Memory::IsValidAddress(resultAddr) && waitMode == SCE_KERNEL_MPW_ASAP)
					Memory::Write_U32(curReceiveAddr - receiveBufAddr, resultAddr);
				return SCE_KERNEL_ERROR_MPP_EMPTY;
			}
			else
			{
				// Queue ourselves and block (or fail immediately if the
				// timeout can't be scheduled).
				m->AddReceiveWaitingThread(__KernelGetCurThread(), curReceiveAddr, receiveSize, waitMode, resultAddr);
				if (__KernelSetMsgPipeTimeout(timeoutPtr))
					__KernelWaitCurThread(WAITTYPE_MSGPIPE, uid, 0, timeoutPtr, cbEnabled, "msgpipe receive waited");
				else
					return SCE_KERNEL_ERROR_WAIT_TIMEOUT;
				return 0;
			}
		}
	}
	// Getting data from the MsgPipe buffer
	else
	{
		if (receiveSize > (u32) m->nmp.bufSize)
		{
			ERROR_LOG(HLE, "__KernelReceiveMsgPipe(%d): size %d too large for buffer", uid, receiveSize);
			return SCE_KERNEL_ERROR_ILLEGAL_SIZE;
		}

		while (m->GetUsedSize() > 0)
		{
			u32 bytesToReceive = std::min(receiveSize, m->GetUsedSize());
			if (bytesToReceive != 0)
			{
				Memory::Memcpy(curReceiveAddr, Memory::GetPointer(m->buffer), bytesToReceive);
				// freeSize is bumped BEFORE the memmove so GetUsedSize()
				// already reflects the remaining (post-receive) amount.
				m->nmp.freeSize += bytesToReceive;
				memmove(Memory::GetPointer(m->buffer), Memory::GetPointer(m->buffer) + bytesToReceive, m->GetUsedSize());
				curReceiveAddr += bytesToReceive;
				receiveSize -= bytesToReceive;

				// Freed buffer space may let blocked senders make progress.
				m->CheckSendThreads();
			}
			else
				break;
		}

		if (receiveSize != 0 && (waitMode != SCE_KERNEL_MPW_ASAP || curReceiveAddr == receiveBufAddr))
		{
			if (poll)
				return SCE_KERNEL_ERROR_MPP_EMPTY;
			else
			{
				m->AddReceiveWaitingThread(__KernelGetCurThread(), curReceiveAddr, receiveSize, waitMode, resultAddr);
				if (__KernelSetMsgPipeTimeout(timeoutPtr))
					__KernelWaitCurThread(WAITTYPE_MSGPIPE, uid, 0, timeoutPtr, cbEnabled, "msgpipe receive waited");
				else
					return SCE_KERNEL_ERROR_WAIT_TIMEOUT;
				return 0;
			}
		}
	}

	// We didn't wait - report the number of bytes actually transferred.
	if (Memory::IsValidAddress(resultAddr))
		Memory::Write_U32(curReceiveAddr - receiveBufAddr, resultAddr);
	return 0;
}
// Sends up to |sendSize| bytes from |sendBufAddr| into a message pipe.
// Two paths: a zero-size pipe transfers directly to blocked receivers;
// otherwise data goes into the pipe's buffer (unless senders are already
// queued ahead of us).  If the request can't be satisfied, either polls
// out with MPP_FULL or blocks the current thread.
int __KernelSendMsgPipe(MsgPipe *m, u32 sendBufAddr, u32 sendSize, int waitMode, u32 resultAddr, u32 timeoutPtr, bool cbEnabled, bool poll)
{
	u32 curSendAddr = sendBufAddr;
	SceUID uid = m->GetUID();

	// If the buffer size is 0, nothing is buffered and all operations wait.
	if (m->nmp.bufSize == 0)
	{
		m->SortReceiveThreads();

		while (!m->receiveWaitingThreads.empty() && sendSize != 0)
		{
			MsgPipeWaitingThread *thread = &m->receiveWaitingThreads.front();

			// For receive threads, freeSize is how much they can still take.
			u32 bytesToSend = std::min(thread->freeSize, sendSize);
			if (bytesToSend > 0)
			{
				thread->WriteBuffer(Memory::GetPointer(curSendAddr), bytesToSend);
				sendSize -= bytesToSend;
				curSendAddr += bytesToSend;

				// A receiver is done when filled, or immediately in ASAP
				// mode (partial transfer completes it).
				if (thread->freeSize == 0 || thread->waitMode == SCE_KERNEL_MPW_ASAP)
				{
					thread->Complete(uid, 0);
					m->receiveWaitingThreads.erase(m->receiveWaitingThreads.begin());
					hleReSchedule(cbEnabled, "msgpipe data sent");
					thread = NULL;
				}
			}
		}

		// If there is still data to send and (we want to send all of it or we didn't send anything)
		if (sendSize != 0 && (waitMode != SCE_KERNEL_MPW_ASAP || curSendAddr == sendBufAddr))
		{
			if (poll)
			{
				// Generally, result is not updated in this case. But for a 0 size buffer in ASAP mode, it is.
				if (Memory::IsValidAddress(resultAddr) && waitMode == SCE_KERNEL_MPW_ASAP)
					Memory::Write_U32(curSendAddr - sendBufAddr, resultAddr);
				return SCE_KERNEL_ERROR_MPP_FULL;
			}
			else
			{
				// Queue ourselves and block (or fail immediately if the
				// timeout can't be scheduled).
				m->AddSendWaitingThread(__KernelGetCurThread(), curSendAddr, sendSize, waitMode, resultAddr);
				if (__KernelSetMsgPipeTimeout(timeoutPtr))
					__KernelWaitCurThread(WAITTYPE_MSGPIPE, uid, 0, timeoutPtr, cbEnabled, "msgpipe send waited");
				else
					return SCE_KERNEL_ERROR_WAIT_TIMEOUT;
				return 0;
			}
		}
	}
	else
	{
		if (sendSize > (u32) m->nmp.bufSize)
		{
			ERROR_LOG(HLE, "__KernelSendMsgPipe(%d): size %d too large for buffer", uid, sendSize);
			return SCE_KERNEL_ERROR_ILLEGAL_SIZE;
		}

		u32 bytesToSend = 0;
		// If others are already waiting, space or not, we have to get in line.
		m->SortSendThreads();
		if (m->sendWaitingThreads.empty())
		{
			if (sendSize <= (u32) m->nmp.freeSize)
				bytesToSend = sendSize;
			else if (waitMode == SCE_KERNEL_MPW_ASAP)
				bytesToSend = m->nmp.freeSize;
		}

		if (bytesToSend != 0)
		{
			// Append at the current end of buffered data.
			Memory::Memcpy(m->buffer + (m->nmp.bufSize - m->nmp.freeSize), Memory::GetPointer(sendBufAddr), bytesToSend);
			m->nmp.freeSize -= bytesToSend;
			curSendAddr += bytesToSend;
			sendSize -= bytesToSend;

			// New data may let blocked receivers make progress.
			if (m->CheckReceiveThreads())
				hleReSchedule(cbEnabled, "msgpipe data sent");
		}
		else if (sendSize != 0)
		{
			if (poll)
				return SCE_KERNEL_ERROR_MPP_FULL;
			else
			{
				m->AddSendWaitingThread(__KernelGetCurThread(), curSendAddr, sendSize, waitMode, resultAddr);
				if (__KernelSetMsgPipeTimeout(timeoutPtr))
					__KernelWaitCurThread(WAITTYPE_MSGPIPE, uid, 0, timeoutPtr, cbEnabled, "msgpipe send waited");
				else
					return SCE_KERNEL_ERROR_WAIT_TIMEOUT;
				return 0;
			}
		}
	}

	// We didn't wait, so update the number of bytes transferred now.
	if (Memory::IsValidAddress(resultAddr))
		Memory::Write_U32(curSendAddr - sendBufAddr, resultAddr);
	return 0;
}
// Stub: logs and reschedules; no module is actually loaded here.
u32 sceUtilityLoadModule(u32 module)
{
	DEBUG_LOG(HLE, "sceUtilityLoadModule(%i)", module);
	hleReSchedule("utilityloadmodule");
	return 0;
}
// Stub: logs and reschedules; no AV module is actually unloaded here.
u32 sceUtilityUnloadAvModule(u32 module)
{
	DEBUG_LOG(HLE, "sceUtilityUnloadAvModule(%i)", module);
	hleReSchedule("utilityunloadavmodule");
	return 0;
}