void Idle(int maxIdle) {
	int cyclesDown = currentMIPS->downcount;
	if (maxIdle != 0 && cyclesDown > maxIdle)
		cyclesDown = maxIdle;

	if (first && cyclesDown > 0) {
		int cyclesExecuted = slicelength - currentMIPS->downcount;
		int cyclesNextEvent = (int)(first->time - globalTimer);
		if (cyclesNextEvent < cyclesExecuted + cyclesDown) {
			cyclesDown = cyclesNextEvent - cyclesExecuted;
			// Now, now... no time machines, please.
			if (cyclesDown < 0)
				cyclesDown = 0;
		}
	}

	VERBOSE_LOG(TIME, "Idle for %i cycles! (%f ms)", cyclesDown, cyclesDown / (float)(CPU_HZ * 0.001f));

	idledCycles += cyclesDown;
	currentMIPS->downcount -= cyclesDown;
	if (currentMIPS->downcount == 0)
		currentMIPS->downcount = -1;
}
// Returns false for invalid parameters (e.g. don't check callbacks, etc.)
// Successful allocation is indicated by error == 0.
bool __KernelAllocateVpl(SceUID uid, u32 size, u32 addrPtr, u32 &error, const char *funcname) {
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl) {
		if (size == 0 || size > (u32) vpl->nv.poolSize) {
			WARN_LOG(HLE, "%s(vpl=%i, size=%i, ptrout=%08x): invalid size", funcname, uid, size, addrPtr);
			error = SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE;
			return false;
		}

		VERBOSE_LOG(HLE, "%s(vpl=%i, size=%i, ptrout=%08x)", funcname, uid, size, addrPtr);

		// Padding (normally used to track the allocation.)
		u32 allocSize = size + 8;
		u32 addr = vpl->alloc.Alloc(allocSize, true);
		if (addr != (u32) -1) {
			Memory::Write_U32(addr, addrPtr);
			error = 0;
		} else
			error = SCE_KERNEL_ERROR_NO_MEMORY;

		return true;
	}

	return false;
}
u32 sceKernelUtilsMt19937UInt(u32 ctx) {
	VERBOSE_LOG(HLE, "sceKernelUtilsMt19937UInt(%08x)", ctx);
	if (!Memory::IsValidAddress(ctx))
		return -1;
	MersenneTwister *mt = (MersenneTwister *)Memory::GetPointer(ctx);
	return mt->R32();
}
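// For reference, a minimal sketch of what an R32() call on such a context
// typically does. Assumption (not taken from the emulator): the in-memory
// context is a standard MT19937 state of 624 32-bit words plus an index; the
// actual MersenneTwister class may be laid out differently.
struct MT19937State {
	u32 mt[624];
	u32 index;  // >= 624 means the state block is exhausted and must be regenerated
};

static u32 MT19937Next(MT19937State *s) {
	if (s->index >= 624) {
		// Regenerate the whole state block (the "twist").
		for (int i = 0; i < 624; i++) {
			u32 y = (s->mt[i] & 0x80000000) | (s->mt[(i + 1) % 624] & 0x7FFFFFFF);
			s->mt[i] = s->mt[(i + 397) % 624] ^ (y >> 1);
			if (y & 1)
				s->mt[i] ^= 0x9908B0DF;
		}
		s->index = 0;
	}
	// Temper and return the next word.
	u32 y = s->mt[s->index++];
	y ^= y >> 11;
	y ^= (y << 7) & 0x9D2C5680;
	y ^= (y << 15) & 0xEFC60000;
	y ^= y >> 18;
	return y;
}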
int sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr) {
	VERBOSE_LOG(HLE, "sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
		return 0;
	else if (error)
		return error;
	else {
		LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
		if (mutex) {
			SceUID threadID = __KernelGetCurThread();
			// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
			if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
				mutex->waitingThreads.push_back(threadID);
			__KernelWaitLwMutex(mutex, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea->uid, count, timeoutPtr, true, "lwmutex cb waited");

			// Return value will be overwritten by wait.
			return 0;
		} else
			return error;
	}
}
u32 sceKernelGetSystemTimeLow() {
	// This clock should tick at 1 MHz.
	u64 t = CoreTiming::GetTicks() / CoreTiming::GetClockFrequencyMHz();
	VERBOSE_LOG(HLE, "%08x=sceKernelGetSystemTimeLow()", (u32)t);
	return (u32)t;
}
static u32 sceDisplayGetVcount() {
	VERBOSE_LOG(SCEDISPLAY, "%i=sceDisplayGetVcount()", vCount);
	hleEatCycles(150);
	hleReSchedule("get vcount");
	return vCount;
}
void __KernelReturnFromInterrupt() {
	VERBOSE_LOG(SCEINTC, "Left interrupt handler at %08x", currentMIPS->pc);

	// This is what we just ran.
	PendingInterrupt pend = pendingInterrupts.front();
	pendingInterrupts.pop_front();

	intrHandlers[pend.intr]->handleResult(pend);
	inInterrupt = false;

	// Restore context after running the interrupt.
	intState.restore();
	// All should now be back to normal, including PC.

	// Alright, let's see if there's any more interrupts queued...
	if (!__RunOnePendingInterrupt()) {
		// Otherwise, we reschedule when dispatch was enabled, or switch back otherwise.
		if (__KernelIsDispatchEnabled())
			__KernelReSchedule("left interrupt");
		else
			__KernelSwitchToThread(threadBeforeInterrupt, "left interrupt");
	}
}
int sceKernelUnlockLwMutex(u32 workareaPtr, int count) {
	VERBOSE_LOG(HLE, "sceKernelUnlockLwMutex(%08x, %i)", workareaPtr, count);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	if (workarea->uid == -1)
		return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
	else if (count <= 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if ((workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if (workarea->lockLevel == 0 || workarea->lockThread != __KernelGetCurThread())
		return PSP_LWMUTEX_ERROR_NOT_LOCKED;
	else if (workarea->lockLevel < count)
		return PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW;

	workarea->lockLevel -= count;

	if (workarea->lockLevel == 0) {
		u32 error;
		if (__KernelUnlockLwMutex(workarea, error))
			hleReSchedule("lwmutex unlocked");
	}

	return 0;
}
void VagDecoder::DecodeBlock(u8 *&readp) {
	int predict_nr = *readp++;
	int shift_factor = predict_nr & 0xf;
	predict_nr >>= 4;
	int flags = *readp++;
	if (flags == 7) {
		VERBOSE_LOG(SAS, "VAG ending block at %d", curBlock_);
		end_ = true;
		return;
	} else if (flags == 6) {
		loopStartBlock_ = curBlock_;
	} else if (flags == 3) {
		if (loopEnabled_) {
			loopAtNextBlock_ = true;
		}
	}

	// Each data byte carries two 4-bit nibbles: the low nibble becomes sample i,
	// the high nibble sample i + 1, both sign-extended before the shift.
	for (int i = 0; i < 28; i += 2) {
		int d = *readp++;
		int s = (short)((d & 0xf) << 12);
		DecodeSample(i, s >> shift_factor, predict_nr);
		s = (short)((d & 0xf0) << 8);
		DecodeSample(i + 1, s >> shift_factor, predict_nr);
	}

	curSample = 0;
	curBlock_++;
	if (curBlock_ == numBlocks_) {
		end_ = true;
	}
}
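// A plausible sketch of what each DecodeSample() call above has to do, using the
// standard VAG/ADPCM predictor table. This is an assumption about the algorithm,
// not the decoder's actual member function; s1/s2 stand in for its history state.
static const int kVagFilter[5][2] = {
	{ 0, 0 }, { 60, 0 }, { 115, -52 }, { 98, -55 }, { 122, -60 },
};

static s16 DecodeSampleSketch(int sample, int predict_nr, int &s1, int &s2) {
	if (predict_nr >= 5)
		predict_nr = 0;  // guard against malformed data
	// Predict from the two previous outputs, then add the shifted nibble.
	int predicted = (s1 * kVagFilter[predict_nr][0] + s2 * kVagFilter[predict_nr][1]) >> 6;
	int value = sample + predicted;
	// Clamp to the signed 16-bit range.
	if (value > 32767) value = 32767;
	if (value < -32768) value = -32768;
	s2 = s1;
	s1 = value;
	return (s16)value;
}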
// numFrames is number of stereo frames.
// This is called from *outside* the emulator thread.
int __AudioMix(short *outstereo, int numFrames) {
	// TODO: if mixFrequency != the actual output frequency, resample!

	int underrun = -1;
	s16 sampleL = 0;
	s16 sampleR = 0;

	const s16 *buf1 = 0, *buf2 = 0;
	size_t sz1, sz2;
	{
		lock_guard guard(section);
		outAudioQueue.popPointers(numFrames * 2, &buf1, &sz1, &buf2, &sz2);
		memcpy(outstereo, buf1, sz1 * sizeof(s16));
		if (buf2) {
			memcpy(outstereo + sz1, buf2, sz2 * sizeof(s16));
		}
	}

	int remains = (int)(numFrames * 2 - sz1 - sz2);
	if (remains > 0)
		memset(outstereo + numFrames * 2 - remains, 0, remains * sizeof(s16));  // remains is in samples, memset wants bytes

	if (sz1 + sz2 < (size_t)numFrames) {
		underrun = (int)(sz1 + sz2) / 2;
		VERBOSE_LOG(SCEAUDIO, "Audio out buffer UNDERRUN at %i of %i", underrun, numFrames);
	}
	return underrun >= 0 ? underrun : numFrames;
}
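// Hypothetical caller, for illustration only: the host audio thread requests a
// fixed number of stereo frames, and __AudioMix() fills what it can, zero-padding
// and reporting any underrun. The callback name is an assumption, not engine code.
void HostAudioCallback(short *stereoOut, int numFrames) {
	int mixed = __AudioMix(stereoOut, numFrames);
	if (mixed < numFrames) {
		// Underrun: only 'mixed' frames carry real data; the tail was zero-filled.
	}
}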
// I'm so sorry Ced but this is highly endian unsafe :(
bool ParamSFOData::ReadSFO(const u8 *paramsfo, size_t size) {
	const Header *header = (const Header *)paramsfo;
	if (header->magic != 0x46535000)
		return false;
	if (header->version != 0x00000101)
		WARN_LOG(LOADER, "Unexpected SFO header version: %08x", header->version);

	const IndexTable *indexTables = (const IndexTable *)(paramsfo + sizeof(Header));

	const u8 *key_start = paramsfo + header->key_table_start;
	const u8 *data_start = paramsfo + header->data_table_start;

	for (u32 i = 0; i < header->index_table_entries; i++) {
		const char *key = (const char *)(key_start + indexTables[i].key_table_offset);

		switch (indexTables[i].param_fmt) {
		case 0x0404:
			{
				// Unsigned int
				const u32 *data = (const u32 *)(data_start + indexTables[i].data_table_offset);
				SetValue(key, *data, indexTables[i].param_max_len);
				VERBOSE_LOG(LOADER, "%s %08x", key, *data);
			}
			break;
		case 0x0004:
			// Special format UTF-8
			{
				const u8 *utfdata = (const u8 *)(data_start + indexTables[i].data_table_offset);
				VERBOSE_LOG(LOADER, "%s %s", key, utfdata);
				SetValue(key, utfdata, indexTables[i].param_len, indexTables[i].param_max_len);
			}
			break;
		case 0x0204:
			// Regular UTF-8
			{
				const char *utfdata = (const char *)(data_start + indexTables[i].data_table_offset);
				VERBOSE_LOG(LOADER, "%s %s", key, utfdata);
				SetValue(key, std::string(utfdata /*, indexTables[i].param_len*/), indexTables[i].param_max_len);
			}
			break;
		}
	}
	return true;
}
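// The layout presumably behind the Header and IndexTable types used above,
// following the standard PARAM.SFO format. Field widths are assumptions based
// on that format, not copied from the codebase; the project's actual definitions
// may use explicit little-endian types (hence the "endian unsafe" remark).
struct SFOHeaderSketch {
	u32 magic;                // "\0PSF" stored little-endian == 0x46535000
	u32 version;              // usually 0x00000101
	u32 key_table_start;      // file offset of the key (name) table
	u32 data_table_start;     // file offset of the data table
	u32 index_table_entries;  // number of index records
};

struct SFOIndexTableSketch {
	u16 key_table_offset;     // key offset, relative to key_table_start
	u16 param_fmt;            // 0x0404 = u32, 0x0004 / 0x0204 = UTF-8 variants
	u32 param_len;            // bytes actually used by the value
	u32 param_max_len;        // bytes reserved for the value
	u32 data_table_offset;    // value offset, relative to data_table_start
};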
int sceKernelUSec2SysClock(u32 usec, u32 clockPtr) {
	VERBOSE_LOG(SCEKERNEL, "sceKernelUSec2SysClock(%i, %08x)", usec, clockPtr);
	if (Memory::IsValidAddress(clockPtr))
		Memory::Write_U64((usec & 0xFFFFFFFFL), clockPtr);
	hleEatCycles(165);
	return 0;
}
static HRESULT STDMETHODCALLTYPE MyQueryInterface(IReferenceClock* pThis, REFIID riid, void** ppvObj) {
	VERBOSE_LOG() << riid.Data1;
	HRESULT rv = QueryInterface(pThis, riid, ppvObj);
	if (SUCCEEDED(rv))
		HookCOMInterface(riid, ppvObj);
	return rv;
}
u64 sceKernelGetSystemTimeWide() {
	u64 t = CoreTiming::GetGlobalTimeUsScaled();
	VERBOSE_LOG(SCEKERNEL, "%i=sceKernelGetSystemTimeWide()", (u32)t);
	hleEatCycles(250);
	hleReSchedule("system time");
	return t;
}
static test_outcome run_test_with_siglongjmp(const test_suite suite, const test* test_case)
{
	int sigsetret;

	if (0 == test_case->test_case) {
		fprintf(stderr, "%s is a null test\n", current_test);
		return INVALID;
	}

	/* Note: the VERBOSE_LOG/LOG_TEST_ERROR format strings carry more conversion
	 * specifiers than explicit arguments; the macros presumably supply the
	 * current suite and test names themselves. */
	sigsetret = sigsetjmp(recovery_buffer, 1);
	VERBOSE_LOG("Test %s:%s checkpoint status %d\n", sigsetret);

	switch (sigsetret) {
	/* clean path */
	case 0:
		current_stage = SETUP;
		VERBOSE_LOG("Running %s:%s suite setup\n");
		suite.setup();
		VERBOSE_LOG("Running %s:%s setup\n");
		test_case->setup();

		current_stage = TEST;
		VERBOSE_LOG("Running %s:%s\n");
		test_case->test_case();

		current_stage = TEARDOWN;
		VERBOSE_LOG("Running %s:%s tear down\n");
		test_case->teardown();
		VERBOSE_LOG("Running %s:%s tear down\n");
		suite.teardown();
		return PASS;
		break;
	case SETUP:
		LOG_TEST_ERROR("Error in [%s:%s] during setup\n");
		sigsetret = sigsetjmp(recovery_buffer, 1);
		if (0 == sigsetret) {
			current_stage = TEARDOWN;
			VERBOSE_LOG("Running %s:%s setup failure tear down\n");
			test_case->teardown();
		}
		break;
	case TEST:
		LOG_TEST_ERROR("Test failure in [%s:%s] ***%s\n", TEST_OUTCOMES[FAIL]);
		sigsetret = sigsetjmp(recovery_buffer, 1);
		if (0 == sigsetret) {
			current_stage = TEARDOWN;
			VERBOSE_LOG("Running %s:%s test failure tear down\n");
			test_case->teardown();
		}
		return FAIL;
		break;
	case TEARDOWN:
		LOG_TEST_ERROR("Error in [%s:%s] during teardown\n");
		break;
	default:
		LOG_TEST_ERROR("Error in [%s:%s] with unknown failure type %u\n", sigsetret);
		break;
	}

	return UNRESOLVED;
}
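/* A minimal sketch, under assumption, of the other half of this harness: a
 * signal handler that jumps back to the checkpoint so that sigsetjmp() above
 * returns the stage (SETUP/TEST/TEARDOWN) that was executing when the failure
 * happened. The handler name and the set of signals are illustrative, not the
 * suite's actual code. */
static void crash_recovery_handler(int signum)
{
	(void)signum;
	siglongjmp(recovery_buffer, current_stage);
}

static void install_crash_recovery(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = crash_recovery_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	sigaction(SIGABRT, &sa, NULL);
}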
void __TriggerInterrupt(int type, PSPInterrupt intno, int subintr) {
	if (interruptsEnabled || (type & PSP_INTR_ONLY_IF_ENABLED) == 0) {
		intrHandlers[intno]->queueUp(subintr);
		VERBOSE_LOG(SCEINTC, "Triggering subinterrupts for interrupt %i sub %i (%i in queue)", intno, subintr, (u32)pendingInterrupts.size());
		__TriggerRunInterrupts(type);
	}
}
u32 sceKernelGetSystemTimeLow() {
	// This clock should tick at 1 MHz.
	u64 t = CoreTiming::GetGlobalTimeUs();
	VERBOSE_LOG(SCEKERNEL, "%08x=sceKernelGetSystemTimeLow()", (u32)t);
	hleEatCycles(165);
	hleReSchedule("system time");
	return (u32)t;
}
u32 sceDisplayWaitVblankStartMultiCB(int vblanks) {
	if (vblanks <= 0) {
		WARN_LOG(HLE, "sceDisplayWaitVblankStartMultiCB(%d): invalid number of vblanks", vblanks);
		return SCE_KERNEL_ERROR_INVALID_VALUE;
	}
	VERBOSE_LOG(HLE, "sceDisplayWaitVblankStartMultiCB(%d)", vblanks);
	vblankWaitingThreads.push_back(WaitVBlankInfo(__KernelGetCurThread(), vblanks));
	__KernelWaitCurThread(WAITTYPE_VBLANK, 0, 0, 0, true, "vblank start multi waited");
	return 0;
}
int sceKernelGetSystemTime(u32 sysclockPtr) {
	u64 t = CoreTiming::GetGlobalTimeUs();
	if (Memory::IsValidAddress(sysclockPtr))
		Memory::Write_U64(t, sysclockPtr);
	VERBOSE_LOG(SCEKERNEL, "sceKernelGetSystemTime(out:%16llx)", t);
	hleEatCycles(265);
	hleReSchedule("system time");
	return 0;
}
static u32 sceRtcGetCurrentTick(u32 tickPtr) {
	VERBOSE_LOG(SCERTC, "sceRtcGetCurrentTick(%08x)", tickPtr);

	u64 curTick = __RtcGetCurrentTick();
	if (Memory::IsValidAddress(tickPtr))
		Memory::Write_U64(curTick, tickPtr);
	hleEatCycles(300);
	hleReSchedule("rtc current tick");
	return 0;
}
u32 sceDisplayWaitVblankCB() {
	if (!isVblank) {
		VERBOSE_LOG(HLE, "sceDisplayWaitVblankCB()");
		vblankWaitingThreads.push_back(WaitVBlankInfo(__KernelGetCurThread()));
		__KernelWaitCurThread(WAITTYPE_VBLANK, 0, 0, 0, true, "vblank waited");
		return 0;
	} else {
		DEBUG_LOG(HLE, "sceDisplayWaitVblankCB() - not waiting since in vBlank");
		hleEatCycles(5 * 222);
		return 1;
	}
}
int scePsmfPlayerGetCurrentStatus(u32 psmfPlayer) {
	PsmfPlayer *psmfplayer = getPsmfPlayer(psmfPlayer);
	if (!psmfplayer) {
		// Mana Khemia and other games call this even when not necessary.
		// It's annoying so the logging is verbose'd out.
		VERBOSE_LOG(ME, "scePsmfPlayerGetCurrentStatus(%08x): invalid psmf player", psmfPlayer);
		return ERROR_PSMF_NOT_FOUND;
	}

	DEBUG_LOG(ME, "%d=scePsmfPlayerGetCurrentStatus(%08x)", psmfplayer->status, psmfPlayer);
	return psmfplayer->status;
}
void sceKernelCpuResumeIntr(u32 enable) {
	VERBOSE_LOG(HLE, "sceKernelCpuResumeIntr(%i)", enable);
	if (enable) {
		__EnableInterrupts();
		hleRunInterrupts();
	} else {
		__DisableInterrupts();
	}
}
static u32 sceDisplayWaitVblankCB() {
	if (!isVblank) {
		VERBOSE_LOG(SCEDISPLAY, "sceDisplayWaitVblankCB()");
		vblankWaitingThreads.push_back(WaitVBlankInfo(__KernelGetCurThread()));
		__KernelWaitCurThread(WAITTYPE_VBLANK, 1, 0, 0, true, "vblank waited");
		return 0;
	} else {
		DEBUG_LOG(SCEDISPLAY, "sceDisplayWaitVblankCB() - not waiting since in vBlank");
		hleEatCycles(1110);
		hleReSchedule("vblank wait skipped");
		return 1;
	}
}
bool MetaFileSystem::MapFilePath(const std::string &_inpath, std::string &outpath, MountPoint **system) {
	lock_guard guard(lock);
	std::string realpath;

	// Special handling: host0:command.txt (as seen in Super Monkey Ball Adventures, for example)
	// appears to mean the current directory on the UMD. Let's just assume the current directory.
	std::string inpath = _inpath;
	if (strncasecmp(inpath.c_str(), "host0:", strlen("host0:")) == 0) {
		INFO_LOG(HLE, "Host0 path detected, stripping: %s", inpath.c_str());
		inpath = inpath.substr(strlen("host0:"));
	}

	const std::string *currentDirectory = &startingDirectory;

	int currentThread = __KernelGetCurThread();
	currentDir_t::iterator it = currentDir.find(currentThread);
	if (it == currentDir.end()) {
		// Attempt to emulate SCE_KERNEL_ERROR_NOCWD / 8002032C: may break things requiring fixes elsewhere
		if (inpath.find(':') == std::string::npos /* means path is relative */) {
			lastOpenError = SCE_KERNEL_ERROR_NOCWD;
			WARN_LOG(HLE, "Path is relative, but current directory not set for thread %i. returning 8002032C(SCE_KERNEL_ERROR_NOCWD) instead.", currentThread);
		}
	} else {
		currentDirectory = &(it->second);
	}

	if (RealPath(*currentDirectory, inpath, realpath)) {
		for (size_t i = 0; i < fileSystems.size(); i++) {
			size_t prefLen = fileSystems[i].prefix.size();
			if (strncasecmp(fileSystems[i].prefix.c_str(), realpath.c_str(), prefLen) == 0) {
				outpath = realpath.substr(prefLen);
				*system = &(fileSystems[i]);

				VERBOSE_LOG(HLE, "MapFilePath: mapped \"%s\" to prefix: \"%s\", path: \"%s\"", inpath.c_str(), fileSystems[i].prefix.c_str(), outpath.c_str());

				return true;
			}
		}
	}

	DEBUG_LOG(HLE, "MapFilePath: failed mapping \"%s\", returning false", inpath.c_str());
	return false;
}
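// Hypothetical illustration of the mapping: with a mount whose prefix is "disc0:",
// the call strips the prefix and hands back both the in-filesystem path and the
// matching mount point. The pspFileSystem instance name is an assumption made for
// this example, not necessarily the project's global.
void ExampleLookup() {
	MountPoint *mount = nullptr;
	std::string pathInMount;
	if (pspFileSystem.MapFilePath("disc0:/PSP_GAME/PARAM.SFO", pathInMount, &mount)) {
		// pathInMount == "/PSP_GAME/PARAM.SFO", mount refers to the disc0: filesystem.
	}
}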
void sceKernelCpuResumeIntr(u32 enable) {
	VERBOSE_LOG(SCEINTC, "sceKernelCpuResumeIntr(%i)", enable);
	if (enable) {
		__EnableInterrupts();
		hleRunInterrupts();
		hleReSchedule("interrupts resumed");
	} else {
		__DisableInterrupts();
	}
}
u32 sceDisplayWaitVblankStartMultiCB(int vblanks) {
	if (vblanks <= 0) {
		WARN_LOG(SCEDISPLAY, "sceDisplayWaitVblankStartMultiCB(%d): invalid number of vblanks", vblanks);
		return SCE_KERNEL_ERROR_INVALID_VALUE;
	}
	VERBOSE_LOG(SCEDISPLAY, "sceDisplayWaitVblankStartMultiCB(%d)", vblanks);
	if (!__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	if (__IsInInterrupt())
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;

	vblankWaitingThreads.push_back(WaitVBlankInfo(__KernelGetCurThread(), vblanks));
	__KernelWaitCurThread(WAITTYPE_VBLANK, 1, 0, 0, true, "vblank start multi waited");
	return 0;
}
void sceKernelCpuSuspendIntr() {
	VERBOSE_LOG(HLE, "sceKernelCpuSuspendIntr");
	int returnValue;
	if (__InterruptsEnabled()) {
		returnValue = 1;
		__DisableInterrupts();
	} else {
		returnValue = 0;
	}
	RETURN(returnValue);
}
void sceKernelCpuSuspendIntr() {
	VERBOSE_LOG(SCEINTC, "sceKernelCpuSuspendIntr");
	int returnValue;
	if (__InterruptsEnabled()) {
		returnValue = 1;
		__DisableInterrupts();
	} else {
		returnValue = 0;
	}
	hleEatCycles(15);
	RETURN(returnValue);
}
bool ProtectMemoryPages(const void* ptr, size_t size, uint32_t memProtFlags) {
	VERBOSE_LOG(JIT, "ProtectMemoryPages: %p (%d) : r%d w%d x%d", ptr, (int)size,
		(memProtFlags & MEM_PROT_READ) != 0, (memProtFlags & MEM_PROT_WRITE) != 0, (memProtFlags & MEM_PROT_EXEC) != 0);

	if (PlatformIsWXExclusive()) {
		if ((memProtFlags & (MEM_PROT_WRITE | MEM_PROT_EXEC)) == (MEM_PROT_WRITE | MEM_PROT_EXEC)) {
			ERROR_LOG(MEMMAP, "Bad memory protection %d!", memProtFlags);
			PanicAlert("Bad memory protect : W^X is in effect, can't both write and exec");
		}
	}

	// Note - VirtualProtect will affect the full pages containing the requested range.
	// mprotect does not seem to, at least not on Android unless I made a mistake somewhere, so we manually round.
#ifdef _WIN32
	uint32_t protect = ConvertProtFlagsWin32(memProtFlags);
#if PPSSPP_PLATFORM(UWP)
	DWORD oldValue;
	if (!VirtualProtectFromApp((void *)ptr, size, protect, &oldValue)) {
		PanicAlert("WriteProtectMemory failed!\n%s", GetLastErrorMsg());
		return false;
	}
#else
	DWORD oldValue;
	if (!VirtualProtect((void *)ptr, size, protect, &oldValue)) {
		PanicAlert("WriteProtectMemory failed!\n%s", GetLastErrorMsg());
		return false;
	}
#endif
	return true;
#else
	uint32_t protect = ConvertProtFlagsUnix(memProtFlags);
	uintptr_t page_size = GetMemoryProtectPageSize();

	uintptr_t start = (uintptr_t)ptr;
	uintptr_t end = (uintptr_t)ptr + size;
	start &= ~(page_size - 1);
	end = (end + page_size - 1) & ~(page_size - 1);
	int retval = mprotect((void *)start, end - start, protect);
	if (retval != 0) {
		ERROR_LOG(MEMMAP, "mprotect failed (%p)! errno=%d (%s)", (void *)start, errno, strerror(errno));
		return false;
	}
	return true;
#endif
}
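// A hypothetical usage sketch (not taken from the codebase): on a W^X-exclusive
// platform a JIT cannot hold write and exec rights at once, so it keeps the block
// writable while emitting and flips it to read+exec before running the generated code.
void EmitThenExecute(void *codeBlock, size_t blockSize) {
	ProtectMemoryPages(codeBlock, blockSize, MEM_PROT_READ | MEM_PROT_WRITE);
	// ... emit machine code into codeBlock ...
	ProtectMemoryPages(codeBlock, blockSize, MEM_PROT_READ | MEM_PROT_EXEC);
	// ... call into codeBlock ...
}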