// Waits until the UMD drive reaches any of the states in stat, with a timeout.
// Returns 0, or a kernel error when the wait is not allowed in this context.
int sceUmdWaitDriveStatWithTimer(u32 stat, u32 timeout) {
	if (stat == 0) {
		DEBUG_LOG(SCEIO, "sceUmdWaitDriveStatWithTimer(stat = %08x, timeout = %d): bad status", stat, timeout);
		return SCE_KERNEL_ERROR_ERRNO_INVALID_ARGUMENT;
	}
	if (!__KernelIsDispatchEnabled()) {
		DEBUG_LOG(SCEIO, "sceUmdWaitDriveStatWithTimer(stat = %08x, timeout = %d): dispatch disabled", stat, timeout);
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt()) {
		DEBUG_LOG(SCEIO, "sceUmdWaitDriveStatWithTimer(stat = %08x, timeout = %d): inside interrupt", stat, timeout);
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	// Block only when the requested state isn't already active.
	const bool alreadySatisfied = (stat & __KernelUmdGetState()) != 0;
	if (!alreadySatisfied) {
		DEBUG_LOG(SCEIO, "sceUmdWaitDriveStatWithTimer(stat = %08x, timeout = %d): waiting", stat, timeout);
		__UmdWaitStat(timeout);
		umdWaitingThreads.push_back(__KernelGetCurThread());
		__KernelWaitCurThread(WAITTYPE_UMD, 1, stat, 0, 0, "umd stat waited with timer");
		return 0;
	}

	hleReSchedule("umd stat checked");
	DEBUG_LOG(SCEIO, "0=sceUmdWaitDriveStatWithTimer(stat = %08x, timeout = %d)", stat, timeout);
	return 0;
}
// Copies up to nBufs controller samples to ctrlDataPtr via __CtrlReadSingleBuffer.
// peek: read without consuming (the read pointer is restored afterwards).
// negative: forwarded to __CtrlReadSingleBuffer — presumably inverts button/axis data; confirm against caller.
// Returns the number of buffers copied, or a kernel error code.
static int __CtrlReadBuffer(u32 ctrlDataPtr, u32 nBufs, bool negative, bool peek)
{
	if (nBufs > NUM_CTRL_BUFFERS)
		return SCE_KERNEL_ERROR_INVALID_SIZE;

	// Consuming reads are disallowed when dispatch is disabled or inside an interrupt.
	if (!peek && !__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	if (!peek && __IsInInterrupt())
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;

	// Saved so a peek can undo its advance of the read pointer.
	u32 resetRead = ctrlBufRead;

	u32 availBufs;
	// Peeks always work, they just go from now X buffers.
	if (peek)
		availBufs = nBufs;
	else
	{
		// Count of unread entries in the circular buffer, clamped to the request.
		availBufs = (ctrlBuf - ctrlBufRead + NUM_CTRL_BUFFERS) % NUM_CTRL_BUFFERS;
		if (availBufs > nBufs)
			availBufs = nBufs;
	}
	// Start reading availBufs entries back from the write position.
	ctrlBufRead = (ctrlBuf - availBufs + NUM_CTRL_BUFFERS) % NUM_CTRL_BUFFERS;

	int done = 0;
	auto data = PSPPointer<_ctrl_data>::Create(ctrlDataPtr);
	for (u32 i = 0; i < availBufs; ++i)
		done += __CtrlReadSingleBuffer(data++, negative);

	// A peek must not consume: restore the read pointer.
	if (peek)
		ctrlBufRead = resetRead;

	return done;
}
// Invokes an HLE syscall, honoring the flags declared in its HLEFunction entry.
inline void CallSyscallWithFlags(const HLEFunction *info)
{
	latestSyscall = info;
	const u32 flags = info->flags;

	// Some entries request that stack space below SP be zeroed on entry,
	// but only when that range still lies within the thread's stack.
	if (flags & HLE_CLEAR_STACK_BYTES) {
		u32 stackStart = __KernelGetCurThreadStackStart();
		if (currentMIPS->r[MIPS_REG_SP] - info->stackBytesToClear >= stackStart) {
			Memory::Memset(currentMIPS->r[MIPS_REG_SP] - info->stackBytesToClear, 0, info->stackBytesToClear);
		}
	}

	// Refuse the call up-front when its flags forbid the current context,
	// writing the error into the MIPS return register instead of calling func.
	if ((flags & HLE_NOT_DISPATCH_SUSPENDED) && !__KernelIsDispatchEnabled()) {
		RETURN(hleLogDebug(HLE, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch suspended"));
	} else if ((flags & HLE_NOT_IN_INTERRUPT) && __IsInInterrupt()) {
		RETURN(hleLogDebug(HLE, SCE_KERNEL_ERROR_ILLEGAL_CONTEXT, "in interrupt"));
	} else {
		info->func();
	}

	// Run any deferred action the syscall scheduled; otherwise poison scratch regs.
	if (hleAfterSyscall != HLE_AFTER_NOTHING)
		hleFinishSyscall(*info);
	else
		SetDeadbeefRegs();
}
// Callback-processing variant: waits for the UMD drive to reach one of the
// states in stat, running pending callbacks first.
int sceUmdWaitDriveStatCB(u32 stat, u32 timeout) {
	if (stat == 0) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStatCB(stat = %08x, timeout = %d): bad status", stat, timeout);
		return SCE_KERNEL_ERROR_ERRNO_INVALID_ARGUMENT;
	}
	if (!__KernelIsDispatchEnabled()) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStatCB(stat = %08x, timeout = %d): dispatch disabled", stat, timeout);
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt()) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStatCB(stat = %08x, timeout = %d): inside interrupt", stat, timeout);
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	// Give any pending callbacks a chance to run before checking the state.
	hleCheckCurrentCallbacks();

	const bool satisfied = (stat & __KernelUmdGetState()) != 0;
	if (satisfied) {
		hleReSchedule("umd stat waited");
	} else {
		DEBUG_LOG(HLE, "0=sceUmdWaitDriveStatCB(stat = %08x, timeout = %d): waiting", stat, timeout);
		if (timeout == 0)
			timeout = 8000;
		__UmdWaitStat(timeout);
		umdWaitingThreads.push_back(UmdWaitingThread::Make(__KernelGetCurThread(), stat));
		__KernelWaitCurThread(WAITTYPE_UMD, 1, stat, 0, true, "umd stat waited");
	}

	DEBUG_LOG(HLE, "0=sceUmdWaitDriveStatCB(stat = %08x, timeout = %d)", stat, timeout);
	return 0;
}
// Finishes the interrupt handler that just returned: delivers its result,
// restores the pre-interrupt CPU state, then either runs the next queued
// interrupt or resumes normal thread scheduling.
void __KernelReturnFromInterrupt()
{
	DEBUG_LOG(CPU, "Left interrupt handler at %08x", currentMIPS->pc);

	// This is what we just ran.
	PendingInterrupt pend = pendingInterrupts.front();
	pendingInterrupts.pop_front();

	intrHandlers[pend.intr]->handleResult(pend);
	inInterrupt = false;

	// Restore context after running the interrupt.
	intState.restore();
	// All should now be back to normal, including PC.

	// Alright, let's see if there's any more interrupts queued...
	if (!__RunOnePendingInterrupt())
	{
		// Otherwise, we reschedule when dispatch was enabled, or switch back otherwise.
		if (__KernelIsDispatchEnabled())
			__KernelReSchedule("return from interrupt");
		else
			__KernelSwitchToThread(threadBeforeInterrupt, "return from interrupt");
	}
}
/**
 * Wait for the UMD drive to reach one of the states in stat.
 *
 * @param stat - The drive stat to wait for (must be non-zero).
 * @return 0 on success, < 0 on error.
 */
int sceUmdWaitDriveStat(u32 stat) {
	if (stat == 0) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStat(stat = %08x): bad status", stat);
		return SCE_KERNEL_ERROR_ERRNO_INVALID_ARGUMENT;
	}
	if (!__KernelIsDispatchEnabled()) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStat(stat = %08x): dispatch disabled", stat);
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt()) {
		DEBUG_LOG(HLE, "sceUmdWaitDriveStat(stat = %08x): inside interrupt", stat);
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	// Already in a matching state? Nothing to wait for.
	const u32 currentState = __KernelUmdGetState();
	if ((currentState & stat) != 0) {
		DEBUG_LOG(HLE, "0=sceUmdWaitDriveStat(stat = %08x)", stat);
		return 0;
	}

	DEBUG_LOG(HLE, "sceUmdWaitDriveStat(stat = %08x): waiting", stat);
	umdWaitingThreads.push_back(UmdWaitingThread::Make(__KernelGetCurThread(), stat));
	__KernelWaitCurThread(WAITTYPE_UMD, 1, stat, 0, 0, "umd stat waited");
	return 0;
}
static int sceKernelVolatileMemLock(int type, u32 paddr, u32 psize) { u32 error = 0; // If dispatch is disabled or in an interrupt, don't check, just return an error. // But still write the addr and size (some games require this to work, and it's testably true.) if (!__KernelIsDispatchEnabled()) { error = SCE_KERNEL_ERROR_CAN_NOT_WAIT; } else if (__IsInInterrupt()) { error = SCE_KERNEL_ERROR_ILLEGAL_CONTEXT; } else { error = __KernelVolatileMemLock(type, paddr, psize); } switch (error) { case 0: // HACK: This fixes Crash Tag Team Racing. // Should only wait 1200 cycles though according to Unknown's testing, // and with that it's still broken. So it's not this, unfortunately. // Leaving it in for the 0.9.8 release anyway. hleEatCycles(500000); DEBUG_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x) - success", type, paddr, psize); break; case SCE_KERNEL_ERROR_POWER_VMEM_IN_USE: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x) - already locked, waiting", type, paddr, psize); const VolatileWaitingThread waitInfo = { __KernelGetCurThread(), paddr, psize }; volatileWaitingThreads.push_back(waitInfo); __KernelWaitCurThread(WAITTYPE_VMEM, 1, 0, 0, false, "volatile mem waited"); } break; case SCE_KERNEL_ERROR_CAN_NOT_WAIT: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x): dispatch disabled", type, paddr, psize); Memory::Write_U32(0x08400000, paddr); Memory::Write_U32(0x00400000, psize); } break; case SCE_KERNEL_ERROR_ILLEGAL_CONTEXT: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x): in interrupt", type, paddr, psize); Memory::Write_U32(0x08400000, paddr); Memory::Write_U32(0x00400000, psize); } break; default: ERROR_LOG_REPORT(HLE, "%08x=sceKernelVolatileMemLock(%i, %08x, %08x) - error", type, paddr, psize, error); break; } return error; }
// Waits until event flag `id` matches `bits` under the given wait mode
// (non-callback variant). Writes the matched pattern to outBitsPtr.
int sceKernelWaitEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 timeoutPtr)
{
	DEBUG_LOG(HLE, "sceKernelWaitEventFlag(%i, %08x, %i, %08x, %08x)", id, bits, wait, outBitsPtr, timeoutPtr);

	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0)
	{
		WARN_LOG_REPORT(HLE, "sceKernelWaitEventFlag(%i) invalid mode parameter: %08x", id, wait);
		return SCE_KERNEL_ERROR_ILLEGAL_MODE;
	}
	// Can't wait on 0, that's guaranteed to wait forever.
	if (bits == 0)
		return SCE_KERNEL_ERROR_EVF_ILPAT;
	if (!__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e)
	{
		EventFlagTh th;
		// Try to satisfy the wait immediately; block only when there is no match.
		if (!__KernelEventFlagMatches(&e->nef.currentPattern, bits, wait, outBitsPtr))
		{
			// If this thread was left in waitingThreads after a timeout, remove it.
			// Otherwise we might write the outBitsPtr in the wrong place.
			__KernelEventFlagRemoveThread(e, __KernelGetCurThread());

			u32 timeout = 0xFFFFFFFF;
			if (Memory::IsValidAddress(timeoutPtr))
				timeout = Memory::Read_U32(timeoutPtr);

			// Do we allow more than one thread to wait?
			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
				return SCE_KERNEL_ERROR_EVF_MULTI;

			// No match - must wait.
			th.tid = __KernelGetCurThread();
			th.bits = bits;
			th.wait = wait;
			// If < 5ms, sometimes hardware doesn't write this, but it's unpredictable.
			th.outAddr = timeout == 0 ? 0 : outBitsPtr;
			e->waitingThreads.push_back(th);

			__KernelSetEventFlagTimeout(e, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr, false, "event flag waited");
		}

		return 0;
	}
	else
	{
		// Bad UID: propagate the lookup error from kernelObjects.
		return error;
	}
}
// Waits for the given number of vblank starts, processing callbacks while waiting.
static u32 sceDisplayWaitVblankStartMultiCB(int vblanks) {
	// Validate the count before checking context restrictions.
	if (vblanks <= 0)
		return hleLogWarning(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_VALUE, "invalid number of vblanks");
	if (!__KernelIsDispatchEnabled())
		return hleLogWarning(SCEDISPLAY, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch disabled");
	if (__IsInInterrupt())
		return hleLogWarning(SCEDISPLAY, SCE_KERNEL_ERROR_ILLEGAL_CONTEXT, "in interrupt");

	return DisplayWaitForVblanksCB("vblank start multi waited", vblanks);
}
// Delays delivery of an HLE result by usec microseconds, putting the current
// thread into a WAITTYPE_HLEDELAY wait. Returns result unchanged.
u32 hleDelayResult(u32 result, const char *reason, int usec) {
	if (!__KernelIsDispatchEnabled()) {
		WARN_LOG(HLE, "Dispatch disabled, not delaying HLE result (right thing to do?)");
		return result;
	}

	CoreTiming::ScheduleEvent(usToCycles(usec), delayedResultEvent, __KernelGetCurThread());
	__KernelWaitCurThread(WAITTYPE_HLEDELAY, 1, result, 0, false, reason);
	return result;
}
// Returns the status of display list `listid` (mode == 1), or blocks until it
// completes (mode == 0). Mirrors sceGeListSync semantics.
int GPUCommon::ListSync(int listid, int mode) {
	if (g_Config.bSeparateCPUThread) {
		// FIXME: Workaround for displaylists sometimes hanging unprocessed. Not yet sure of the cause.
		ScheduleEvent(GPU_EVENT_PROCESS_QUEUE);
		// Sync first, because the CPU is usually faster than the emulated GPU.
		SyncThread();
	}

	easy_guard guard(listLock);
	if (listid < 0 || listid >= DisplayListMaxCount)
		return SCE_KERNEL_ERROR_INVALID_ID;

	if (mode < 0 || mode > 1)
		return SCE_KERNEL_ERROR_INVALID_MODE;

	DisplayList& dl = dls[listid];
	if (mode == 1) {
		// Non-blocking poll: translate internal state to a PSP_GE_LIST_* status.
		switch (dl.state) {
		case PSP_GE_DL_STATE_QUEUED:
			if (dl.interrupted)
				return PSP_GE_LIST_PAUSED;
			return PSP_GE_LIST_QUEUED;

		case PSP_GE_DL_STATE_RUNNING:
			// Stalled when execution has caught up with the stall address.
			if (dl.pc == dl.stall)
				return PSP_GE_LIST_STALLING;
			return PSP_GE_LIST_DRAWING;

		case PSP_GE_DL_STATE_COMPLETED:
			return PSP_GE_LIST_COMPLETED;

		case PSP_GE_DL_STATE_PAUSED:
			return PSP_GE_LIST_PAUSED;

		default:
			return SCE_KERNEL_ERROR_INVALID_ID;
		}
	}

	// Blocking mode (mode == 0) requires a waitable context.
	if (!__KernelIsDispatchEnabled()) {
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt()) {
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	// Only wait if the list hasn't already passed its completion tick.
	if (dl.waitTicks > CoreTiming::GetTicks()) {
		__GeWaitCurrentThread(WAITTYPE_GELISTSYNC, listid, "GeListSync");
	}
	return PSP_GE_LIST_COMPLETED;
}
int sceKernelVolatileMemLock(int type, u32 paddr, u32 psize) { u32 error = 0; // If dispatch is disabled or in an interrupt, don't check, just return an error. // But still write the addr and size (some games require this to work, and it's testably true.) if (!__KernelIsDispatchEnabled()) { error = SCE_KERNEL_ERROR_CAN_NOT_WAIT; } else if (__IsInInterrupt()) { error = SCE_KERNEL_ERROR_ILLEGAL_CONTEXT; } else { error = __KernelVolatileMemLock(type, paddr, psize); } switch (error) { case 0: DEBUG_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x) - success", type, paddr, psize); break; case ERROR_POWER_VMEM_IN_USE: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x) - already locked, waiting", type, paddr, psize); const VolatileWaitingThread waitInfo = { __KernelGetCurThread(), paddr, psize }; volatileWaitingThreads.push_back(waitInfo); __KernelWaitCurThread(WAITTYPE_VMEM, 1, 0, 0, false, "volatile mem waited"); } break; case SCE_KERNEL_ERROR_CAN_NOT_WAIT: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x): dispatch disabled", type, paddr, psize); Memory::Write_U32(0x08400000, paddr); Memory::Write_U32(0x00400000, psize); } break; case SCE_KERNEL_ERROR_ILLEGAL_CONTEXT: { WARN_LOG(HLE, "sceKernelVolatileMemLock(%i, %08x, %08x): in interrupt", type, paddr, psize); Memory::Write_U32(0x08400000, paddr); Memory::Write_U32(0x00400000, psize); } break; default: ERROR_LOG_REPORT(HLE, "%08x=sceKernelVolatileMemLock(%i, %08x, %08x) - error", type, paddr, psize, error); break; } return error; }
// Waits for the given number of vblank starts, processing callbacks while waiting.
u32 sceDisplayWaitVblankStartMultiCB(int vblanks) {
	if (vblanks <= 0) {
		WARN_LOG(SCEDISPLAY, "sceDisplayWaitVblankStartMultiCB(%d): invalid number of vblanks", vblanks);
		return SCE_KERNEL_ERROR_INVALID_VALUE;
	}
	VERBOSE_LOG(SCEDISPLAY,"sceDisplayWaitVblankStartMultiCB(%d)", vblanks);

	if (!__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	if (__IsInInterrupt())
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;

	// Register this thread for the requested vblank, then sleep with callbacks enabled.
	WaitVBlankInfo waiter(__KernelGetCurThread(), vblanks);
	vblankWaitingThreads.push_back(waiter);
	__KernelWaitCurThread(WAITTYPE_VBLANK, 1, 0, 0, true, "vblank start multi waited");
	return 0;
}
// Waits for all drawing to complete (mode == 0) or polls the overall queue
// status (mode == 1). Mirrors sceGeDrawSync semantics.
u32 GPUCommon::DrawSync(int mode) {
	if (g_Config.bSeparateCPUThread) {
		// FIXME: Workaround for displaylists sometimes hanging unprocessed. Not yet sure of the cause.
		ScheduleEvent(GPU_EVENT_PROCESS_QUEUE);
		// Sync first, because the CPU is usually faster than the emulated GPU.
		SyncThread();
	}

	easy_guard guard(listLock);
	if (mode < 0 || mode > 1)
		return SCE_KERNEL_ERROR_INVALID_MODE;

	if (mode == 0) {
		// Blocking mode requires a waitable context.
		if (!__KernelIsDispatchEnabled()) {
			return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
		}
		if (__IsInInterrupt()) {
			return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
		}

		if (drawCompleteTicks > CoreTiming::GetTicks()) {
			__GeWaitCurrentThread(WAITTYPE_GEDRAWSYNC, 1, "GeDrawSync");
		} else {
			// Nothing pending: recycle completed lists now.
			for (int i = 0; i < DisplayListMaxCount; ++i) {
				if (dls[i].state == PSP_GE_DL_STATE_COMPLETED) {
					dls[i].state = PSP_GE_DL_STATE_NONE;
				}
			}
		}
		return 0;
	}

	// If there's no current list, it must be complete.
	DisplayList *top = NULL;
	for (auto it = dlQueue.begin(), end = dlQueue.end(); it != end; ++it) {
		if (dls[*it].state != PSP_GE_DL_STATE_COMPLETED) {
			top = &dls[*it];
			break;
		}
	}
	if (!top || top->state == PSP_GE_DL_STATE_COMPLETED)
		return PSP_GE_LIST_COMPLETED;

	// Stalled when execution has caught up with the stall address.
	if (currentList->pc == currentList->stall)
		return PSP_GE_LIST_STALLING;

	return PSP_GE_LIST_DRAWING;
}
// Invokes an HLE syscall, honoring the flags declared in its HLEFunction entry
// (older variant without stack clearing or latestSyscall bookkeeping).
inline void CallSyscallWithFlags(const HLEFunction *info)
{
	const u32 flags = info->flags;
	// Refuse the call when its flags forbid the current context, writing the
	// error into the MIPS return register instead of calling func.
	if ((flags & HLE_NOT_DISPATCH_SUSPENDED) && !__KernelIsDispatchEnabled())
	{
		DEBUG_LOG(HLE, "%s: dispatch suspended", info->name);
		RETURN(SCE_KERNEL_ERROR_CAN_NOT_WAIT);
	}
	else if ((flags & HLE_NOT_IN_INTERRUPT) && __IsInInterrupt())
	{
		DEBUG_LOG(HLE, "%s: in interrupt", info->name);
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_CONTEXT);
	}
	else
		info->func();

	// Run any deferred action the syscall scheduled; otherwise poison scratch regs.
	if (hleAfterSyscall != HLE_AFTER_NOTHING)
		hleFinishSyscall(*info);
	else
		SetDeadbeefRegs();
}
// Runs pending interrupts of the given type when interrupts are enabled and
// we're not already inside a handler; otherwise they remain queued.
void __TriggerRunInterrupts(int type)
{
	// If interrupts aren't enabled, we run them later.
	if (interruptsEnabled && !inInterrupt)
	{
		if ((type & PSP_INTR_HLE) != 0)
			hleRunInterrupts();
		else if ((type & PSP_INTR_ALWAYS_RESCHED) != 0)
		{
			// "Always" only means if dispatch is enabled.
			if (!__RunOnePendingInterrupt() && __KernelIsDispatchEnabled())
			{
				// No interrupt ran: switch the thread off, remembering it so
				// the return-from-interrupt path can switch back.
				SceUID savedThread = __KernelGetCurThread();
				if (__KernelSwitchOffThread("interrupt"))
					threadBeforeInterrupt = savedThread;
			}
		}
		else
			__RunOnePendingInterrupt();
	}
}
// Validates the arguments of a message-pipe receive. Returns 0 when the
// request is acceptable, otherwise the kernel error to report.
static int __KernelValidateReceiveMsgPipe(SceUID uid, u32 receiveBufAddr, u32 receiveSize, int waitMode, u32 resultAddr, bool tryMode = false)
{
	// A size with the sign bit set (negative as s32) is rejected outright.
	if ((receiveSize & 0x80000000) != 0)
	{
		ERROR_LOG(SCEKERNEL, "__KernelReceiveMsgPipe(%d): illegal size %d", uid, receiveSize);
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
	}

	if (receiveSize != 0 && !Memory::IsValidAddress(receiveBufAddr))
	{
		ERROR_LOG(SCEKERNEL, "__KernelReceiveMsgPipe(%d): bad buffer address %08x (should crash?)", uid, receiveBufAddr);
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
	}

	if (waitMode != SCE_KERNEL_MPW_ASAP && waitMode != SCE_KERNEL_MPW_FULL)
	{
		ERROR_LOG(SCEKERNEL, "__KernelReceiveMsgPipe(%d): invalid wait mode %d", uid, waitMode);
		return SCE_KERNEL_ERROR_ILLEGAL_MODE;
	}

	// Try-variants never block, so they skip the context checks below.
	if (tryMode)
		return 0;

	if (!__KernelIsDispatchEnabled())
	{
		WARN_LOG(SCEKERNEL, "__KernelReceiveMsgPipe(%d): dispatch disabled", uid);
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	}
	if (__IsInInterrupt())
	{
		WARN_LOG(SCEKERNEL, "__KernelReceiveMsgPipe(%d): in interrupt", uid);
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
	}

	return 0;
}
// Enqueues the channel's current sample buffer into its output queue,
// optionally blocking the calling thread until the queue drains enough.
// Returns the value the syscall should report (sample count or error).
u32 __AudioEnqueue(AudioChannel &chan, int chanNum, bool blocking) {
	u32 ret = chan.sampleCount;

	if (chan.sampleAddress == 0) {
		// For some reason, multichannel audio lies and returns the sample count here.
		if (chanNum == PSP_AUDIO_CHANNEL_SRC || chanNum == PSP_AUDIO_CHANNEL_OUTPUT2) {
			ret = 0;
		}
	}

	// If there's anything on the queue at all, it should be busy, but we try to be a bit lax.
	//if (chan.sampleQueue.size() > chan.sampleCount * 2 * chanQueueMaxSizeFactor || chan.sampleAddress == 0) {
	if (chan.sampleQueue.size() > 0) {
		if (blocking) {
			// TODO: Regular multichannel audio seems to block for 64 samples less? Or enqueue the first 64 sync?
			int blockSamples = (int)chan.sampleQueue.size() / 2 / chanQueueMinSizeFactor;

			if (__KernelIsDispatchEnabled()) {
				AudioChannelWaitInfo waitInfo = {__KernelGetCurThread(), blockSamples};
				chan.waitingThreads.push_back(waitInfo);
				// Also remember the value to return in the waitValue.
				__KernelWaitCurThread(WAITTYPE_AUDIOCHANNEL, (SceUID)chanNum + 1, ret, 0, false, "blocking audio");
			} else {
				// TODO: Maybe we shouldn't take this audio after all?
				ret = SCE_KERNEL_ERROR_CAN_NOT_WAIT;
			}

			// Fall through to the sample queueing, don't want to lose the samples even though
			// we're getting full. The PSP would enqueue after blocking.
		} else {
			// Non-blocking doesn't even enqueue, but it's not commonly used.
			return SCE_ERROR_AUDIO_CHANNEL_BUSY;
		}
	}

	if (chan.sampleAddress == 0) {
		return ret;
	}

	int leftVol = chan.leftVolume;
	int rightVol = chan.rightVolume;

	if (leftVol == (1 << 15) && rightVol == (1 << 15) && chan.format == PSP_AUDIO_FORMAT_STEREO && IS_LITTLE_ENDIAN) {
		// TODO: Add mono->stereo conversion to this path.

		// Good news: the volume doesn't affect the values at all.
		// We can just do a direct memory copy.
		const u32 totalSamples = chan.sampleCount * (chan.format == PSP_AUDIO_FORMAT_STEREO ? 2 : 1);
		s16 *buf1 = 0, *buf2 = 0;
		size_t sz1, sz2;
		chan.sampleQueue.pushPointers(totalSamples, &buf1, &sz1, &buf2, &sz2);

		// Guard the whole source range before touching guest memory.
		if (Memory::IsValidAddress(chan.sampleAddress + (totalSamples - 1) * sizeof(s16_le))) {
			Memory::Memcpy(buf1, chan.sampleAddress, (u32)sz1 * sizeof(s16));
			// buf2 is the wrap-around portion of the ring buffer, if any.
			if (buf2)
				Memory::Memcpy(buf2, chan.sampleAddress + (u32)sz1 * sizeof(s16), (u32)sz2 * sizeof(s16));
		}
	} else {
		// Remember that maximum volume allowed is 0xFFFFF so left shift is no issue.
		// This way we can optimally shift by 16.
		leftVol <<=1;
		rightVol <<=1;

		if (chan.format == PSP_AUDIO_FORMAT_STEREO) {
			const u32 totalSamples = chan.sampleCount * 2;

			s16_le *sampleData = (s16_le *) Memory::GetPointer(chan.sampleAddress);

			// Walking a pointer for speed. But let's make sure we wouldn't trip on an invalid ptr.
			if (Memory::IsValidAddress(chan.sampleAddress + (totalSamples - 1) * sizeof(s16_le))) {
				s16 *buf1 = 0, *buf2 = 0;
				size_t sz1, sz2;
				chan.sampleQueue.pushPointers(totalSamples, &buf1, &sz1, &buf2, &sz2);
				AdjustVolumeBlock(buf1, sampleData, sz1, leftVol, rightVol);
				if (buf2) {
					AdjustVolumeBlock(buf2, sampleData + sz1, sz2, leftVol, rightVol);
				}
			}
		} else if (chan.format == PSP_AUDIO_FORMAT_MONO) {
			// Rare, so unoptimized. Expands to stereo.
			for (u32 i = 0; i < chan.sampleCount; i++) {
				s16 sample = (s16)Memory::Read_U16(chan.sampleAddress + 2 * i);
				chan.sampleQueue.push(ApplySampleVolume(sample, leftVol));
				chan.sampleQueue.push(ApplySampleVolume(sample, rightVol));
			}
		}
	}
	return ret;
}
// Enqueues the channel's current sample buffer into its output queue,
// optionally blocking the calling thread until the queue drains enough
// (older variant: per-sample volume adjust, endian-split stereo paths).
// Returns the value the syscall should report (sample count or error).
u32 __AudioEnqueue(AudioChannel &chan, int chanNum, bool blocking)
{
	u32 ret = chan.sampleCount;

	if (chan.sampleAddress == 0) {
		// For some reason, multichannel audio lies and returns the sample count here.
		if (chanNum == PSP_AUDIO_CHANNEL_SRC || chanNum == PSP_AUDIO_CHANNEL_OUTPUT2) {
			ret = 0;
		}
	}

	// If there's anything on the queue at all, it should be busy, but we try to be a bit lax.
	//if (chan.sampleQueue.size() > chan.sampleCount * 2 * chanQueueMaxSizeFactor || chan.sampleAddress == 0) {
	if (chan.sampleQueue.size() > 0) {
		if (blocking) {
			// TODO: Regular multichannel audio seems to block for 64 samples less? Or enqueue the first 64 sync?
			int blockSamples = (int)chan.sampleQueue.size() / 2 / chanQueueMinSizeFactor;

			if (__KernelIsDispatchEnabled()) {
				AudioChannelWaitInfo waitInfo = {__KernelGetCurThread(), blockSamples};
				chan.waitingThreads.push_back(waitInfo);
				// Also remember the value to return in the waitValue.
				__KernelWaitCurThread(WAITTYPE_AUDIOCHANNEL, (SceUID)chanNum + 1, ret, 0, false, "blocking audio");
			} else {
				// TODO: Maybe we shouldn't take this audio after all?
				ret = SCE_KERNEL_ERROR_CAN_NOT_WAIT;
			}

			// Fall through to the sample queueing, don't want to lose the samples even though
			// we're getting full. The PSP would enqueue after blocking.
		} else {
			// Non-blocking doesn't even enqueue, but it's not commonly used.
			return SCE_ERROR_AUDIO_CHANNEL_BUSY;
		}
	}

	if (chan.sampleAddress == 0) {
		return ret;
	}

	if (chan.format == PSP_AUDIO_FORMAT_STEREO) {
		const u32 totalSamples = chan.sampleCount * 2;

		if (IS_LITTLE_ENDIAN) {
			s16 *sampleData = (s16 *) Memory::GetPointer(chan.sampleAddress);

			// Walking a pointer for speed. But let's make sure we wouldn't trip on an invalid ptr.
			if (Memory::IsValidAddress(chan.sampleAddress + (totalSamples - 1) * sizeof(s16))) {
#if 0
				for (u32 i = 0; i < totalSamples; i += 2) {
					chan.sampleQueue.push(adjustvolume(*sampleData++, chan.leftVolume));
					chan.sampleQueue.push(adjustvolume(*sampleData++, chan.rightVolume));
				}
#else
				// Reserve space in the ring buffer (buf2 is the wrap-around part).
				s16 *buf1 = 0, *buf2 = 0;
				size_t sz1, sz2;
				chan.sampleQueue.pushPointers(totalSamples, &buf1, &sz1, &buf2, &sz2);
				int leftVol = chan.leftVolume;
				int rightVol = chan.rightVolume;

				// TODO: SSE/NEON implementations
				for (u32 i = 0; i < sz1; i += 2) {
					buf1[i] = adjustvolume(sampleData[i], leftVol);
					buf1[i + 1] = adjustvolume(sampleData[i + 1], rightVol);
				}
				if (buf2) {
					sampleData += sz1;
					for (u32 i = 0; i < sz2; i += 2) {
						buf2[i] = adjustvolume(sampleData[i], leftVol);
						buf2[i + 1] = adjustvolume(sampleData[i + 1], rightVol);
					}
				}
#endif
			}
		} else {
			// Big-endian host: read each sample through Memory:: so it gets byteswapped.
			for (u32 i = 0; i < totalSamples; i++) {
				s16 sampleL = (s16)Memory::Read_U16(chan.sampleAddress + sizeof(s16) * i);
				sampleL = adjustvolume(sampleL, chan.leftVolume);
				chan.sampleQueue.push(sampleL);
				i++;
				s16 sampleR = (s16)Memory::Read_U16(chan.sampleAddress + sizeof(s16) * i);
				sampleR = adjustvolume(sampleR, chan.rightVolume);
				chan.sampleQueue.push(sampleR);
			}
		}
	} else if (chan.format == PSP_AUDIO_FORMAT_MONO) {
		for (u32 i = 0; i < chan.sampleCount; i++) {
			// Expand to stereo
			s16 sample = (s16)Memory::Read_U16(chan.sampleAddress + 2 * i);
			chan.sampleQueue.push(adjustvolume(sample, chan.leftVolume));
			chan.sampleQueue.push(adjustvolume(sample, chan.rightVolume));
		}
	}
	return ret;
}