Example #1
0
inline void hleFinishSyscall(const HLEFunction &info)
{
	if ((hleAfterSyscall & HLE_AFTER_SKIP_DEADBEEF) == 0)
		SetDeadbeefRegs();

	if ((hleAfterSyscall & HLE_AFTER_CURRENT_CALLBACKS) != 0 && (hleAfterSyscall & HLE_AFTER_RESCHED_CALLBACKS) == 0)
		__KernelForceCallbacks();

	if ((hleAfterSyscall & HLE_AFTER_RUN_INTERRUPTS) != 0)
		__RunOnePendingInterrupt();

	if ((hleAfterSyscall & HLE_AFTER_RESCHED_CALLBACKS) != 0)
		__KernelReSchedule(true, hleAfterSyscallReschedReason);
	else if ((hleAfterSyscall & HLE_AFTER_RESCHED) != 0)
		__KernelReSchedule(hleAfterSyscallReschedReason);

	if ((hleAfterSyscall & HLE_AFTER_DEBUG_BREAK) != 0)
	{
		if (!hleExecuteDebugBreak(info))
		{
			// We'll do it next syscall.
			hleAfterSyscall = HLE_AFTER_DEBUG_BREAK;
			hleAfterSyscallReschedReason = 0;
			return;
		}
	}

	hleAfterSyscall = HLE_AFTER_NOTHING;
	hleAfterSyscallReschedReason = 0;
}
Example #2
0
void sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE,"sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
	{
		Memory::WriteStruct(workareaPtr, &workarea);
		RETURN(0);
		__KernelReSchedule("lwmutex locked");
	}
	else if (error)
		RETURN(error);
	else
	{
		LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
		if (mutex)
		{
			mutex->waitingThreads.push_back(__KernelGetCurThread());
			__KernelWaitLwMutex(mutex, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea.uid, count, timeoutPtr, true);
			__KernelCheckCallbacks();
		}
		else
			RETURN(error);
	}
}
Example #3
0
static void __CtrlDoSample()
{
	// This samples the ctrl data into the buffers and updates the latch.
	__CtrlUpdateLatch();

	// Wake up a single thread that was waiting for the buffer.
retry:
	if (!waitingThreads.empty() && ctrlBuf != ctrlBufRead)
	{
		SceUID threadID = waitingThreads[0];
		waitingThreads.erase(waitingThreads.begin());

		u32 error;
		SceUID wVal = __KernelGetWaitID(threadID, WAITTYPE_CTRL, error);
		// Make sure it didn't get woken or something.
		if (wVal == 0)
			goto retry;

		PSPPointer<_ctrl_data> ctrlDataPtr;
		ctrlDataPtr = __KernelGetWaitValue(threadID, error);
		int retVal = __CtrlReadSingleBuffer(ctrlDataPtr, wVal == CTRL_WAIT_NEGATIVE);
		__KernelResumeThreadFromWait(threadID, retVal);
		__KernelReSchedule("ctrl buffers updated");
	}
}
Example #4
0
// Should be same as WaitSema but without the wait, instead returning SCE_KERNEL_ERROR_SEMA_ZERO
void sceKernelPollSema(SceUID id, int wantedCount)
{
	DEBUG_LOG(HLE,"sceKernelPollSema(%i, %i)", id, wantedCount);

	if (wantedCount <= 0)
	{
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_COUNT);
		return;
	}

	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (s->ns.currentCount >= wantedCount)
		{
			s->ns.currentCount -= wantedCount;
			RETURN(0);
		}
		else
			RETURN(SCE_KERNEL_ERROR_SEMA_ZERO);

		__KernelReSchedule("semaphore polled");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelPollSema: Trying to poll invalid semaphore %i", id);
		RETURN(error);
	}
}
Example #5
0
inline void __AudioWakeThreads(AudioChannel &chan, int result, int step) {
	u32 error;
	bool wokeThreads = false;
	for (size_t w = 0; w < chan.waitingThreads.size(); ++w) {
		AudioChannelWaitInfo &waitInfo = chan.waitingThreads[w];
		waitInfo.numSamples -= step;

		// If it's done (there will still be samples on queue) and actually still waiting, wake it up.
		u32 waitID = __KernelGetWaitID(waitInfo.threadID, WAITTYPE_AUDIOCHANNEL, error);
		if (waitInfo.numSamples <= 0 && waitID != 0) {
			// DEBUG_LOG(SCEAUDIO, "Woke thread %i for some buffer filling", waitingThread);
			u32 ret = result == 0 ? __KernelGetWaitValue(waitInfo.threadID, error) : SCE_ERROR_AUDIO_CHANNEL_NOT_RESERVED;
			__KernelResumeThreadFromWait(waitInfo.threadID, ret);
			wokeThreads = true;

			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
		}
		// This means the thread stopped waiting, so stop trying to wake it.
		else if (waitID == 0)
			chan.waitingThreads.erase(chan.waitingThreads.begin() + w--);
	}

	if (wokeThreads) {
		__KernelReSchedule("audio drain");
	}
}
Example #6
0
void __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *badSemaMessage, bool processCallbacks)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (wantedCount > s->ns.maxCount || wantedCount <= 0)
		{
			RETURN(SCE_KERNEL_ERROR_ILLEGAL_COUNT);
			return;
		}

		// We need to set the return value BEFORE processing callbacks / etc.
		RETURN(0);

		if (s->ns.currentCount >= wantedCount)
			s->ns.currentCount -= wantedCount;
		else
		{
			s->ns.numWaitThreads++;
			s->waitingThreads.push_back(__KernelGetCurThread());
			__KernelSetSemaTimeout(s, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_SEMA, id, wantedCount, timeoutPtr, processCallbacks);
			if (processCallbacks)
				__KernelCheckCallbacks();
		}

		__KernelReSchedule("semaphore waited");
	}
	else
	{
		ERROR_LOG(HLE, badSemaMessage, id);
		RETURN(error);
	}
}
Example #7
0
// int sceKernelUnlockMutex(SceUID id, int count)
// void because it changes threads.
void sceKernelUnlockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE,"sceKernelUnlockMutex(%i, %i)", id, count);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (!error)
	{
		if (count <= 0)
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		else if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		else if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread())
			error = PSP_MUTEX_ERROR_NOT_LOCKED;
		else if (mutex->nm.lockLevel < count)
			error = PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW;
	}

	if (error)
	{
		RETURN(error);
		return;
	}

	mutex->nm.lockLevel -= count;
	RETURN(0);

	if (mutex->nm.lockLevel == 0)
	{
		__KernelUnlockMutex(mutex, error);
		__KernelReSchedule("mutex unlocked");
	}
}
Example #8
0
void sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<Mutex>(id));
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
Example #9
0
void __KernelReturnFromInterrupt()
{
	DEBUG_LOG(CPU, "Left interrupt handler at %08x", currentMIPS->pc);

	// This is what we just ran.
	PendingInterrupt pend = pendingInterrupts.front();
	pendingInterrupts.pop_front();

	intrHandlers[pend.intr]->handleResult(pend);
	inInterrupt = false;

	// Restore context after running the interrupt.
	intState.restore();
	// All should now be back to normal, including PC.

	// Alright, let's see if there's any more interrupts queued...
	if (!__RunOnePendingInterrupt())
	{
		// If dispatch is enabled, reschedule; otherwise switch back to the thread
		// that was running before the interrupt.
		if (__KernelIsDispatchEnabled())
			__KernelReSchedule("return from interrupt");
		else
			__KernelSwitchToThread(threadBeforeInterrupt, "return from interrupt");
	}
}
Example #10
0
void sceUmdDeactivate(u32 unknown, const char *name)
{
	// Why 18?  No idea.
	if (unknown < 0 || unknown > 18)
	{
		RETURN(PSP_ERROR_UMD_INVALID_PARAM);
		return;
	}

	bool changed = umdActivated != 0;
	__KernelUmdDeactivate();

	if (unknown == 1)
	{
		DEBUG_LOG(HLE, "0=sceUmdDeactivate(%d, %s)", unknown, name);
	}
	else
	{
		ERROR_LOG(HLE, "UNTESTED 0=sceUmdDeactivate(%d, %s)", unknown, name);
	}

	u32 notifyArg = UMD_PRESENT | UMD_READY;
	__KernelNotifyCallbackType(THREAD_CALLBACK_UMD, -1, notifyArg);
	RETURN(0);

	if (changed)
		__KernelReSchedule("umd deactivated");
}
Example #11
0
//SceUID sceKernelCreateSema(const char *name, SceUInt attr, int initVal, int maxVal, SceKernelSemaOptParam *option);
// void because it changes threads.
void sceKernelCreateSema(const char* name, u32 attr, int initVal, int maxVal, u32 optionPtr)
{
	if (!semaInitComplete)
		__KernelSemaInit();

	if (!name)
	{
		RETURN(SCE_KERNEL_ERROR_ERROR);
		return;
	}

	Semaphore *s = new Semaphore;
	SceUID id = kernelObjects.Create(s);

	s->ns.size = sizeof(NativeSemaphore);
	strncpy(s->ns.name, name, 31);
	s->ns.name[31] = 0;
	s->ns.attr = attr;
	s->ns.initCount = initVal;
	s->ns.currentCount = s->ns.initCount;
	s->ns.maxCount = maxVal;
	s->ns.numWaitThreads = 0;

	DEBUG_LOG(HLE,"%i=sceKernelCreateSema(%s, %08x, %i, %i, %08x)", id, s->ns.name, s->ns.attr, s->ns.initCount, s->ns.maxCount, optionPtr);

	if (optionPtr != 0)
		WARN_LOG(HLE,"sceKernelCreateSema(%s) unsupported options parameter.", name);

	RETURN(id);

	__KernelReSchedule("semaphore created");
}
Example #12
0
void sceKernelSendMbx(SceUID id, u32 packetAddr)
{
	u32 error;
	Mbx *m = kernelObjects.Get<Mbx>(id, error);
	NativeMbxPacket *addPacket = (NativeMbxPacket*)Memory::GetPointer(packetAddr);
	if (addPacket == 0)
	{
		ERROR_LOG(HLE, "sceKernelSendMbx(%i, %08x): invalid packet address", id, packetAddr);
		RETURN(-1);
		return;
	}

	if (!m)
	{
		ERROR_LOG(HLE, "sceKernelSendMbx(%i, %08x): invalid mbx id", id, packetAddr);
		RETURN(error);
		return;
	}

	if (m->waitingThreads.empty())
	{
		DEBUG_LOG(HLE, "sceKernelSendMbx(%i, %08x): no threads currently waiting, adding message to queue", id, packetAddr);
		if (m->nmb.attr & SCE_KERNEL_MBA_MSPRI)
		{
			bool inserted = false;
			for (std::vector<u32>::iterator it = m->messageQueue.begin(); it != m->messageQueue.end(); it++)
			{
				NativeMbxPacket *p = (NativeMbxPacket*)Memory::GetPointer(*it);
				if (addPacket->priority >= p->priority)
				{
					m->messageQueue.insert(it, packetAddr);
					inserted = true;
					break;
				}
			}
			// If no insertion point was found, append at the end so the packet isn't dropped.
			if (!inserted)
				m->messageQueue.push_back(packetAddr);
		}
		else
		{
			m->messageQueue.push_back(packetAddr);
		}
		RETURN(0);
	}
	else if (m->messageQueue.empty())
	{
		Memory::Write_U32(packetAddr, m->waitingThreads.front().second);
		__KernelResumeThreadFromWait(m->waitingThreads.front().first);
		DEBUG_LOG(HLE, "sceKernelSendMbx(%i, %08x): threads waiting, resuming %d", id, packetAddr, m->waitingThreads.front().first);
		m->waitingThreads.erase(m->waitingThreads.begin());
		RETURN(0);
		__KernelReSchedule("mbx sent");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelSendMbx(%i, %08x): WTF!? thread waiting while there is a message in the queue?", id, packetAddr);
		RETURN(-1);
	}
}
Example #13
0
void sceKernelCheckCallback()
{
	//only check those of current thread
	ERROR_LOG(HLE,"UNIMPL sceKernelCheckCallback()");

	// HACK that makes the audio thread work in Puyo Puyo Fever - the main thread never yields!
	// Probably because callbacks aren't working right.
	__KernelReSchedule("checkcallbackhack");

	RETURN(0);
}
Example #14
0
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
// void because it changes threads.
void sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE,"sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
	{
		RETURN(0);
		__KernelReSchedule("mutex locked");
	}
	else if (error)
		RETURN(error);
	else
	{
		mutex->waitingThreads.push_back(__KernelGetCurThread());
		__KernelWaitMutex(mutex, timeoutPtr);
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true);
		__KernelCheckCallbacks();
	}

	__KernelReSchedule("mutex locked");
}
Example #15
0
void sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!mutexInitComplete)
		__KernelMutexInit();

	DEBUG_LOG(HLE,"sceKernelCreateLwMutex(%08x, %s, %08x, %d, %08x)", workareaPtr, name, attr, initialCount, optionsPtr);

	u32 error = 0;
	if (!name)
		error = SCE_KERNEL_ERROR_ERROR;
	else if (initialCount < 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	if (error)
	{
		RETURN(error);
		return;
	}

	LwMutex *mutex = new LwMutex();
	SceUID id = kernelObjects.Create(mutex);
	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, 31);
	mutex->nm.name[31] = 0;
	mutex->nm.attr = attr;
	mutex->nm.workareaPtr = workareaPtr;

	NativeLwMutexWorkarea workarea;
	workarea.init();
	workarea.lockLevel = initialCount;
	if (initialCount == 0)
		workarea.lockThread = 0;
	else
		workarea.lockThread = __KernelGetCurThread();
	workarea.attr = attr;
	workarea.uid = id;

	Memory::WriteStruct(workareaPtr, &workarea);

	if (optionsPtr != 0)
		WARN_LOG(HLE,"sceKernelCreateLwMutex(%s) unsupported options parameter.", name);

	RETURN(0);

	__KernelReSchedule("lwmutex created");
}
Example #16
0
// int sceKernelTryLockMutex(SceUID id, int count)
// void because it changes threads.
void sceKernelTryLockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE,"sceKernelTryLockMutex(%i, %i)", id, count);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
	{
		RETURN(0);
		__KernelReSchedule("mutex trylocked");
	}
	else if (error)
		RETURN(error);
	else
		RETURN(PSP_MUTEX_ERROR_TRYLOCK_FAILED);
}
Example #17
0
void hleDelayResultFinish(u64 userdata, int cycleslate)
{
	u32 error;
	SceUID threadID = (SceUID) userdata;
	SceUID verify = __KernelGetWaitID(threadID, WAITTYPE_HLEDELAY, error);
	// The top 32 bits of userdata are the top 32 bits of the 64 bit result.
	// We can't just put it all in userdata because we need to know the threadID...
	u64 result = (userdata & 0xFFFFFFFF00000000ULL) | __KernelGetWaitValue(threadID, error);

	if (error == 0 && verify == 1)
	{
		__KernelResumeThreadFromWait(threadID, result);
		__KernelReSchedule("woke from hle delay");
	}
	else
		WARN_LOG(HLE, "Someone else woke up HLE-blocked thread?");
}
Example #18
0
//int sceKernelReferSemaStatus(SceUID semaid, SceKernelSemaInfo *info);
// void because it changes threads.
void sceKernelReferSemaStatus(SceUID id, u32 infoPtr)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		DEBUG_LOG(HLE,"sceKernelReferSemaStatus(%i, %08x)", id, infoPtr);
		Memory::WriteStruct(infoPtr, &s->ns);
		RETURN(0);
		__KernelReSchedule("semaphore refer status");
	}
	else
	{
		ERROR_LOG(HLE,"Error %08x", error);
		RETURN(error);
	}
}
Example #19
0
//int sceKernelDeleteSema(SceUID semaid);
// void because it changes threads.
void sceKernelDeleteSema(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteSema(%i)", id);

	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		__KernelClearSemaThreads(s, SCE_KERNEL_ERROR_WAIT_DELETE);
		RETURN(kernelObjects.Destroy<Semaphore>(id));
		__KernelReSchedule("semaphore deleted");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelDeleteSema : Trying to delete invalid semaphore %i", id);
		RETURN(error);
	}
}
Example #20
0
void sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!mutexInitComplete)
		__KernelMutexInit();

	u32 error = 0;
	if (!name)
		error = SCE_KERNEL_ERROR_ERROR;
	else if (initialCount < 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	if (error)
	{
		RETURN(error);
		return;
	}

	DEBUG_LOG(HLE,"sceKernelCreateMutex(%s, %08x, %d, %08x)", name, attr, initialCount, optionsPtr);

	Mutex *mutex = new Mutex();
	SceUID id = kernelObjects.Create(mutex);

	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, 31);
	mutex->nm.name[31] = 0;
	mutex->nm.attr = attr;
	if (initialCount == 0)
	{
		mutex->nm.lockLevel = 0;
		mutex->nm.lockThread = -1;
	}
	else
		__KernelMutexAcquireLock(mutex, initialCount);

	if (optionsPtr != 0)
		WARN_LOG(HLE,"sceKernelCreateMutex(%s) unsupported options parameter.", name);

	RETURN(id);

	__KernelReSchedule("mutex created");
}
Example #21
0
void sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(HLE,"sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
	{
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_ADDR);
		return;
	}

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<LwMutex>(workarea.uid));
		workarea.clear();
		Memory::WriteStruct(workareaPtr, &workarea);

		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
Example #22
0
void sceKernelTryLockLwMutex_600(u32 workareaPtr, int count)
{
	DEBUG_LOG(HLE,"sceKernelTryLockLwMutex_600(%08x, %i)", workareaPtr, count);

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
	{
		Memory::WriteStruct(workareaPtr, &workarea);
		RETURN(0);
		__KernelReSchedule("lwmutex trylocked");
	}
	else if (error)
		RETURN(error);
	else
		RETURN(PSP_LWMUTEX_ERROR_TRYLOCK_FAILED);
}
Example #23
0
void __KernelReturnFromInterrupt()
{
	DEBUG_LOG(CPU, "Left interrupt handler at %08x", currentMIPS->pc);

	// This is what we just ran.
	PendingInterrupt pend = pendingInterrupts.front();
	pendingInterrupts.pop_front();

	intrHandlers[pend.intr]->handleResult(pend);
	inInterrupt = false;

	// Restore context after running the interrupt.
	intState.restore();
	// All should now be back to normal, including PC.

	// Alright, let's see if there's any more interrupts queued...
	if (!__RunOnePendingInterrupt())
		__KernelReSchedule("return from interrupt");
}
Example #24
0
void sceKernelUnlockLwMutex(u32 workareaPtr, int count)
{
	DEBUG_LOG(HLE,"sceKernelUnlockLwMutex(%08x, %i)", workareaPtr, count);

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error = 0;
	if (workarea.uid == -1)
		error = PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
	else if (count <= 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if ((workarea.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if (workarea.lockLevel == 0 || workarea.lockThread != __KernelGetCurThread())
		error = PSP_LWMUTEX_ERROR_NOT_LOCKED;
	else if (workarea.lockLevel < count)
		error = PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW;

	if (error)
	{
		RETURN(error);
		return;
	}

	workarea.lockLevel -= count;
	RETURN(0);

	if (workarea.lockLevel == 0)
	{
		__KernelUnlockLwMutex(workarea, error);
		Memory::WriteStruct(workareaPtr, &workarea);
		__KernelReSchedule("mutex unlocked");
	}
	else
		Memory::WriteStruct(workareaPtr, &workarea);

}
Example #25
0
// int sceKernelCancelSema(SceUID id, int newCount, int *numWaitThreads);
// void because it changes threads.
void sceKernelCancelSema(SceUID id, int newCount, u32 numWaitThreadsPtr)
{
	DEBUG_LOG(HLE,"sceKernelCancelSema(%i)", id);

	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (newCount > s->ns.maxCount)
		{
			RETURN(SCE_KERNEL_ERROR_ILLEGAL_COUNT);
			return;
		}

		if (numWaitThreadsPtr)
		{
			u32* numWaitThreads = (u32*)Memory::GetPointer(numWaitThreadsPtr);
			*numWaitThreads = s->ns.numWaitThreads;
		}

		if (newCount < 0)
			s->ns.currentCount = s->ns.initCount;
		else
			s->ns.currentCount = newCount;
		s->ns.numWaitThreads = 0;

		// We need to set the return value BEFORE rescheduling threads.
		RETURN(0);

		__KernelClearSemaThreads(s, SCE_KERNEL_ERROR_WAIT_CANCEL);
		__KernelReSchedule("semaphore cancelled");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelCancelSema : Trying to cancel invalid semaphore %i", id);
		RETURN(error);
	}
}
Example #26
0
void __KernelReturnFromInterrupt()
{
	DEBUG_LOG(CPU, "Left interrupt handler at %08x", currentMIPS->pc);
	inInterrupt = false;

	// This is what we just ran.
	PendingInterrupt pend = pendingInterrupts.front();
	pendingInterrupts.pop_front();

	SubIntrHandler *handler = intrHandlers[pend.intr].get(pend.subintr);
	if (handler != NULL)
		handler->handleResult(currentMIPS->r[MIPS_REG_V0]);
	else
		ERROR_LOG(HLE, "Interrupt released itself?  Should not happen.");

	// Restore context after running the interrupt.
	intState.restore();
	// All should now be back to normal, including PC.

	// Alright, let's see if there's any more interrupts queued...
	if (!__RunOnePendingInterrupt())
		__KernelReSchedule("return from interrupt");
}
Example #27
0
//int sceKernelSignalSema(SceUID semaid, int signal);
// void because it changes threads.
void sceKernelSignalSema(SceUID id, int signal)
{
	//TODO: check that this thing really works :)
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (s->ns.currentCount + signal > s->ns.maxCount)
		{
			RETURN(SCE_KERNEL_ERROR_SEMA_OVF);
			return;
		}

		int oldval = s->ns.currentCount;
		s->ns.currentCount += signal;
		DEBUG_LOG(HLE,"sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount);

		// We need to set the return value BEFORE processing other threads.
		RETURN(0);

		bool wokeThreads = false;
retry:
		// TODO: PSP_SEMA_ATTR_PRIORITY
		std::vector<SceUID>::iterator iter;
		for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++)
		{
			SceUID threadID = *iter;

			int wVal = (int)__KernelGetWaitValue(threadID, error);
			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

			if (wVal <= s->ns.currentCount)
			{
				s->ns.currentCount -= wVal;
				s->ns.numWaitThreads--;

				if (timeoutPtr != 0 && semaWaitTimer != 0)
				{
					// Remove any event for this thread.
					u64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
					Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
				}

				__KernelResumeThreadFromWait(threadID, 0);
				wokeThreads = true;
				s->waitingThreads.erase(iter);
				goto retry;
			}
			else
			{
				break;
			}
		}

		__KernelReSchedule("semaphore signalled");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id);
		RETURN(error);
	}
}
Example #28
0
void hleEnterVblank(u64 userdata, int cyclesLate) {
	int vbCount = userdata;

	DEBUG_LOG(SCEDISPLAY, "Enter VBlank %i", vbCount);

	isVblank = 1;
	vCount++; // vCount increases at each VBLANK.
	hCountBase += hCountPerVblank; // This is the "accumulated" hcount base.
	if (hCountBase > 0x7FFFFFFF) {
		hCountBase -= 0x80000000;
	}
	frameStartTicks = CoreTiming::GetTicks();

	CoreTiming::ScheduleEvent(msToCycles(vblankMs) - cyclesLate, leaveVblankEvent, vbCount + 1);

	// Wake up threads waiting for VBlank
	u32 error;
	bool wokeThreads = false;
	for (size_t i = 0; i < vblankWaitingThreads.size(); i++) {
		if (--vblankWaitingThreads[i].vcountUnblock == 0) {
			// Only wake it if it wasn't already released by someone else.
			SceUID waitID = __KernelGetWaitID(vblankWaitingThreads[i].threadID, WAITTYPE_VBLANK, error);
			if (waitID == 1) {
				__KernelResumeThreadFromWait(vblankWaitingThreads[i].threadID, 0);
				wokeThreads = true;
			}
			vblankWaitingThreads.erase(vblankWaitingThreads.begin() + i--);
		}
	}
	if (wokeThreads) {
		__KernelReSchedule("entered vblank");
	}

	// Trigger VBlank interrupt handlers.
	__TriggerInterrupt(PSP_INTR_IMMEDIATE | PSP_INTR_ONLY_IF_ENABLED | PSP_INTR_ALWAYS_RESCHED, PSP_VBLANK_INTR, PSP_INTR_SUB_ALL);

	gpuStats.numVBlanks++;

	numVBlanksSinceFlip++;

	// TODO: Should this be done here or in hleLeaveVblank?
	if (framebufIsLatched) {
		DEBUG_LOG(SCEDISPLAY, "Setting latched framebuffer %08x (prev: %08x)", latchedFramebuf.topaddr, framebuf.topaddr);
		framebuf = latchedFramebuf;
		framebufIsLatched = false;
		gpu->SetDisplayFramebuffer(framebuf.topaddr, framebuf.pspFramebufLinesize, framebuf.pspFramebufFormat);
	}
	// We flip only if the framebuffer was dirty. This eliminates flicker when using
	// non-buffered rendering. The interaction with frame skipping seems to need
	// some work.
	// But, let's flip at least once every 10 frames if possible, since there may be sound effects.
	if (gpu->FramebufferDirty() || (g_Config.iRenderingMode != 0 && numVBlanksSinceFlip >= 10)) {
		if (g_Config.iShowFPSCounter && g_Config.iShowFPSCounter < 4) {
			CalculateFPS();
		}

		// Setting CORE_NEXTFRAME causes a swap.
		// Check first though, might've just quit / been paused.
		if (gpu->FramebufferReallyDirty()) {
			if (coreState == CORE_RUNNING) {
				coreState = CORE_NEXTFRAME;
				gpu->CopyDisplayToOutput();
				actualFlips++;
			}
		}

		gpuStats.numFlips++;

		bool throttle, skipFrame;
		DoFrameTiming(throttle, skipFrame, (float)numVBlanksSinceFlip * timePerVblank);

		int maxFrameskip = 8;
		if (throttle) {
			// 4 here means 1 drawn, 4 skipped - so 12 fps minimum.
			maxFrameskip = g_Config.iFrameSkip;
		}
		if (numSkippedFrames >= maxFrameskip) {
			skipFrame = false;
		}

		if (skipFrame) {
			gstate_c.skipDrawReason |= SKIPDRAW_SKIPFRAME;
			numSkippedFrames++;
		} else {
			gstate_c.skipDrawReason &= ~SKIPDRAW_SKIPFRAME;
			numSkippedFrames = 0;
		}

		// Returning here with coreState == CORE_NEXTFRAME causes a buffer flip to happen (next frame).
		// Right after, we regain control for a little bit in hleAfterFlip. I think that's a great
		// place to do housekeeping.
		CoreTiming::ScheduleEvent(0 - cyclesLate, afterFlipEvent, 0);
		numVBlanksSinceFlip = 0;
	} else {
		// Okay, there's no new frame to draw.  But audio may be playing, so we need to time still.
		DoFrameIdleTiming();
	}
}