// Timer callback fired when a thread's event flag wait times out; userdata carries the thread ID.
void __KernelEventFlagTimeout(u64 userdata, int cyclesLate)
{
	SceUID threadID = (SceUID)userdata;

	u32 error;
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

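	// Find the event flag the thread was waiting on so the thread can be woken with a TIMEOUT result.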
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	EventFlag *e = kernelObjects.Get<EventFlag>(flagID, error);
	if (e)
	{
		for (size_t i = 0; i < e->waitingThreads.size(); i++)
		{
			EventFlagTh *t = &e->waitingThreads[i];
			if (t->tid == threadID)
			{
				bool wokeThreads;

				// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
				// The reason is, if it times out, but what it was waiting on is DELETED prior to it
				// actually running, it will get a DELETE result instead of a TIMEOUT.
				// So, we need to remember it or we won't be able to mark it DELETE instead later.
				__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
				e->nef.numWaitThreads--;
				break;
			}
		}
	}
}
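
// Sets the given bits in the flag's current pattern and wakes any waiters
// whose wait condition is now satisfied.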
u32 sceKernelSetEventFlag(SceUID id, u32 bitsToSet)
{
	u32 error;
	DEBUG_LOG(HLE, "sceKernelSetEventFlag(%i, %08x)", id, bitsToSet);
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e)
	{
		bool wokeThreads = false;

		e->nef.currentPattern |= bitsToSet;

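		// Check each waiting thread against the updated pattern; wake those whose condition is now met.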
		for (size_t i = 0; i < e->waitingThreads.size(); ++i)
		{
			EventFlagTh *t = &e->waitingThreads[i];
			if (__KernelUnlockEventFlagForThread(e, *t, error, 0, wokeThreads))
			{
				e->waitingThreads.erase(e->waitingThreads.begin() + i);
				// Try the one that used to be in this place next.
				--i;
			}
		}

		if (wokeThreads)
			hleReSchedule("event flag set");

		return 0;
	}
	else
	{
		return error;
	}
}
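
// Wakes every thread waiting on this flag with the given reason code, then clears the wait list.
// Returns true if any thread was actually woken.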
bool __KernelClearEventFlagThreads(EventFlag *e, int reason)
{
	u32 error;
	bool wokeThreads = false;
	std::vector<EventFlagTh>::iterator iter, end;
	for (iter = e->waitingThreads.begin(), end = e->waitingThreads.end(); iter != end; ++iter)
		__KernelUnlockEventFlagForThread(e, *iter, error, reason, wokeThreads);
	e->waitingThreads.clear();

	return wokeThreads;
}
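
// Called when a callback finishes for a thread whose event flag wait was paused.
// Restores the wait (re-checking the pattern and any remaining timeout), or reports
// WAIT_DELETE if the flag no longer exists.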
void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *flag = flagID == 0 ? NULL : kernelObjects.Get<EventFlag>(flagID, error);
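	// If the flag is gone or its paused wait record is missing, the wait can't be resumed.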
	if (!flag || flag->pausedWaits.find(pauseKey) == flag->pausedWaits.end())
	{
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	EventFlagTh waitData = flag->pausedWaits[pauseKey];
	u64 waitDeadline = waitData.pausedTimeout;
	flag->pausedWaits.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	bool wokeThreads;
	// Attempt to unlock.
	if (__KernelUnlockEventFlagForThread(flag, waitData, error, 0, wokeThreads))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
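		// Re-arm the timeout timer for the remaining cycles and put the thread back on the wait list.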
		if (timeoutPtr != 0 && eventFlagWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, eventFlagWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		flag->waitingThreads.push_back(waitData);

		DEBUG_LOG(HLE, "sceKernelWaitEventFlagCB: Resuming lock wait for callback");
	}
}