Example #1
bool __KernelUnlockMutexForThread(Mutex *mutex, SceUID threadID, u32 &error, int result)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != mutex->GetUID())
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int wVal = (int)__KernelGetWaitValue(threadID, error);
		__KernelMutexAcquireLock(mutex, wVal, threadID);
	}

	if (timeoutPtr != 0 && mutexWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
Example #2
bool __KernelUnlockMutex(Mutex *mutex, u32 &error)
{
	__KernelMutexEraseLock(mutex);

	// TODO: PSP_MUTEX_ATTR_PRIORITY
	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter, end;
	for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
	{
		SceUID threadID = *iter;

		int wVal = (int)__KernelGetWaitValue(threadID, error);
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

		__KernelMutexAcquireLock(mutex, wVal, threadID);

		if (timeoutPtr != 0 && mutexWaitTimer != -1)
		{
			// Remove any event for this thread.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
		}

		__KernelResumeThreadFromWait(threadID, 0);
		wokeThreads = true;
		mutex->waitingThreads.erase(iter);
		break;
	}

	if (!wokeThreads)
		mutex->nm.lockThread = -1;

	return wokeThreads;
}
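
__KernelMutexEraseLock() is called at the top of Example #2 but is not included in these examples. A minimal sketch, assuming the helper only forgets the recorded owner before the mutex is released or handed to a waiter (the actual PPSSPP helper may also maintain a per-thread table of held locks):

// Hypothetical sketch, not the verified PPSSPP implementation: it only
// clears the owner field that the examples above read and reset.
void __KernelMutexEraseLock(Mutex *mutex)
{
	mutex->nm.lockThread = -1;
}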
Example #3
int sceKernelCancelMutex(SceUID uid, int count, u32 numWaitThreadsPtr)
{
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(uid, error);
	if (mutex)
	{
		bool lockable = count <= 0 || __KernelLockMutexCheck(mutex, count, error);
		if (!lockable)
		{
			// May still be okay.  As long as the count/etc. are valid.
			if (error != 0 && error != PSP_MUTEX_ERROR_LOCK_OVERFLOW && error != PSP_MUTEX_ERROR_ALREADY_LOCKED)
			{
				DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x): invalid count", uid, count, numWaitThreadsPtr);
				return error;
			}
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);

		// Remove threads no longer waiting on this first (so the numWaitThreads value is correct.)
		HLEKernel::CleanupWaitingThreads(WAITTYPE_MUTEX, uid, mutex->waitingThreads);

		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32((u32)mutex->waitingThreads.size(), numWaitThreadsPtr);

		bool wokeThreads = false;
		for (auto iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_CANCEL);

		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		if (count <= 0)
		{
			mutex->nm.lockLevel = 0;
			mutex->nm.lockThread = -1;
		}
		else
			__KernelMutexAcquireLock(mutex, count);

		if (wokeThreads)
			hleReSchedule("mutex canceled");

		return 0;
	}
	else
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);
		return error;
	}
}
Example #4
int sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateMutex(): invalid name", SCE_KERNEL_ERROR_ERROR);
		return SCE_KERNEL_ERROR_ERROR;
	}
	if (attr & ~0xBFF)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateMutex(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}

	if (initialCount < 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	Mutex *mutex = new Mutex();
	SceUID id = kernelObjects.Create(mutex);

	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	mutex->nm.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	mutex->nm.attr = attr;
	mutex->nm.initialCount = initialCount;
	if (initialCount == 0)
	{
		mutex->nm.lockLevel = 0;
		mutex->nm.lockThread = -1;
	}
	else
		__KernelMutexAcquireLock(mutex, initialCount);

	DEBUG_LOG(HLE, "%i=sceKernelCreateMutex(%s, %08x, %d, %08x)", id, name, attr, initialCount, optionsPtr);

	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size != 0)
			WARN_LOG_REPORT(HLE, "sceKernelCreateMutex(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((attr & ~PSP_MUTEX_ATTR_KNOWN) != 0)
		WARN_LOG_REPORT(HLE, "sceKernelCreateMutex(%s) unsupported attr parameter: %08x", name, attr);

	return id;
}
Example #5
void sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!mutexInitComplete)
		__KernelMutexInit();

	u32 error = 0;
	if (!name)
		error = SCE_KERNEL_ERROR_ERROR;
	else if (initialCount < 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	if (error)
	{
		RETURN(error);
		return;
	}

	DEBUG_LOG(HLE,"sceKernelCreateMutex(%s, %08x, %d, %08x)", name, attr, initialCount, optionsPtr);

	Mutex *mutex = new Mutex();
	SceUID id = kernelObjects.Create(mutex);

	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, 31);
	mutex->nm.name[31] = 0;
	mutex->nm.attr = attr;
	if (initialCount == 0)
	{
		mutex->nm.lockLevel = 0;
		mutex->nm.lockThread = -1;
	}
	else
		__KernelMutexAcquireLock(mutex, initialCount);

	if (optionsPtr != 0)
		WARN_LOG(HLE,"sceKernelCreateMutex(%s) unsupported options parameter.", name);

	RETURN(id);

	__KernelReSchedule("mutex created");
}
Example #6
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error)
{
	if (!__KernelLockMutexCheck(mutex, count, error))
		return false;

	if (mutex->nm.lockLevel == 0)
	{
		__KernelMutexAcquireLock(mutex, count);
		// Nobody had it locked - no need to block
		return true;
	}

	if (mutex->nm.lockThread == __KernelGetCurThread())
	{
		// __KernelLockMutexCheck() would've returned an error, so this must be recursive.
		mutex->nm.lockLevel += count;
		return true;
	}

	return false;
}
Example #7
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error)
{
	if (!error)
	{
		if (count <= 0)
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		else if (count > 1 && !(mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		// Two positive ints will always overflow to negative.
		else if (count + mutex->nm.lockLevel < 0)
			error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
	}

	if (error)
		return false;

	if (mutex->nm.lockLevel == 0)
	{
		__KernelMutexAcquireLock(mutex, count);
		// Nobody had it locked - no need to block
		return true;
	}

	if (mutex->nm.lockThread == __KernelGetCurThread())
	{
		// Recursive mutex, let's just increase the lock count and keep going
		if (mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
		{
			mutex->nm.lockLevel += count;
			return true;
		}
		else
		{
			error = PSP_MUTEX_ERROR_ALREADY_LOCKED;
			return false;
		}
	}

	return false;
}
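
Example #6 delegates its argument validation to __KernelLockMutexCheck(), which is not shown above, while Example #7 still performs the same checks inline. A hedged reconstruction of that helper, pieced together from Example #7's inline checks and the error codes Example #3 expects from it (the exact PPSSPP version may differ):

// Approximate reconstruction from Examples #3 and #7; treat as a sketch,
// not the exact PPSSPP function.
bool __KernelLockMutexCheck(Mutex *mutex, int count, u32 &error)
{
	if (error)
		return false;

	const bool recursive = (mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) != 0;

	if (count <= 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if (count > 1 && !recursive)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// Two positive ints will always overflow to negative.
	else if (count + mutex->nm.lockLevel < 0)
		error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
	// Re-locking from the owning thread is only valid for a recursive mutex.
	else if (mutex->nm.lockThread == __KernelGetCurThread() && !recursive)
		error = PSP_MUTEX_ERROR_ALREADY_LOCKED;

	return error == 0;
}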
Example #8
void __KernelMutexAcquireLock(Mutex *mutex, int count)
{
	__KernelMutexAcquireLock(mutex, count, __KernelGetCurThread());
}
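
Example #8 forwards to a three-argument __KernelMutexAcquireLock() that is not listed here. A minimal sketch, assuming it simply records the new owner and lock count that the other examples read back through nm.lockLevel and nm.lockThread (the real implementation may additionally track which locks each thread holds):

// Hypothetical sketch based on the fields used elsewhere in these examples;
// not necessarily the exact PPSSPP code.
void __KernelMutexAcquireLock(Mutex *mutex, int count, SceUID thread)
{
	mutex->nm.lockLevel = count;
	mutex->nm.lockThread = thread;
}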