Example #1
void sceUmdWaitDriveStatCB(u32 stat, u32 timeout)
{
	// Set the return value before anything that may process callbacks or switch threads.
	RETURN(0);

	if (driveCBId != -1)
	{
		DEBUG_LOG(HLE,"0=sceUmdWaitDriveStatCB(stat = %08x, timeout = %d)", stat, timeout);

		bool callbacksProcessed = __KernelForceCallbacks();
		if (callbacksProcessed)
			__KernelExecutePendingMipsCalls();
	}
	else
	{
		WARN_LOG(HLE, "0=sceUmdWaitDriveStatCB(stat = %08x, timeout = %d) without callback", stat, timeout);
	}

	// None of the requested drive status bits are set yet: wait for them.
	if ((stat & __KernelUmdGetState()) == 0)
	{
		if (timeout == 0)
			timeout = 8000;

		__UmdWaitStat(timeout);
		__KernelWaitCurThread(WAITTYPE_UMD, 1, stat, 0, true);
		__KernelCheckCallbacks();
	}
}
Example #2
void __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *badSemaMessage, bool processCallbacks)
{
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (wantedCount > s->ns.maxCount || wantedCount <= 0)
		{
			RETURN(SCE_KERNEL_ERROR_ILLEGAL_COUNT);
			return;
		}

		// We need to set the return value BEFORE processing callbacks / etc.
		RETURN(0);

		if (s->ns.currentCount >= wantedCount)
			s->ns.currentCount -= wantedCount;
		else
		{
			s->ns.numWaitThreads++;
			s->waitingThreads.push_back(__KernelGetCurThread());
			__KernelSetSemaTimeout(s, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_SEMA, id, wantedCount, timeoutPtr, processCallbacks);
			if (processCallbacks)
				__KernelCheckCallbacks();
		}

		__KernelReSchedule("semaphore waited");
	}
	else
	{
		ERROR_LOG(HLE, badSemaMessage, id);
		RETURN(error);
	}
}
Example #3
void sceKernelAllocateFplCB()
{
	SceUID id = PARAM(0);
	u32 error;
	FPL *fpl = kernelObjects.Get<FPL>(id, error);
	if (fpl)
	{
		u32 blockPtrAddr = PARAM(1);
		int timeOut = PARAM(2);

		int blockNum = fpl->allocateBlock();
		if (blockNum >= 0) {
			u32 blockPtr = fpl->address + fpl->nf.blocksize * blockNum;
			Memory::Write_U32(blockPtr, blockPtrAddr);
			RETURN(0);
		} else {
			// TODO: Should block and process callbacks!
			__KernelCheckCallbacks();
		}

		DEBUG_LOG(HLE,"sceKernelAllocateFpl(%i, %08x, %i)", id, PARAM(1), timeOut);
	}
	else
	{
		DEBUG_LOG(HLE,"ERROR: sceKernelAllocateFplCB(%i)", id);
		RETURN(error);
	}
}
Example #4
void sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE,"sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
	{
		Memory::WriteStruct(workareaPtr, &workarea);
		RETURN(0);
		__KernelReSchedule("lwmutex locked");
	}
	else if (error)
		RETURN(error);
	else
	{
		LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
		if (mutex)
		{
			mutex->waitingThreads.push_back(__KernelGetCurThread());
			__KernelWaitLwMutex(mutex, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea.uid, count, timeoutPtr, true);
			__KernelCheckCallbacks();
		}
		else
			RETURN(error);
	}
}
Example #5
void sceKernelReceiveMbxCB(SceUID id, u32 packetAddrPtr, u32 timeoutPtr)
{
	u32 error;
	Mbx *m = kernelObjects.Get<Mbx>(id, error);
	// The CB variant processes any pending callbacks before touching the mailbox.
	__KernelCheckCallbacks();

	if (!m)
	{
		ERROR_LOG(HLE, "sceKernelReceiveMbxCB(%i, %08x, %08x): invalid mbx id", id, packetAddrPtr, timeoutPtr);
		RETURN(error);
		return;
	}

	if (!m->messageQueue.empty())
	{
		DEBUG_LOG(HLE, "sceKernelReceiveMbxCB(%i, %08x, %08x): sending first queue message", id, packetAddrPtr, timeoutPtr);
		Memory::Write_U32(m->messageQueue.front(), packetAddrPtr);
		m->messageQueue.erase(m->messageQueue.begin());
		RETURN(0);
	}
	else
	{
		DEBUG_LOG(HLE, "sceKernelReceiveMbxCB(%i, %08x, %08x): no message in queue, waiting", id, packetAddrPtr, timeoutPtr);
		m->AddWaitingThread(id, packetAddrPtr);
		RETURN(0);
		__KernelWaitCurThread(WAITTYPE_MBX, 0, 0, 0, true); // ?
	}
}
Example #6
inline void hleFinishSyscall(const HLEFunction &info)
{
	if ((hleAfterSyscall & HLE_AFTER_SKIP_DEADBEEF) == 0)
		SetDeadbeefRegs();

	if ((hleAfterSyscall & HLE_AFTER_CURRENT_CALLBACKS) != 0)
		__KernelForceCallbacks();

	if ((hleAfterSyscall & HLE_AFTER_RUN_INTERRUPTS) != 0)
		__RunOnePendingInterrupt();

	// Rescheduling will also do HLE_AFTER_ALL_CALLBACKS.
	if ((hleAfterSyscall & HLE_AFTER_RESCHED_CALLBACKS) != 0)
		__KernelReSchedule(true, hleAfterSyscallReschedReason);
	else if ((hleAfterSyscall & HLE_AFTER_RESCHED) != 0)
		__KernelReSchedule(hleAfterSyscallReschedReason);
	else if ((hleAfterSyscall & HLE_AFTER_ALL_CALLBACKS) != 0)
		__KernelCheckCallbacks();

	if ((hleAfterSyscall & HLE_AFTER_DEBUG_BREAK) != 0)
	{
		if (!hleExecuteDebugBreak(info))
		{
			// We'll do it next syscall.
			hleAfterSyscall = HLE_AFTER_DEBUG_BREAK;
			hleAfterSyscallReschedReason = 0;
			return;
		}
	}

	hleAfterSyscall = HLE_AFTER_NOTHING;
	hleAfterSyscallReschedReason = 0;
}
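
Example #6 is the consumer side of a deferral scheme: work such as rescheduling or running callbacks can be postponed by ORing a flag into hleAfterSyscall, and hleFinishSyscall performs it once the syscall handler has returned. The standalone sketch below only illustrates that flag-accumulation idea; the flag values, the helper name hleDeferReSchedule, and the printf bodies are simplified stand-ins invented for illustration, not PPSSPP's actual implementation.

#include <cstdio>

// Simplified stand-ins for the real flags and globals referenced in Example #6 (assumed values).
enum {
	HLE_AFTER_NOTHING           = 0x00,
	HLE_AFTER_RESCHED           = 0x01,
	HLE_AFTER_ALL_CALLBACKS     = 0x02,
	HLE_AFTER_RESCHED_CALLBACKS = 0x04,
};

static int hleAfterSyscall = HLE_AFTER_NOTHING;
static const char *hleAfterSyscallReschedReason = nullptr;

// Hypothetical producer-side helper: a handler requests a deferred reschedule here
// instead of switching threads in the middle of its own body.
static void hleDeferReSchedule(const char *reason, bool processCallbacks)
{
	hleAfterSyscall |= processCallbacks ? HLE_AFTER_RESCHED_CALLBACKS : HLE_AFTER_RESCHED;
	hleAfterSyscallReschedReason = reason;
}

// Toy "finish" step mirroring the priority order of Example #6, then clearing the flags.
static void finishSyscallSketch()
{
	if (hleAfterSyscall & HLE_AFTER_RESCHED_CALLBACKS)
		printf("reschedule (checking callbacks): %s\n", hleAfterSyscallReschedReason);
	else if (hleAfterSyscall & HLE_AFTER_RESCHED)
		printf("reschedule: %s\n", hleAfterSyscallReschedReason);
	else if (hleAfterSyscall & HLE_AFTER_ALL_CALLBACKS)
		printf("check callbacks only\n");

	hleAfterSyscall = HLE_AFTER_NOTHING;
	hleAfterSyscallReschedReason = nullptr;
}

int main()
{
	hleDeferReSchedule("semaphore waited", true);  // what a *CB handler would request
	finishSyscallSketch();                         // what runs after the handler returns
}
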
Example #7
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
// void because it changes threads.
void sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE,"sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
	{
		RETURN(0);
		__KernelReSchedule("mutex locked");
	}
	else if (error)
		RETURN(error);
	else
	{
		mutex->waitingThreads.push_back(__KernelGetCurThread());
		__KernelWaitMutex(mutex, timeoutPtr);
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true);
		__KernelCheckCallbacks();
	}

	__KernelReSchedule("mutex locked");
}
Example #8
void sceDisplayWaitVblankStartCB()
{
	DEBUG_LOG(HLE,"sceDisplayWaitVblankStartCB()");	
	__KernelWaitCurThread(WAITTYPE_VBLANK, 0, 0, 0, true);
	__KernelCheckCallbacks();
}