Example No. 1
// Returns whether the thread should be removed.
static bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads)
{
	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_SEMA, s->GetUID()))
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int wVal = (int) __KernelGetWaitValue(threadID, error);
		if (wVal > s->ns.currentCount)
			return false;

		s->ns.currentCount -= wVal;
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0 && semaWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
		if (cyclesLeft < 0)
			cyclesLeft = 0;
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
Example No. 2
// Some games (GTA) never call this during gameplay, so bad place to put a framerate counter.
static u32 sceDisplaySetFramebuf(u32 topaddr, int linesize, int pixelformat, int sync) {
	FrameBufferState fbstate;
	DEBUG_LOG(SCEDISPLAY,"sceDisplaySetFramebuf(topaddr=%08x,linesize=%d,pixelsize=%d,sync=%d)", topaddr, linesize, pixelformat, sync);
	hleEatCycles(290);
	if (topaddr == 0) {
		DEBUG_LOG(SCEDISPLAY,"- screen off");
	} else {
		fbstate.topaddr = topaddr;
		fbstate.pspFramebufFormat = (GEBufferFormat)pixelformat;
		fbstate.pspFramebufLinesize = linesize;
	}

	s64 delayCycles = 0;
	if (topaddr != framebuf.topaddr && g_Config.iForceMaxEmulatedFPS > 0) {
		// Sometimes we get a small number, there's probably no need to delay the thread for this.
		// sceDisplaySetFramebuf() isn't supposed to delay threads at all.  This is a hack.
		const int FLIP_DELAY_CYCLES_MIN = 10;
		// Some games (like Final Fantasy 4) only call this too much in spurts.
		// The goal is to fix games where this would result in a consistent overhead.
		const int FLIP_DELAY_MIN_FLIPS = 30;

		u64 now = CoreTiming::GetTicks();
		// 1001 to account for NTSC timing (59.94 fps.)
		u64 expected = msToCycles(1001) / g_Config.iForceMaxEmulatedFPS;
		u64 actual = now - lastFlipCycles;
		if (actual < expected - FLIP_DELAY_CYCLES_MIN) {
			if (lastFlipsTooFrequent >= FLIP_DELAY_MIN_FLIPS) {
				delayCycles = expected - actual;
			} else {
				++lastFlipsTooFrequent;
			}
		} else {
			--lastFlipsTooFrequent;
		}
		lastFlipCycles = CoreTiming::GetTicks();
	}

	if (sync == PSP_DISPLAY_SETBUF_IMMEDIATE) {
		// Write immediately to the current framebuffer parameters
		if (topaddr != 0) {
			framebuf = fbstate;
			gpu->SetDisplayFramebuffer(framebuf.topaddr, framebuf.pspFramebufLinesize, framebuf.pspFramebufFormat);
		} else {
			WARN_LOG(SCEDISPLAY, "%s: PSP_DISPLAY_SETBUF_IMMEDIATE without topaddr?", __FUNCTION__);
		}
	} else if (topaddr != 0) {
		// Delay the write until vblank
		latchedFramebuf = fbstate;
		framebufIsLatched = true;
	}

	if (delayCycles > 0) {
		// Okay, the game is going at too high a frame rate.  God of War and Fat Princess both do this.
		// Simply eating the cycles works and is fast, but breaks other games (like Jeanne d'Arc.)
		// So, instead, we delay this HLE thread only (a small deviation from correct behavior.)
		return hleDelayResult(0, "set framebuf", cyclesToUs(delayCycles));
	} else {
		return 0;
	}
}
Example No. 3
// Returns whether the thread should be removed.
bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int result, bool &wokeThreads)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != s->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int wVal = (int) __KernelGetWaitValue(threadID, error);
		if (wVal > s->ns.currentCount)
			return false;

		s->ns.currentCount -= wVal;
		s->ns.numWaitThreads--;
	}

	if (timeoutPtr != 0 && semaWaitTimer != -1)
	{
		// Remove any event for this thread.
		u64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
Example No. 4
void hleLagSync(u64 userdata, int cyclesLate) {
	// The goal here is to prevent network, audio, and input lag from the real world.
	// Our normal timing is very "stop and go".  This is efficient, but causes real world lag.
	// This event (optionally) runs every 1ms to sync with the real world.

	if (!FrameTimingThrottled()) {
		lagSyncScheduled = false;
		return;
	}

	float scale = 1.0f;
	if (PSP_CoreParameter().fpsLimit == FPS_LIMIT_CUSTOM) {
		// 0 is handled in FrameTimingThrottled().
		scale = 60.0f / g_Config.iFpsLimit;
	}

	const double goal = lastLagSync + (scale / 1000.0f);
	time_update();
	// Don't lag too long ever, if they leave it paused.
	while (time_now_d() < goal && goal < time_now_d() + 0.01) {
#ifndef _WIN32
		const double left = goal - time_now_d();
		usleep((long)(left * 1000000));
#endif
		time_update();
	}

	const int emuOver = (int)cyclesToUs(cyclesLate);
	const int over = (int)((time_now_d() - goal) * 1000000);
	ScheduleLagSync(over - emuOver);
}
Example No. 5
bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads)
{
	const SceUID threadID = threadInfo.threadID;
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_VPL, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != vpl->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int size = (int) __KernelGetWaitValue(threadID, error);

		// Padding (normally used to track the allocation.)
		u32 allocSize = size + 8;
		u32 addr = vpl->alloc.Alloc(allocSize, true);
		if (addr != (u32) -1)
			Memory::Write_U32(addr, threadInfo.addrPtr);
		else
			return false;
	}

	if (timeoutPtr != 0 && vplWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(vplWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	wokeThreads = true;
	return true;
}
Example No. 6
bool __KernelUnlockMutex(Mutex *mutex, u32 &error)
{
	__KernelMutexEraseLock(mutex);

	// TODO: PSP_MUTEX_ATTR_PRIORITY
	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter, end;
	for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
	{
		SceUID threadID = *iter;

		int wVal = (int)__KernelGetWaitValue(threadID, error);
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

		__KernelMutexAcquireLock(mutex, wVal, threadID);

		if (timeoutPtr != 0 && mutexWaitTimer != 0)
		{
			// Remove any event for this thread.
			u64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
		}

		__KernelResumeThreadFromWait(threadID, 0);
		wokeThreads = true;
		mutex->waitingThreads.erase(iter);
		break;
	}

	if (!wokeThreads)
		mutex->nm.lockThread = -1;

	return wokeThreads;
}
Example No. 7
void __startVTimer(VTimer *vt) {
	vt->nvt.active = 1;
	vt->nvt.base = cyclesToUs(CoreTiming::GetTicks());

	if (vt->nvt.schedule != 0 && vt->nvt.handlerAddr != 0)
		__KernelScheduleVTimer(vt, vt->nvt.schedule);
}
Example No. 8
void sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<Mutex>(id));
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
Example No. 9
bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error, int result, bool &wokeThreads)
{
	SceUID waitID = __KernelGetWaitID(th.tid, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.tid, error);

	// The waitID may be different after a timeout.
	if (waitID != e->GetUID())
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		if (!__KernelEventFlagMatches(&e->nef.currentPattern, th.bits, th.wait, th.outAddr))
			return false;

		e->nef.numWaitThreads--;
	}
	else
	{
		// Otherwise, we set the current result since we're bailing.
		if (Memory::IsValidAddress(th.outAddr))
			Memory::Write_U32(e->nef.currentPattern, th.outAddr);
	}

	if (timeoutPtr != 0 && eventFlagWaitTimer != 0)
	{
		// Remove any event for this thread.
		u64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, th.tid);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.tid, result);
	wokeThreads = true;
	return true;
}
Example No. 10
template <typename T>
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != mutex->GetUID())
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error);
		workarea->lockThread = threadID;
	}

	if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
Example No. 11
// Resume all waiting threads (for delete / cancel.)
// Returns true if it woke any threads.
bool __KernelClearSemaThreads(Semaphore *s, int reason)
{
	bool wokeThreads = false;

	// TODO: PSP_SEMA_ATTR_PRIORITY
	std::vector<SceUID>::iterator iter;
	for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++)
	{
		SceUID threadID = *iter;

		u32 error;
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
		if (timeoutPtr != 0 && semaWaitTimer != 0)
		{
			// Remove any event for this thread.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
		}

		__KernelResumeThreadFromWait(threadID, reason);
		wokeThreads = true;
	}
	s->waitingThreads.clear();

	return wokeThreads;
}
Example No. 12
void __RtcTimeOfDay(PSPTimeval *tv)
{
	s64 additionalUs = cyclesToUs(CoreTiming::GetTicks());
	*tv = rtcBaseTime;

	s64 adjustedUs = additionalUs + tv->tv_usec;
	tv->tv_sec += long(adjustedUs / 1000000UL);
	tv->tv_usec = adjustedUs % 1000000UL;
}
Example No. 13
/**
 * Updates the WaitSynchronization timeout parameter according to the difference
 * between ticks of the last WaitSynchronization call and the incoming one.
 * @param timeout_low a pointer to the register for the low part of the timeout parameter
 * @param timeout_high a pointer to the register for the high part of the timeout parameter
 * @param last_tick tick of the last WaitSynchronization call
 */
static void UpdateTimeoutParameter(u32* timeout_low, u32* timeout_high, u64 last_tick) {
    s64 timeout = ((s64)*timeout_high << 32) | *timeout_low;

    if (timeout != -1) {
        timeout -= cyclesToUs(CoreTiming::GetTicks() - last_tick) * 1000; // in nanoseconds

        if (timeout < 0)
            timeout = 0;

        *timeout_low = timeout & 0xFFFFFFFF;
        *timeout_high = timeout >> 32;
    }
}
Example No. 14
	void WriteCurrentTimeout(SceUID waitID) const
	{
		u32 error;
		if (IsStillWaiting(waitID))
		{
			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && waitTimer != -1)
			{
				// Remove any event for this thread.
				s64 cyclesLeft = CoreTiming::UnscheduleEvent(waitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}
		}
	}
Example No. 15
static bool __KernelUnlockMbxForThread(Mbx *m, MbxWaitingThread &th, u32 &error, int result, bool &wokeThreads)
{
	if (!HLEKernel::VerifyWait(th.threadID, WAITTYPE_MBX, m->GetUID()))
		return true;

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.threadID, error);
	if (timeoutPtr != 0 && mbxWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mbxWaitTimer, th.threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.threadID, result);
	wokeThreads = true;
	return true;
}
Example No. 16
void sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(HLE,"sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
	{
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_ADDR);
		return;
	}

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
				Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<LwMutex>(workarea.uid));
		workarea.clear();
		Memory::WriteStruct(workareaPtr, &workarea);

		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
Example No. 17
bool __KernelUnlockMbxForThread(Mbx *m, MbxWaitingThread &th, u32 &error, int result, bool &wokeThreads)
{
	SceUID waitID = __KernelGetWaitID(th.first, WAITTYPE_MBX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.first, error);

	// The waitID may be different after a timeout.
	if (waitID != m->GetUID())
		return true;

	if (timeoutPtr != 0 && mbxWaitTimer != -1)
	{
		// Remove any event for this thread.
		u64 cyclesLeft = CoreTiming::UnscheduleEvent(mbxWaitTimer, th.first);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.first, result);
	wokeThreads = true;
	return true;
}
Example No. 18
bool __KernelUnlockLwMutex(NativeLwMutexWorkarea &workarea, u32 &error)
{
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (error)
	{
		workarea.lockThread = 0;
		return false;
	}

	// TODO: PSP_MUTEX_ATTR_PRIORITY
	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter, end;
	for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
	{
		SceUID threadID = *iter;

		int wVal = (int)__KernelGetWaitValue(threadID, error);
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

		workarea.lockLevel = wVal;
		workarea.lockThread = threadID;

		if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
		{
			// Remove any event for this thread.
			u64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
		}

		__KernelResumeThreadFromWait(threadID, 0);
		wokeThreads = true;
		mutex->waitingThreads.erase(iter);
		break;
	}

	if (!wokeThreads)
		workarea.lockThread = 0;

	return wokeThreads;
}
Example No. 19
bool __KernelUnlockMutexForThread(Mutex *mutex, SceUID threadID, u32 &error, int result)
{
	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_MUTEX, mutex->GetUID()))
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int wVal = (int)__KernelGetWaitValue(threadID, error);
		__KernelMutexAcquireLock(mutex, wVal, threadID);
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0 && mutexWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
Example No. 20
// Some games (GTA) never call this during gameplay, so bad place to put a framerate counter.
static u32 sceDisplaySetFramebuf(u32 topaddr, int linesize, int pixelformat, int sync) {
	FrameBufferState fbstate = {0};
	fbstate.topaddr = topaddr;
	fbstate.fmt = (GEBufferFormat)pixelformat;
	fbstate.stride = linesize;

	if (sync != PSP_DISPLAY_SETBUF_IMMEDIATE && sync != PSP_DISPLAY_SETBUF_NEXTFRAME) {
		return hleLogError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_MODE, "invalid sync mode");
	}
	if (topaddr != 0 && !Memory::IsRAMAddress(topaddr) && !Memory::IsVRAMAddress(topaddr)) {
		return hleLogError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_POINTER, "invalid address");
	}
	if ((topaddr & 0xF) != 0) {
		return hleLogError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_POINTER, "misaligned address");
	}
	if ((linesize & 0x3F) != 0 || (linesize == 0 && topaddr != 0)) {
		return hleLogError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_SIZE, "invalid stride");
	}
	if (pixelformat < 0 || pixelformat > GE_FORMAT_8888) {
		return hleLogError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_FORMAT, "invalid format");
	}

	if (sync == PSP_DISPLAY_SETBUF_IMMEDIATE) {
		if (fbstate.fmt != latchedFramebuf.fmt || fbstate.stride != latchedFramebuf.stride) {
			return hleReportError(SCEDISPLAY, SCE_KERNEL_ERROR_INVALID_MODE, "must change latched framebuf first");
		}
	}

	hleEatCycles(290);

	s64 delayCycles = 0;
	// Don't count transitions between display off and display on.
	if (topaddr != 0 && topaddr != framebuf.topaddr && framebuf.topaddr != 0 && g_Config.iForceMaxEmulatedFPS > 0) {
		// Sometimes we get a small number, there's probably no need to delay the thread for this.
		// sceDisplaySetFramebuf() isn't supposed to delay threads at all.  This is a hack.
		const int FLIP_DELAY_CYCLES_MIN = 10;
		// Some games (like Final Fantasy 4) only call this too much in spurts.
		// The goal is to fix games where this would result in a consistent overhead.
		const int FLIP_DELAY_MIN_FLIPS = 30;

		u64 now = CoreTiming::GetTicks();
		// 1001 to account for NTSC timing (59.94 fps.)
		u64 expected = msToCycles(1001) / g_Config.iForceMaxEmulatedFPS;
		u64 actual = now - lastFlipCycles;
		if (actual < expected - FLIP_DELAY_CYCLES_MIN) {
			if (lastFlipsTooFrequent >= FLIP_DELAY_MIN_FLIPS) {
				delayCycles = expected - actual;
			} else {
				++lastFlipsTooFrequent;
			}
		} else {
			--lastFlipsTooFrequent;
		}
		lastFlipCycles = CoreTiming::GetTicks();
	}

	if (sync == PSP_DISPLAY_SETBUF_IMMEDIATE) {
		// Write immediately to the current framebuffer parameters
		framebuf = fbstate;
		gpu->SetDisplayFramebuffer(framebuf.topaddr, framebuf.stride, framebuf.fmt);
	} else {
		// Delay the write until vblank
		latchedFramebuf = fbstate;
		framebufIsLatched = true;

		// If we update the format or stride, this affects the current framebuf immediately.
		framebuf.fmt = latchedFramebuf.fmt;
		framebuf.stride = latchedFramebuf.stride;
	}

	if (delayCycles > 0) {
		// Okay, the game is going at too high a frame rate.  God of War and Fat Princess both do this.
		// Simply eating the cycles works and is fast, but breaks other games (like Jeanne d'Arc.)
		// So, instead, we delay this HLE thread only (a small deviation from correct behavior.)
		return hleDelayResult(hleLogSuccessI(SCEDISPLAY, 0, "delaying frame thread"), "set framebuf", cyclesToUs(delayCycles));
	} else {
		if (topaddr == 0) {
			return hleLogSuccessI(SCEDISPLAY, 0, "disabling display");
		} else {
			return hleLogSuccessI(SCEDISPLAY, 0);
		}
	}
}
Example No. 21
u64 __getVTimerRunningTime(VTimer *vt) {
	if (!vt->nvt.active)
		return 0;

	return cyclesToUs(CoreTiming::GetTicks()) - vt->nvt.base;
}
Example No. 22
u64 __RtcGetCurrentTick()
{
	// TODO: It's probably expecting ticks since January 1, 0001?
	return cyclesToUs(CoreTiming::GetTicks()) + rtcMagicOffset;
}
Example No. 23
//int sceKernelSignalSema(SceUID semaid, int signal);
// void because it changes threads.
void sceKernelSignalSema(SceUID id, int signal)
{
	//TODO: check that this thing really works :)
	u32 error;
	Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
	if (s)
	{
		if (s->ns.currentCount + signal > s->ns.maxCount)
		{
			RETURN(SCE_KERNEL_ERROR_SEMA_OVF);
			return;
		}

		int oldval = s->ns.currentCount;
		s->ns.currentCount += signal;
		DEBUG_LOG(HLE,"sceKernelSignalSema(%i, %i) (old: %i, new: %i)", id, signal, oldval, s->ns.currentCount);

		// We need to set the return value BEFORE processing other threads.
		RETURN(0);

		bool wokeThreads = false;
retry:
		// TODO: PSP_SEMA_ATTR_PRIORITY
		std::vector<SceUID>::iterator iter;
		for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++)
		{
			SceUID threadID = *iter;

			int wVal = (int)__KernelGetWaitValue(threadID, error);
			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

			if (wVal <= s->ns.currentCount)
			{
				s->ns.currentCount -= wVal;
				s->ns.numWaitThreads--;

				if (timeoutPtr != 0 && semaWaitTimer != 0)
				{
					// Remove any event for this thread.
					s64 cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
					Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
				}

				__KernelResumeThreadFromWait(threadID, 0);
				wokeThreads = true;
				s->waitingThreads.erase(iter);
				goto retry;
			}
			else
			{
				break;
			}
		}

		__KernelReSchedule("semaphore signalled");
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelSignalSema : Trying to signal invalid semaphore %i", id);
		RETURN(error);
	}
}