Example #1
// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata)
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}
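
For orientation, a sketch of a plausible call site: a non-CPU thread hands work to the CPU thread through this entry point. The event type index `ev_sync` and the function name are assumptions for illustration, not part of the example.

// Hypothetical graphics-thread caller; ev_sync stands in for an event type
// index obtained from RegisterEvent during initialization.
static int ev_sync;

void OnSwapRequested(u64 frame_id)
{
	// Enqueue under tsWriteLock; the CPU thread later drains tsQueue into
	// its own event queue via MoveEvents().
	CoreTiming::ScheduleEvent_Threadsafe(0, ev_sync, frame_id);
}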
Example #2
void MoveEvents()
{
    for (Event ev; s_ts_queue.Pop(ev);)
    {
        ev.fifo_order = s_event_fifo_id++;
        s_event_queue.emplace_back(std::move(ev));
        std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    }
}
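
As a self-contained aside on the heap discipline used here (assuming only the standard library): `std::push_heap` with `std::greater` maintains a min-heap, so the earliest event is always at `front()`; the `fifo_order` stamp breaks ties so events scheduled for the same tick fire in arrival order.

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> q;
    for (int t : {500, 100, 300})
    {
        q.push_back(t);
        // std::greater turns the default max-heap into a min-heap.
        std::push_heap(q.begin(), q.end(), std::greater<int>());
    }
    assert(q.front() == 100);  // the earliest "event" is at the front
    return 0;
}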
Example #3
// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata)
{
	// TODO: Fix UI thread safety problems, and enable this assertion
	// _assert_msg_(POWERPC, !Core::IsCPUThread(), "ScheduleEvent_Threadsafe from wrong thread");
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}
Example #4
void MoveEvents()
{
	BaseEvent sevt;
	while (tsQueue.Pop(sevt))
	{
		Event *evt = GetNewEvent();
		evt->time = sevt.time;
		evt->userdata = sevt.userdata;
		evt->type = sevt.type;
		AddEventToQueue(evt);
	}
}
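
Example #4 relies on `Event` being `LinkedListItem<BaseEvent>`. A plausible shape for that wrapper, shown here as an assumption rather than the actual header, is an intrusive singly linked node grafted onto the payload:

// Hypothetical sketch of the wrapper assumed above: the payload's fields
// (time, userdata, type) are inherited, and `next` chains the events.
template <typename T>
struct LinkedListItem : public T
{
	LinkedListItem<T>* next;
};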
Example #5
// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, !Core::IsCPUThread(), "ScheduleEvent_Threadsafe from wrong thread");
	if (Core::g_want_determinism)
	{
		ERROR_LOG(POWERPC, "Someone scheduled an off-thread \"%s\" event while netplay or movie play/record "
		                   "was active.  This is likely to cause a desync.",
		                   event_types[event_type].name.c_str());
	}
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = g_globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}
Example #6
void ScheduleEvent(s64 cycles_into_future, EventType* event_type, u64 userdata, FromThread from)
{
    _assert_msg_(POWERPC, event_type, "Event type is nullptr, will crash now.");

    bool from_cpu_thread;
    if (from == FromThread::ANY)
    {
        from_cpu_thread = Core::IsCPUThread();
    }
    else
    {
        from_cpu_thread = from == FromThread::CPU;
        _assert_msg_(POWERPC, from_cpu_thread == Core::IsCPUThread(),
                     "ScheduleEvent from wrong thread (%s)", from_cpu_thread ? "CPU" : "non-CPU");
    }

    if (from_cpu_thread)
    {
        s64 timeout = GetTicks() + cycles_into_future;

        // If this event needs to be scheduled before the next Advance(), force one early
        if (!s_is_global_timer_sane)
            ForceExceptionCheck(cycles_into_future);

        s_event_queue.emplace_back(Event{timeout, s_event_fifo_id++, userdata, event_type});
        std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    }
    else
    {
        if (Core::g_want_determinism)
        {
            ERROR_LOG(POWERPC, "Someone scheduled an off-thread \"%s\" event while netplay or "
                      "movie play/record was active.  This is likely to cause a desync.",
                      event_type->name->c_str());
        }

        std::lock_guard<std::mutex> lk(s_ts_write_lock);
        s_ts_queue.Push(Event{g_global_timer + cycles_into_future, 0, userdata, event_type});
    }
}
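
A hypothetical call-site sketch for this overload. It assumes a `FromThread::NON_CPU` enumerator alongside the `CPU` and `ANY` ones visible above, and an `EventType*` previously returned by `RegisterEvent`:

// Hypothetical callers exercising both branches of ScheduleEvent.
void ExampleCallers(CoreTiming::EventType* ev_dma)
{
    // CPU thread: inserted straight into the heap; may shorten the current
    // slice via ForceExceptionCheck so the event is not serviced late.
    CoreTiming::ScheduleEvent(800, ev_dma, /*userdata=*/0, CoreTiming::FromThread::CPU);

    // Any other thread: pushed onto s_ts_queue under s_ts_write_lock and
    // drained into the heap by MoveEvents() on the CPU thread.
    CoreTiming::ScheduleEvent(0, ev_dma, /*userdata=*/1, CoreTiming::FromThread::NON_CPU);
}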
Example #7
namespace CoreTiming
{
struct EventType
{
    TimedCallback callback;
    const std::string* name;
};

struct Event
{
    s64 time;
    u64 fifo_order;
    u64 userdata;
    EventType* type;
};

// Sort by time, unless the times are the same, in which case sort by the order added to the queue
static bool operator>(const Event& left, const Event& right)
{
    return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
}
static bool operator<(const Event& left, const Event& right)
{
    return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
}

// unordered_map stores each element separately as a linked list node so pointers to elements
// remain stable regardless of rehashes/resizing.
static std::unordered_map<std::string, EventType> s_event_types;
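
(Aside: a standalone check of the stability guarantee this comment leans on, using only the standard library. Rehashing invalidates iterators into an `unordered_map`, but pointers and references to elements stay valid.)

#include <cassert>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, int> m;
    int* p = &m.emplace("first", 1).first->second;
    for (int i = 0; i < 1000; ++i)
        m.emplace(std::to_string(i), i);  // forces several rehashes
    assert(*p == 1);  // the element's address survived every rehash
    return 0;
}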

// STATE_TO_SAVE
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accommodated
// by the standard adaptor class.
static std::vector<Event> s_event_queue;
static u64 s_event_fifo_id;
static std::mutex s_ts_write_lock;
static Common::FifoQueue<Event, false> s_ts_queue;

static float s_last_OC_factor;
float g_last_OC_factor_inverted;
int g_slice_length;
static constexpr int MAX_SLICE_LENGTH = 20000;

static s64 s_idled_cycles;
static u32 s_fake_dec_start_value;
static u64 s_fake_dec_start_ticks;

// Are we in a function that has been called from Advance()
static bool s_is_global_timer_sane;

s64 g_global_timer;
u64 g_fake_TB_start_value;
u64 g_fake_TB_start_ticks;

static EventType* s_ev_lost = nullptr;

static void EmptyTimedCallback(u64 userdata, s64 cyclesLate)
{
}

// Changing the CPU speed in Dolphin isn't actually done by changing the physical clock rate,
// but by changing the amount of work done in a particular amount of time. This tends to be more
// compatible because it stops the games from actually knowing directly that the clock rate has
// changed, and ensures that anything based on waiting a specific number of cycles still works.
//
// Technically it might be more accurate to call this changing the IPC instead of the CPU speed,
// but the effect is largely the same.
static int DowncountToCycles(int downcount)
{
    return static_cast<int>(downcount * g_last_OC_factor_inverted);
}

static int CyclesToDowncount(int cycles)
{
    return static_cast<int>(cycles * s_last_OC_factor);
}
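
A standalone round-trip of the two conversions, under an assumed overclock factor of 2.0f (the constants below are illustrative, not Dolphin defaults):

#include <cassert>

static const float kOCFactor = 2.0f;            // assumed m_OCFactor
static const float kOCFactorInverted = 1.0f / kOCFactor;

static int DowncountToCycles(int downcount) { return (int)(downcount * kOCFactorInverted); }
static int CyclesToDowncount(int cycles) { return (int)(cycles * kOCFactor); }

int main()
{
    // 20000 cycles map to a downcount of 40000, and converting back
    // recovers the original cycle count exactly for this factor.
    int downcount = CyclesToDowncount(20000);
    assert(downcount == 40000);
    assert(DowncountToCycles(downcount) == 20000);
    return 0;
}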

EventType* RegisterEvent(const std::string& name, TimedCallback callback)
{
    // check for existing type with same name.
    // we want event type names to remain unique so that we can use them for serialization.
    _assert_msg_(POWERPC, s_event_types.find(name) == s_event_types.end(),
                 "CoreTiming Event \"%s\" is already registered. Events should only be registered "
                 "during Init to avoid breaking save states.",
                 name.c_str());

    auto info = s_event_types.emplace(name, EventType{callback, nullptr});
    EventType* event_type = &info.first->second;
    event_type->name = &info.first->first;
    return event_type;
}

void UnregisterAllEvents()
{
    _assert_msg_(POWERPC, s_event_queue.empty(), "Cannot unregister events with events pending");
    s_event_types.clear();
}

void Init()
{
    s_last_OC_factor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
    g_last_OC_factor_inverted = 1.0f / s_last_OC_factor;
    PowerPC::ppcState.downcount = CyclesToDowncount(MAX_SLICE_LENGTH);
    g_slice_length = MAX_SLICE_LENGTH;
    g_global_timer = 0;
    s_idled_cycles = 0;

    // The time between CoreTiming being initialized and the first call to Advance() is considered
    // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
    // executing the first PPC cycle of each slice to prepare the slice length and downcount for
    // that slice.
    s_is_global_timer_sane = true;

    s_event_fifo_id = 0;
    s_ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
}

void Shutdown()
{
    std::lock_guard<std::mutex> lk(s_ts_write_lock);
    MoveEvents();
    ClearPendingEvents();
    UnregisterAllEvents();
}

void DoState(PointerWrap& p)
{
    std::lock_guard<std::mutex> lk(s_ts_write_lock);
    p.Do(g_slice_length);
    p.Do(g_global_timer);
    p.Do(s_idled_cycles);
    p.Do(s_fake_dec_start_value);
    p.Do(s_fake_dec_start_ticks);
    p.Do(g_fake_TB_start_value);
    p.Do(g_fake_TB_start_ticks);
    p.Do(s_last_OC_factor);
    g_last_OC_factor_inverted = 1.0f / s_last_OC_factor;
    p.Do(s_event_fifo_id);

    p.DoMarker("CoreTimingData");

    MoveEvents();
    p.DoEachElement(s_event_queue, [](PointerWrap& pw, Event& ev) {
        pw.Do(ev.time);
        pw.Do(ev.fifo_order);

        // this is why we can't have (nice things) pointers as userdata
        pw.Do(ev.userdata);

        // we can't savestate ev.type directly because events might not get registered in the same
        // order (or at all) every time.
        // so, we savestate the event's type's name, and derive ev.type from that when loading.
        std::string name;
        if (pw.GetMode() != PointerWrap::MODE_READ)
            name = *ev.type->name;

        pw.Do(name);
        if (pw.GetMode() == PointerWrap::MODE_READ)
        {
            auto itr = s_event_types.find(name);
            if (itr != s_event_types.end())
            {
                ev.type = &itr->second;
            }
            else
            {
                WARN_LOG(POWERPC,
                         "Lost event from savestate because its type, \"%s\", has not been registered.",
                         name.c_str());
                ev.type = s_ev_lost;
            }
        }
    });
    p.DoMarker("CoreTimingEvents");

    // When loading from a save state, we must assume the Event order is random and meaningless.
    // The exact layout of the heap in memory is implementation defined, therefore it is platform
    // and library version specific.
    if (p.GetMode() == PointerWrap::MODE_READ)
        std::make_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
}

// This should only be called from the CPU thread. If you are calling
// it from any other thread, you are doing something evil
u64 GetTicks()
{
    u64 ticks = static_cast<u64>(g_global_timer);
    if (!s_is_global_timer_sane)
    {
        int downcount = DowncountToCycles(PowerPC::ppcState.downcount);
        ticks += g_slice_length - downcount;
    }
    return ticks;
}
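
Restating the mid-slice correction with assumed numbers: if the timer was 1000 at the slice boundary, the slice is 20000 cycles long, and 15000 cycles' worth of downcount remain, then 5000 cycles have already executed and the true tick count is 6000.

#include <cassert>

int main()
{
    // Assumed snapshot, mirroring the arithmetic in GetTicks() above.
    long long global_timer = 1000;   // g_global_timer at slice start
    int slice_length = 20000;        // g_slice_length
    int downcount_cycles = 15000;    // DowncountToCycles(ppcState.downcount)

    long long ticks = global_timer + (slice_length - downcount_cycles);
    assert(ticks == 6000);
    return 0;
}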

u64 GetIdleTicks()
{
    return static_cast<u64>(s_idled_cycles);
}

void ClearPendingEvents()
{
    s_event_queue.clear();
}

void ScheduleEvent(s64 cycles_into_future, EventType* event_type, u64 userdata, FromThread from)
{
    _assert_msg_(POWERPC, event_type, "Event type is nullptr, will crash now.");

    bool from_cpu_thread;
    if (from == FromThread::ANY)
    {
        from_cpu_thread = Core::IsCPUThread();
    }
    else
    {
        from_cpu_thread = from == FromThread::CPU;
        _assert_msg_(POWERPC, from_cpu_thread == Core::IsCPUThread(),
                     "ScheduleEvent from wrong thread (%s)", from_cpu_thread ? "CPU" : "non-CPU");
    }

    if (from_cpu_thread)
    {
        s64 timeout = GetTicks() + cycles_into_future;

        // If this event needs to be scheduled before the next Advance(), force one early
        if (!s_is_global_timer_sane)
            ForceExceptionCheck(cycles_into_future);

        s_event_queue.emplace_back(Event{timeout, s_event_fifo_id++, userdata, event_type});
        std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    }
    else
    {
        if (Core::g_want_determinism)
        {
            ERROR_LOG(POWERPC, "Someone scheduled an off-thread \"%s\" event while netplay or "
                      "movie play/record was active.  This is likely to cause a desync.",
                      event_type->name->c_str());
        }

        std::lock_guard<std::mutex> lk(s_ts_write_lock);
        s_ts_queue.Push(Event{g_global_timer + cycles_into_future, 0, userdata, event_type});
    }
}

void RemoveEvent(EventType* event_type)
{
    auto itr = std::remove_if(s_event_queue.begin(), s_event_queue.end(),
    [&](const Event& e) {
        return e.type == event_type;
    });

    // Removing random items breaks the invariant so we have to re-establish it.
    if (itr != s_event_queue.end())
    {
        s_event_queue.erase(itr, s_event_queue.end());
        std::make_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    }
}
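
The erase-remove pass above can leave the surviving elements in an order that violates the heap property, which is why `make_heap` follows it; a minimal standalone illustration:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> heap = {100, 300, 200, 500, 400};
    std::make_heap(heap.begin(), heap.end(), std::greater<int>());

    // Erase-remove an arbitrary element; the leftover layout is not
    // guaranteed to be a valid min-heap any more...
    heap.erase(std::remove(heap.begin(), heap.end(), 100), heap.end());

    // ...so the invariant is re-established before the next pop.
    std::make_heap(heap.begin(), heap.end(), std::greater<int>());
    assert(heap.front() == 200);
    return 0;
}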

void RemoveAllEvents(EventType* event_type)
{
    MoveEvents();
    RemoveEvent(event_type);
}

void ForceExceptionCheck(s64 cycles)
{
    cycles = std::max<s64>(0, cycles);
    if (DowncountToCycles(PowerPC::ppcState.downcount) > cycles)
    {
        // downcount is always (much) smaller than INT_MAX so we can safely cast cycles to an int here.
        // Account for cycles already executed by adjusting the g_slice_length
        g_slice_length -= DowncountToCycles(PowerPC::ppcState.downcount) - static_cast<int>(cycles);
        PowerPC::ppcState.downcount = CyclesToDowncount(static_cast<int>(cycles));
    }
}
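
Worked numbers for the early-check path, under an assumed snapshot (OC factor 1.0, so cycles and downcount coincide): 8000 cycles remain in a 20000-cycle slice and a new event is due in 3000.

#include <cassert>

int main()
{
    int slice_length = 20000;     // g_slice_length
    int downcount_cycles = 8000;  // DowncountToCycles(ppcState.downcount)
    int cycles = 3000;            // new event's deadline

    if (downcount_cycles > cycles)
    {
        // Give back the 5000 cycles we no longer intend to run this slice...
        slice_length -= downcount_cycles - cycles;
        // ...and cut the downcount so Advance() runs in time for the event.
        downcount_cycles = cycles;
    }
    assert(slice_length == 15000 && downcount_cycles == 3000);
    return 0;
}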

void MoveEvents()
{
    for (Event ev; s_ts_queue.Pop(ev);)
    {
        ev.fifo_order = s_event_fifo_id++;
        s_event_queue.emplace_back(std::move(ev));
        std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    }
}

void Advance()
{
    MoveEvents();

    int cyclesExecuted = g_slice_length - DowncountToCycles(PowerPC::ppcState.downcount);
    g_global_timer += cyclesExecuted;
    s_last_OC_factor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
    g_last_OC_factor_inverted = 1.0f / s_last_OC_factor;
    g_slice_length = MAX_SLICE_LENGTH;

    s_is_global_timer_sane = true;

    while (!s_event_queue.empty() && s_event_queue.front().time <= g_global_timer)
    {
        Event evt = std::move(s_event_queue.front());
        std::pop_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
        s_event_queue.pop_back();
        // NOTICE_LOG(POWERPC, "[Scheduler] %-20s (%lld, %lld)", evt.type->name->c_str(),
        //            g_global_timer, evt.time);
        evt.type->callback(evt.userdata, g_global_timer - evt.time);
    }

    s_is_global_timer_sane = false;

    // Still events left (scheduled in the future)
    if (!s_event_queue.empty())
    {
        g_slice_length = static_cast<int>(
                             std::min<s64>(s_event_queue.front().time - g_global_timer, MAX_SLICE_LENGTH));
    }

    PowerPC::ppcState.downcount = CyclesToDowncount(g_slice_length);

    // Check for any external exceptions.
    // It's important to do this after processing events otherwise any exceptions will be delayed
    // until the next slice:
    //        Pokemon Box refuses to boot if the first exception from the audio DMA is received late
    PowerPC::CheckExternalExceptions();
}

void LogPendingEvents()
{
    auto clone = s_event_queue;
    std::sort(clone.begin(), clone.end());
    for (const Event& ev : clone)
    {
        INFO_LOG(POWERPC, "PENDING: Now: %" PRId64 " Pending: %" PRId64 " Type: %s", g_global_timer,
                 ev.time, ev.type->name->c_str());
    }
}

void Idle()
{
    if (SConfig::GetInstance().bSyncGPUOnSkipIdleHack)
    {
        // When the FIFO is processing data we must not advance, because doing so
        // would desynchronize the VI. So we wait until the FIFO finishes, processing
        // only the events required by the FIFO in the meantime.
        Fifo::FlushGpu();
    }

    s_idled_cycles += DowncountToCycles(PowerPC::ppcState.downcount);
    PowerPC::ppcState.downcount = 0;
}

std::string GetScheduledEventsSummary()
{
    std::string text = "Scheduled events\n";
    text.reserve(1000);

    auto clone = s_event_queue;
    std::sort(clone.begin(), clone.end());
    for (const Event& ev : clone)
    {
        text += StringFromFormat("%s : %" PRIi64 " %016" PRIx64 "\n", ev.type->name->c_str(), ev.time,
                                 ev.userdata);
    }
    return text;
}

u32 GetFakeDecStartValue()
{
    return s_fake_dec_start_value;
}

void SetFakeDecStartValue(u32 val)
{
    s_fake_dec_start_value = val;
}

u64 GetFakeDecStartTicks()
{
    return s_fake_dec_start_ticks;
}

void SetFakeDecStartTicks(u64 val)
{
    s_fake_dec_start_ticks = val;
}

u64 GetFakeTBStartValue()
{
    return g_fake_TB_start_value;
}

void SetFakeTBStartValue(u64 val)
{
    g_fake_TB_start_value = val;
}

u64 GetFakeTBStartTicks()
{
    return g_fake_TB_start_ticks;
}

void SetFakeTBStartTicks(u64 val)
{
    g_fake_TB_start_ticks = val;
}

}  // namespace CoreTiming
Example #8
namespace CoreTiming
{

struct EventType
{
	TimedCallback callback;
	std::string name;
};

static std::vector<EventType> event_types;

struct BaseEvent
{
	s64 time;
	u64 userdata;
	int type;
};

typedef LinkedListItem<BaseEvent> Event;

// STATE_TO_SAVE
static Event *first;
static std::mutex tsWriteLock;
static Common::FifoQueue<BaseEvent, false> tsQueue;

// event pools
static Event *eventPool = nullptr;

int slicelength;
static int maxSliceLength = MAX_SLICE_LENGTH;

static s64 globalTimer;
static s64 idledCycles;

static u32 fakeDecStartValue;
static u64 fakeDecStartTicks;
static u64 fakeTBStartValue;
static u64 fakeTBStartTicks;

static int ev_lost;


static void (*advanceCallback)(int cyclesExecuted) = nullptr;

static Event* GetNewEvent()
{
	if (!eventPool)
		return new Event;

	Event* ev = eventPool;
	eventPool = ev->next;
	return ev;
}

static void FreeEvent(Event* ev)
{
	ev->next = eventPool;
	eventPool = ev;
}

static void EmptyTimedCallback(u64 userdata, int cyclesLate) {}

int RegisterEvent(const std::string& name, TimedCallback callback)
{
	EventType type;
	type.name = name;
	type.callback = callback;

	// check for existing type with same name.
	// we want event type names to remain unique so that we can use them for serialization.
	for (auto& event_type : event_types)
	{
		if (name == event_type.name)
		{
			WARN_LOG(POWERPC, "Discarded old event type \"%s\" because a new type with the same name was registered.", name.c_str());
			// we don't know if someone might be holding on to the type index,
			// so we gut the old event type instead of actually removing it.
			event_type.name = "_discarded_event";
			event_type.callback = &EmptyTimedCallback;
		}
	}

	event_types.push_back(type);
	return (int)event_types.size() - 1;
}

void UnregisterAllEvents()
{
	if (first)
		PanicAlertT("Cannot unregister events with events pending");
	event_types.clear();
}

void Init()
{
	PowerPC::ppcState.downcount = maxSliceLength;
	slicelength = maxSliceLength;
	globalTimer = 0;
	idledCycles = 0;

	ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
}

void Shutdown()
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	MoveEvents();
	ClearPendingEvents();
	UnregisterAllEvents();

	while (eventPool)
	{
		Event *ev = eventPool;
		eventPool = ev->next;
		delete ev;
	}
}

static void EventDoState(PointerWrap &p, BaseEvent* ev)
{
	p.Do(ev->time);

	// this is why we can't have (nice things) pointers as userdata
	p.Do(ev->userdata);

	// we can't savestate ev->type directly because events might not get registered in the same order (or at all) every time.
	// so, we savestate the event's type's name, and derive ev->type from that when loading.
	std::string name;
	if (p.GetMode() != PointerWrap::MODE_READ)
		name = event_types[ev->type].name;

	p.Do(name);
	if (p.GetMode() == PointerWrap::MODE_READ)
	{
		bool foundMatch = false;
		for (unsigned int i = 0; i < event_types.size(); ++i)
		{
			if (name == event_types[i].name)
			{
				ev->type = i;
				foundMatch = true;
				break;
			}
		}
		if (!foundMatch)
		{
			WARN_LOG(POWERPC, "Lost event from savestate because its type, \"%s\", has not been registered.", name.c_str());
			ev->type = ev_lost;
		}
	}
}

void DoState(PointerWrap &p)
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	p.Do(slicelength);
	p.Do(globalTimer);
	p.Do(idledCycles);
	p.Do(fakeDecStartValue);
	p.Do(fakeDecStartTicks);
	p.Do(fakeTBStartValue);
	p.Do(fakeTBStartTicks);
	p.DoMarker("CoreTimingData");

	MoveEvents();

	p.DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, EventDoState>(first);
	p.DoMarker("CoreTimingEvents");
}

u64 GetTicks()
{
	return (u64)globalTimer;
}

u64 GetIdleTicks()
{
	return (u64)idledCycles;
}

// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata)
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}

// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
// in which case the event will get handled immediately, before returning.
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
{
	if (Core::IsCPUThread())
	{
		event_types[event_type].callback(userdata, 0);
	}
	else
	{
		ScheduleEvent_Threadsafe(0, event_type, userdata);
	}
}

void ClearPendingEvents()
{
	while (first)
	{
		Event *e = first->next;
		FreeEvent(first);
		first = e;
	}
}

static void AddEventToQueue(Event* ne)
{
	Event* prev = nullptr;
	Event** pNext = &first;
	for (;;)
	{
		Event*& next = *pNext;
		if (!next || ne->time < next->time)
		{
			ne->next = next;
			next = ne;
			break;
		}
		prev = next;
		pNext = &prev->next;
	}
}
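
A standalone restatement of the pointer-to-pointer insertion walk above, on a hypothetical intrusive node type; like AddEventToQueue, it keeps the list sorted ascending by time and places ties after existing entries:

#include <cassert>

struct Node  // hypothetical stand-in for LinkedListItem<BaseEvent>
{
	long long time;
	Node* next;
};

static void SortedInsert(Node*& first, Node* ne)
{
	// Walk the link slots themselves so no head/prev special case is needed.
	Node** pNext = &first;
	while (*pNext && (*pNext)->time <= ne->time)
		pNext = &(*pNext)->next;
	ne->next = *pNext;
	*pNext = ne;
}

int main()
{
	Node a{100, nullptr}, b{300, nullptr}, c{200, nullptr};
	Node* first = nullptr;
	SortedInsert(first, &a);
	SortedInsert(first, &b);  // 300: goes after 100
	SortedInsert(first, &c);  // 200: lands between 100 and 300
	assert(first == &a && a.next == &c && c.next == &b);
	return 0;
}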

// This must be run ONLY from within the CPU thread.
// cyclesIntoFuture may be VERY inaccurate if called from anything
// other than Advance().
void ScheduleEvent(int cyclesIntoFuture, int event_type, u64 userdata)
{
	Event *ne = GetNewEvent();
	ne->userdata = userdata;
	ne->type = event_type;
	ne->time = globalTimer + cyclesIntoFuture;
	AddEventToQueue(ne);
}

void RegisterAdvanceCallback(void (*callback)(int cyclesExecuted))
{
	advanceCallback = callback;
}

bool IsScheduled(int event_type)
{
	if (!first)
		return false;
	Event *e = first;
	while (e)
	{
		if (e->type == event_type)
			return true;
		e = e->next;
	}
	return false;
}

void RemoveEvent(int event_type)
{
	if (!first)
		return;

	while (first)
	{
		if (first->type == event_type)
		{
			Event *next = first->next;
			FreeEvent(first);
			first = next;
		}
		else
		{
			break;
		}
	}

	if (!first)
		return;

	Event *prev = first;
	Event *ptr = prev->next;
	while (ptr)
	{
		if (ptr->type == event_type)
		{
			prev->next = ptr->next;
			FreeEvent(ptr);
			ptr = prev->next;
		}
		else
		{
			prev = ptr;
			ptr = ptr->next;
		}
	}
}

void RemoveAllEvents(int event_type)
{
	MoveEvents();
	RemoveEvent(event_type);
}

void SetMaximumSlice(int maximumSliceLength)
{
	maxSliceLength = maximumSliceLength;
}

void ForceExceptionCheck(int cycles)
{
	if (PowerPC::ppcState.downcount > cycles)
	{
		slicelength -= (PowerPC::ppcState.downcount - cycles); // Account for cycles already executed by adjusting the slicelength
		PowerPC::ppcState.downcount = cycles;
	}
}

void ResetSliceLength()
{
	maxSliceLength = MAX_SLICE_LENGTH;
}


// This raises only the events required while the FIFO is processing data
void ProcessFifoWaitEvents()
{
	MoveEvents();

	if (!first)
		return;

	while (first)
	{
		if (first->time <= globalTimer)
		{
			Event* evt = first;
			first = first->next;
			event_types[evt->type].callback(evt->userdata, (int)(globalTimer - evt->time));
			FreeEvent(evt);
		}
		else
		{
			break;
		}
	}
}

void MoveEvents()
{
	BaseEvent sevt;
	while (tsQueue.Pop(sevt))
	{
		Event *evt = GetNewEvent();
		evt->time = sevt.time;
		evt->userdata = sevt.userdata;
		evt->type = sevt.type;
		AddEventToQueue(evt);
	}
}

void Advance()
{
	MoveEvents();

	int cyclesExecuted = slicelength - PowerPC::ppcState.downcount;
	globalTimer += cyclesExecuted;
	PowerPC::ppcState.downcount = slicelength;

	while (first)
	{
		if (first->time <= globalTimer)
		{
			//LOG(POWERPC, "[Scheduler] %s     (%lld, %lld) ",
			//             event_types[first->type].name ? event_types[first->type].name : "?", (u64)globalTimer, (u64)first->time);
			Event* evt = first;
			first = first->next;
			event_types[evt->type].callback(evt->userdata, (int)(globalTimer - evt->time));
			FreeEvent(evt);
		}
		else
		{
			break;
		}
	}

	if (!first)
	{
		WARN_LOG(POWERPC, "WARNING - no events in queue. Setting downcount to 10000");
		PowerPC::ppcState.downcount += 10000;
	}
	else
	{
		slicelength = (int)(first->time - globalTimer);
		if (slicelength > maxSliceLength)
			slicelength = maxSliceLength;
		PowerPC::ppcState.downcount = slicelength;
	}

	if (advanceCallback)
		advanceCallback(cyclesExecuted);
}

void LogPendingEvents()
{
	Event *ptr = first;
	while (ptr)
	{
		INFO_LOG(POWERPC, "PENDING: Now: %" PRId64 " Pending: %" PRId64 " Type: %d", globalTimer, ptr->time, ptr->type);
		ptr = ptr->next;
	}
}

void Idle()
{
	//DEBUG_LOG(POWERPC, "Idle");

	// When the FIFO is processing data we must not advance, because doing so
	// would desynchronize the VI. So we wait until the FIFO finishes, processing
	// only the events required by the FIFO in the meantime.
	while (g_video_backend->Video_IsPossibleWaitingSetDrawDone())
	{
		ProcessFifoWaitEvents();
		Common::YieldCPU();
	}

	idledCycles += PowerPC::ppcState.downcount;
	PowerPC::ppcState.downcount = 0;

	Advance();
}

std::string GetScheduledEventsSummary()
{
	Event *ptr = first;
	std::string text = "Scheduled events\n";
	text.reserve(1000);
	while (ptr)
	{
		unsigned int t = ptr->type;
		if (t >= event_types.size())
			PanicAlertT("Invalid event type %u", t);

		const std::string& name = event_types[ptr->type].name;

		text += StringFromFormat("%s : %" PRIi64 " %016" PRIx64 "\n", name.c_str(), ptr->time, ptr->userdata);
		ptr = ptr->next;
	}
	return text;
}

u32 GetFakeDecStartValue()
{
	return fakeDecStartValue;
}

void SetFakeDecStartValue(u32 val)
{
	fakeDecStartValue = val;
}

u64 GetFakeDecStartTicks()
{
	return fakeDecStartTicks;
}

void SetFakeDecStartTicks(u64 val)
{
	fakeDecStartTicks = val;
}

u64 GetFakeTBStartValue()
{
	return fakeTBStartValue;
}

void SetFakeTBStartValue(u64 val)
{
	fakeTBStartValue = val;
}

u64 GetFakeTBStartTicks()
{
	return fakeTBStartTicks;
}

void SetFakeTBStartTicks(u64 val)
{
	fakeTBStartTicks = val;
}

}  // namespace CoreTiming
Example #9
namespace CoreTiming
{

struct EventType
{
	TimedCallback callback;
	std::string name;
};

static std::vector<EventType> event_types;

struct BaseEvent
{
	s64 time;
	u64 userdata;
	int type;
};

typedef LinkedListItem<BaseEvent> Event;

// STATE_TO_SAVE
static Event *first;
static std::mutex tsWriteLock;
static Common::FifoQueue<BaseEvent, false> tsQueue;

// event pools
static Event *eventPool = nullptr;

static float s_lastOCFactor;
float g_lastOCFactor_inverted;
int g_slicelength;
static int maxslicelength = MAX_SLICE_LENGTH;

static s64 idledCycles;
static u32 fakeDecStartValue;
static u64 fakeDecStartTicks;

// Are we in a function that has been called from Advance()
static bool globalTimerIsSane;

s64 g_globalTimer;
u64 g_fakeTBStartValue;
u64 g_fakeTBStartTicks;

static int ev_lost;

static Event* GetNewEvent()
{
	if (!eventPool)
		return new Event;

	Event* ev = eventPool;
	eventPool = ev->next;
	return ev;
}

static void FreeEvent(Event* ev)
{
	ev->next = eventPool;
	eventPool = ev;
}

static void EmptyTimedCallback(u64 userdata, s64 cyclesLate) {}

// Changing the CPU speed in Dolphin isn't actually done by changing the physical clock rate,
// but by changing the amount of work done in a particular amount of time. This tends to be more
// compatible because it stops the games from actually knowing directly that the clock rate has
// changed, and ensures that anything based on waiting a specific number of cycles still works.
//
// Technically it might be more accurate to call this changing the IPC instead of the CPU speed,
// but the effect is largely the same.
static int DowncountToCycles(int downcount)
{
	return (int)(downcount * g_lastOCFactor_inverted);
}

static int CyclesToDowncount(int cycles)
{
	return (int)(cycles * s_lastOCFactor);
}

int RegisterEvent(const std::string& name, TimedCallback callback)
{
	EventType type;
	type.name = name;
	type.callback = callback;

	// check for existing type with same name.
	// we want event type names to remain unique so that we can use them for serialization.
	for (auto& event_type : event_types)
	{
		if (name == event_type.name)
		{
			WARN_LOG(POWERPC, "Discarded old event type \"%s\" because a new type with the same name was registered.", name.c_str());
			// we don't know if someone might be holding on to the type index,
			// so we gut the old event type instead of actually removing it.
			event_type.name = "_discarded_event";
			event_type.callback = &EmptyTimedCallback;
		}
	}

	event_types.push_back(type);
	return (int)event_types.size() - 1;
}

void UnregisterAllEvents()
{
	if (first)
		PanicAlert("Cannot unregister events with events pending");
	event_types.clear();
}

void Init()
{
	s_lastOCFactor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
	g_lastOCFactor_inverted = 1.0f / s_lastOCFactor;
	PowerPC::ppcState.downcount = CyclesToDowncount(maxslicelength);
	g_slicelength = maxslicelength;
	g_globalTimer = 0;
	idledCycles = 0;
	globalTimerIsSane = true;

	ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
}

void Shutdown()
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	MoveEvents();
	ClearPendingEvents();
	UnregisterAllEvents();

	while (eventPool)
	{
		Event *ev = eventPool;
		eventPool = ev->next;
		delete ev;
	}
}

static void EventDoState(PointerWrap &p, BaseEvent* ev)
{
	p.Do(ev->time);

	// this is why we can't have (nice things) pointers as userdata
	p.Do(ev->userdata);

	// we can't savestate ev->type directly because events might not get registered in the same order (or at all) every time.
	// so, we savestate the event's type's name, and derive ev->type from that when loading.
	std::string name;
	if (p.GetMode() != PointerWrap::MODE_READ)
		name = event_types[ev->type].name;

	p.Do(name);
	if (p.GetMode() == PointerWrap::MODE_READ)
	{
		bool foundMatch = false;
		for (unsigned int i = 0; i < event_types.size(); ++i)
		{
			if (name == event_types[i].name)
			{
				ev->type = i;
				foundMatch = true;
				break;
			}
		}
		if (!foundMatch)
		{
			WARN_LOG(POWERPC, "Lost event from savestate because its type, \"%s\", has not been registered.", name.c_str());
			ev->type = ev_lost;
		}
	}
}

void DoState(PointerWrap &p)
{
	std::lock_guard<std::mutex> lk(tsWriteLock);
	p.Do(g_slicelength);
	p.Do(g_globalTimer);
	p.Do(idledCycles);
	p.Do(fakeDecStartValue);
	p.Do(fakeDecStartTicks);
	p.Do(g_fakeTBStartValue);
	p.Do(g_fakeTBStartTicks);
	p.Do(s_lastOCFactor);
	if (p.GetMode() == PointerWrap::MODE_READ)
		g_lastOCFactor_inverted = 1.0f / s_lastOCFactor;

	p.DoMarker("CoreTimingData");

	MoveEvents();

	p.DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, EventDoState>(first);
	p.DoMarker("CoreTimingEvents");
}

// This should only be called from the CPU thread. If you are calling it from any other thread, you are doing something evil.
u64 GetTicks()
{
	u64 ticks = (u64)g_globalTimer;
	if (!globalTimerIsSane)
	{
		int downcount = DowncountToCycles(PowerPC::ppcState.downcount);
		ticks += g_slicelength - downcount;
	}
	return ticks;
}

u64 GetIdleTicks()
{
	return (u64)idledCycles;
}

// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, !Core::IsCPUThread(), "ScheduleEvent_Threadsafe from wrong thread");
	if (Core::g_want_determinism)
	{
		ERROR_LOG(POWERPC, "Someone scheduled an off-thread \"%s\" event while netplay or movie play/record "
		                   "was active.  This is likely to cause a desync.",
		                   event_types[event_type].name.c_str());
	}
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = g_globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}

// Executes an event immediately, then returns.
void ScheduleEvent_Immediate(int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, Core::IsCPUThread(), "ScheduleEvent_Immediate from wrong thread");
	event_types[event_type].callback(userdata, 0);
}

// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
// in which case this is the same as ScheduleEvent_Immediate.
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
{
	if (Core::IsCPUThread())
	{
		event_types[event_type].callback(userdata, 0);
	}
	else
	{
		ScheduleEvent_Threadsafe(0, event_type, userdata);
	}
}

// To be used from any thread, including the CPU thread
void ScheduleEvent_AnyThread(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	if (Core::IsCPUThread())
		ScheduleEvent(cyclesIntoFuture, event_type, userdata);
	else
		ScheduleEvent_Threadsafe(cyclesIntoFuture, event_type, userdata);
}
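
A hypothetical call site for the dispatching helper: code that may run on either thread can use it without checking `Core::IsCPUThread()` itself. `ev_int` is an assumed event type index:

// Hypothetical: ev_int is an index previously returned by RegisterEvent.
void RaiseInterruptIn(int ev_int, s64 cycles, u64 data)
{
	// On the CPU thread this inserts directly into the sorted list; from
	// any other thread it goes through the mutex-guarded tsQueue instead.
	CoreTiming::ScheduleEvent_AnyThread(cycles, ev_int, data);
}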

void ClearPendingEvents()
{
	while (first)
	{
		Event *e = first->next;
		FreeEvent(first);
		first = e;
	}
}

static void AddEventToQueue(Event* ne)
{
	Event* prev = nullptr;
	Event** pNext = &first;
	for (;;)
	{
		Event*& next = *pNext;
		if (!next || ne->time < next->time)
		{
			ne->next = next;
			next = ne;
			break;
		}
		prev = next;
		pNext = &prev->next;
	}
}

// This must be run ONLY from within the CPU thread.
// cyclesIntoFuture may be VERY inaccurate if called from anything
// other than Advance().
void ScheduleEvent(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, Core::IsCPUThread() || Core::GetState() == Core::CORE_PAUSE,
				 "ScheduleEvent from wrong thread");

	Event *ne = GetNewEvent();
	ne->userdata = userdata;
	ne->type = event_type;
	ne->time = GetTicks() + cyclesIntoFuture;

	// If this event needs to be scheduled before the next Advance(), force one early
	if (!globalTimerIsSane)
		ForceExceptionCheck(cyclesIntoFuture);


	AddEventToQueue(ne);
}

void RemoveEvent(int event_type)
{
	while (first && first->type == event_type)
	{
		Event* next = first->next;
		FreeEvent(first);
		first = next;
	}

	if (!first)
		return;

	Event *prev = first;
	Event *ptr = prev->next;
	while (ptr)
	{
		if (ptr->type == event_type)
		{
			prev->next = ptr->next;
			FreeEvent(ptr);
			ptr = prev->next;
		}
		else
		{
			prev = ptr;
			ptr = ptr->next;
		}
	}
}

void RemoveAllEvents(int event_type)
{
	MoveEvents();
	RemoveEvent(event_type);
}

void ForceExceptionCheck(s64 cycles)
{
	if (s64(DowncountToCycles(PowerPC::ppcState.downcount)) > cycles)
	{
		// downcount is always (much) smaller than INT_MAX so we can safely cast cycles to an int here.
		g_slicelength -= (DowncountToCycles(PowerPC::ppcState.downcount) - (int)cycles); // Account for cycles already executed by adjusting the g_slicelength
		PowerPC::ppcState.downcount = CyclesToDowncount((int)cycles);
	}
}


// This raises only the events required while the FIFO is processing data
void ProcessFifoWaitEvents()
{
	MoveEvents();

	if (!first)
		return;

	while (first)
	{
		if (first->time <= g_globalTimer)
		{
			Event* evt = first;
			first = first->next;
			event_types[evt->type].callback(evt->userdata, (int)(g_globalTimer - evt->time));
			FreeEvent(evt);
		}
		else
		{
			break;
		}
	}
}

void MoveEvents()
{
	BaseEvent sevt;
	while (tsQueue.Pop(sevt))
	{
		Event *evt = GetNewEvent();
		evt->time = sevt.time;
		evt->userdata = sevt.userdata;
		evt->type = sevt.type;
		AddEventToQueue(evt);
	}
}

void Advance()
{
	MoveEvents();

	int cyclesExecuted = g_slicelength - DowncountToCycles(PowerPC::ppcState.downcount);
	g_globalTimer += cyclesExecuted;
	s_lastOCFactor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
	g_lastOCFactor_inverted = 1.0f / s_lastOCFactor;
	PowerPC::ppcState.downcount = CyclesToDowncount(g_slicelength);

	globalTimerIsSane = true;

	while (first && first->time <= g_globalTimer)
	{
		//LOG(POWERPC, "[Scheduler] %s     (%lld, %lld) ",
		//             event_types[first->type].name ? event_types[first->type].name : "?", (u64)g_globalTimer, (u64)first->time);
		Event* evt = first;
		first = first->next;
		event_types[evt->type].callback(evt->userdata, (int)(g_globalTimer - evt->time));
		FreeEvent(evt);
	}

	globalTimerIsSane = false;

	if (!first)
	{
		WARN_LOG(POWERPC, "WARNING - no events in queue. Setting downcount to 10000");
		PowerPC::ppcState.downcount += CyclesToDowncount(10000);
	}
	else
	{
		g_slicelength = (int)(first->time - g_globalTimer);
		if (g_slicelength > maxslicelength)
			g_slicelength = maxslicelength;
		PowerPC::ppcState.downcount = CyclesToDowncount(g_slicelength);
	}
}

void LogPendingEvents()
{
	Event *ptr = first;
	while (ptr)
	{
		INFO_LOG(POWERPC, "PENDING: Now: %" PRId64 " Pending: %" PRId64 " Type: %d", g_globalTimer, ptr->time, ptr->type);
		ptr = ptr->next;
	}
}

void Idle()
{
	//DEBUG_LOG(POWERPC, "Idle");

	if (SConfig::GetInstance().bSyncGPUOnSkipIdleHack)
	{
		// When the FIFO is processing data we must not advance, because doing so
		// would desynchronize the VI. So we wait until the FIFO finishes, processing
		// only the events required by the FIFO in the meantime.
		ProcessFifoWaitEvents();
		Fifo::FlushGpu();
	}

	idledCycles += DowncountToCycles(PowerPC::ppcState.downcount);
	PowerPC::ppcState.downcount = 0;
}

std::string GetScheduledEventsSummary()
{
	Event *ptr = first;
	std::string text = "Scheduled events\n";
	text.reserve(1000);
	while (ptr)
	{
		unsigned int t = ptr->type;
		if (t >= event_types.size())
			PanicAlertT("Invalid event type %u", t);

		const std::string& name = event_types[ptr->type].name;

		text += StringFromFormat("%s : %" PRIi64 " %016" PRIx64 "\n", name.c_str(), ptr->time, ptr->userdata);
		ptr = ptr->next;
	}
	return text;
}

u32 GetFakeDecStartValue()
{
	return fakeDecStartValue;
}

void SetFakeDecStartValue(u32 val)
{
	fakeDecStartValue = val;
}

u64 GetFakeDecStartTicks()
{
	return fakeDecStartTicks;
}

void SetFakeDecStartTicks(u64 val)
{
	fakeDecStartTicks = val;
}

u64 GetFakeTBStartValue()
{
	return g_fakeTBStartValue;
}

void SetFakeTBStartValue(u64 val)
{
	g_fakeTBStartValue = val;
}

u64 GetFakeTBStartTicks()
{
	return g_fakeTBStartTicks;
}

void SetFakeTBStartTicks(u64 val)
{
	g_fakeTBStartTicks = val;
}

}  // namespace CoreTiming