Example #1
	static void coroutineEntry()
	{
		Coroutine::Impl *pImpl = (Coroutine::Impl*)GetFiberData();
		pImpl->entry();
		pImpl->status = Coroutine::Status::Terminated;
		SwitchToFiber(pImpl->pReturnFiber);
	}
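For context, here is a minimal, hedged sketch (not from the original project) of the creation and resume side this entry routine implies. GetFiberData() returns whatever lpParameter was passed to CreateFiber, so handing the Impl pointer to CreateFiber is what makes the cast in coroutineEntry valid. The Impl layout, the Status enumerators and the resume() helper below are assumptions.

	// Hedged sketch: everything except coroutineEntry's body is assumed.
	#include <windows.h>
	#include <functional>

	struct Coroutine
	{
		enum class Status { Suspended, Terminated };

		struct Impl                        // minimal stand-in for the real Impl
		{
			std::function<void()> entry;   // coroutine body
			Status status = Status::Suspended;
			LPVOID pFiber = nullptr;       // fiber created for this coroutine
			LPVOID pReturnFiber = nullptr; // fiber to switch back to on return
		};

		static VOID CALLBACK coroutineEntry(LPVOID) // proper LPFIBER_START_ROUTINE signature
		{
			Coroutine::Impl *pImpl = (Coroutine::Impl*)GetFiberData();
			pImpl->entry();
			pImpl->status = Coroutine::Status::Terminated;
			SwitchToFiber(pImpl->pReturnFiber);
		}

		static void resume(Impl *pImpl)
		{
			if (pImpl->pFiber == nullptr)
				pImpl->pFiber = CreateFiber(0, coroutineEntry, pImpl); // pImpl becomes the fiber data
			pImpl->pReturnFiber = GetCurrentFiber(); // caller must already be a fiber
			SwitchToFiber(pImpl->pFiber);
		}
	};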
Example #2
VOID CALLBACK Fbt_Startup(PVOID pParam)
{
 assert(pParam == GetFiberData());
 Fbt_AfterSwitch(pParam);
 DoStuff();
 Fbt_Exit();
}
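A hedged sketch of the creation path that establishes the invariant asserted above: CreateFiber hands its lpParameter both to the start routine and to later GetFiberData() calls, so creating the fiber with the per-fiber FiberData pointer makes the two agree. Fbt_CreateFiber, the hFiber field and the allocation strategy below are assumptions, not part of the original library.

/* Hedged sketch: only Fbt_Startup comes from the example above. */
#include <windows.h>
#include <stdlib.h>

VOID CALLBACK Fbt_Startup(PVOID pParam); /* defined above */

struct FiberData
{
 PVOID hFiber; /* minimal stand-in; the real structure has more fields */
};

struct FiberData * Fbt_CreateFiber(SIZE_T nStackSize)
{
 struct FiberData * pfd = (struct FiberData *)calloc(1, sizeof(*pfd));

 if(pfd == NULL)
  return NULL;

 /* pfd is simultaneously the start parameter and the fiber data */
 pfd->hFiber = CreateFiber(nStackSize, Fbt_Startup, pfd);

 if(pfd->hFiber == NULL)
 {
  free(pfd);
  return NULL;
 }

 return pfd;
}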
Example #3
	static void yield(YieldOperation &&yieldOperation)
	{
		Coroutine::Impl *pImpl = (Coroutine::Impl*)GetFiberData();
		assert(pImpl != nullptr);
		pImpl->yieldOperation = &yieldOperation;
		SwitchToFiber(pImpl->pReturnFiber);
	}
Example #4
static void start_thread(void)
{
    void (*func)(void) = GetFiberData();
    func();
    /* go out if thread function returns */
    thread_exit();
}
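A hedged sketch of the corresponding creation call (thread_create and stack_size are assumed names): the thread function itself is passed as the fiber data, which is why start_thread above can recover it with GetFiberData(). Because start_thread takes no LPVOID parameter, it has to be cast to LPFIBER_START_ROUTINE at creation time.

/* Hedged sketch: only start_thread comes from the example above. */
#include <windows.h>

static LPVOID thread_create(void (*func)(void), SIZE_T stack_size)
{
    /* func rides along as the LPVOID fiber parameter; GetFiberData() returns it */
    return CreateFiber(stack_size, (LPFIBER_START_ROUTINE)start_thread, (LPVOID)func);
}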
Example #5
void clemu_SetEndOfGroup(void)
{
    CL_EMU_FIBER *pFiber = (CL_EMU_FIBER *)GetFiberData();
    clemuKernelGroup *group = (clemuKernelGroup *)pFiber->m_group_ptr;
    group->SetEndof(1);
}
Example #6
uint32 jsWait(uint32 ms)
{
	Fiber* pSelf = GetFiberData();
	WaitingFiber* pWaitingFiber;
	for (;;)
	{
		int oldWaitingFiberHead = g_waitingFiberHead;
		int waitingFiberHead = g_waitingFiberHead;

		do
		{
			waitingFiberHead = (waitingFiberHead + 1) % MAX_WAITING_FIBERS;
		} while (g_waitingFibers[waitingFiberHead].pFiber != NULL);

		if (InterlockedCompareExchange(&g_waitingFiberHead, waitingFiberHead, oldWaitingFiberHead) == oldWaitingFiberHead)
		{
			pWaitingFiber = g_waitingFibers + waitingFiberHead;
			break;
		}
	}

	pWaitingFiber->pFiber = pSelf;

	uint64 perfCounter;
	QueryPerformanceCounter((LARGE_INTEGER*)&perfCounter);
	pWaitingFiber->time = perfCounter + ms*(g_performanceFrequency / 1000);

	//sort into waiting list
	for (;;)
	{
		WaitingFiber* pLastFiber = &g_waitListTail;
		WaitingFiber* pNextWaitingFiber = g_waitListTail.pNextWaitingFiber;

		while (pNextWaitingFiber != NULL && pNextWaitingFiber->time < pWaitingFiber->time)
		{
			pLastFiber = pNextWaitingFiber;
			pNextWaitingFiber = pNextWaitingFiber->pNextWaitingFiber;
		}

		pWaitingFiber->pNextWaitingFiber = pNextWaitingFiber;

		if (InterlockedCompareExchangePointer(&pLastFiber->pNextWaitingFiber, pWaitingFiber, pNextWaitingFiber) == pNextWaitingFiber)
		{
			break;
		}
	}

	pSelf->status = WAITING;
	runNewFiber();
	uint64 perfCounter2;
	QueryPerformanceCounter((LARGE_INTEGER*)&perfCounter2);
	uint64 elapsedTime = (perfCounter2 - perfCounter) * 1000 / g_performanceFrequency;
	pWaitingFiber->pFiber = NULL;
	return (uint32)elapsedTime;
}
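One global jsWait relies on but does not show is g_performanceFrequency. A hedged sketch of how it is presumably captured once at startup (initTimer is an assumed name):

// Hedged sketch: jsWait converts milliseconds to performance-counter ticks
// via g_performanceFrequency, captured once from QueryPerformanceFrequency.
static uint64 g_performanceFrequency;

static void initTimer()
{
	QueryPerformanceFrequency((LARGE_INTEGER*)&g_performanceFrequency);
}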
Example #7
 static void MainThreadInit()
 {            
     PVOID pData = GetCurrentFiber();
     if (pData == (void*)0x1E00)  // magic
     {
         LPVOID h = ConvertThreadToFiber( &MainFiberId );
         ESS_ASSERT(h != 0);
         ESS_ASSERT( GetFiberData() == &MainFiberId );
     }
 }
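The 0x1E00 magic compares GetCurrentFiber() against the value the TEB's FiberData/Version slot holds on a thread that has never been converted to a fiber. On Windows Vista and later, the documented IsThreadAFiber() call expresses the same check without the magic number; a hedged sketch reusing MainFiberId and ESS_ASSERT from the example above (MainThreadInitVista is an assumed name):

 // Hedged sketch: same effect as MainThreadInit, but using IsThreadAFiber()
 // (available on Windows Vista and later) instead of the 0x1E00 magic.
 static void MainThreadInitVista()
 {
     if (!IsThreadAFiber())
     {
         LPVOID h = ConvertThreadToFiber( &MainFiberId );
         ESS_ASSERT(h != 0);
         ESS_ASSERT( GetFiberData() == &MainFiberId );
     }
 }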
Example #8
/*************************************************************************************
INTERNAL
*************************************************************************************/
static VOID
__stdcall clemu_wavefrontthread_proc ( LPVOID lpParameter )
{
    CL_EMU_FIBER *pFiber = (CL_EMU_FIBER *)GetFiberData();
    clemuKernelGroup *group = (clemuKernelGroup *)pFiber->m_group_ptr;
    //clemuKernelJob *job = (clemuKernelJob *)(group->GetParent());
    KERNEL_ENTRY_POINT kernel_entry = group->GetKernel();
    //	while(true)
    {
        kernel_entry(lpParameter);
        clemu_SetEndOfGroup();
        clemu_ScheduleGrpThread();
    }
}
Example #9
void clemu_ScheduleThread(int _group)
{
    CL_EMU_FIBER *pFiber = (CL_EMU_FIBER *)GetFiberData();
    clemuKernelGroup *group = (clemuKernelGroup *)pFiber->m_group_ptr;
    clemuKernelJob *job = (clemuKernelJob *)(group->GetParent());
    int wf_sz = job->GetRealWFSz(pFiber->m_wfid);
    CL_EMU_FIBER *pNextFiber = 0;
    int new_thread = (pFiber->m_wf_tid + 1) % wf_sz;

    if (!_group || (_group && new_thread > 0))
    {
        // wrap around the wavefront
        pNextFiber = group->GetTFiber(pFiber->m_wfid, new_thread);
    }
    else
    {
        // reschedule the wavefront
        pNextFiber = group->GetFiber();
    }

    SwitchToFiber(pNextFiber->m_FIBER_id);
}
Example #10
void jsWaitForCounter(Counter* pCounter)
{
	// counter already zero
	if (pCounter->counter == 0)
		return;

	Fiber* pSelf = GetFiberData();

	// push fiber in wait queue
	pSelf->pNextInWaitList = NULL;
	for (;;)
	{
		Fiber* pOldWaitListHead = pCounter->pWaitListHead;
		if (InterlockedCompareExchangePointer(&pCounter->pWaitListHead, pSelf, pOldWaitListHead) == pOldWaitListHead)
		{
			if (pOldWaitListHead != NULL)	// list may be empty on the first wait
				pOldWaitListHead->pNextInWaitList = pSelf;
			break;
		}
	}

	// swap in new fiber
	pSelf->status = WAITING;
	runNewFiber();
}
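A hedged sketch of the producing side implied above (jsFinishJob is an assumed name; the exact Counter layout is not shown): jobs decrement the counter as they complete, and the scheduler, once it observes zero, resumes the fibers that jsWaitForCounter chained through pNextInWaitList. That wake-up path is omitted here.

// Hedged sketch: only the counter decrement; waking the parked fibers is
// left to the scheduler and is not reproduced here.
void jsFinishJob(Counter* pCounter)
{
	InterlockedDecrement((volatile LONG*)&pCounter->counter);
}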
Example #11
VOID
WINAPI
BaseFiberStartup(VOID)
{
#ifdef _M_IX86
    PFIBER Fiber = GetCurrentFiber();

    /* Call the Thread Startup Routine */
    DPRINT("Starting Fiber\n");
    BaseThreadStartup((LPTHREAD_START_ROUTINE)Fiber->Context.Eax,
                      (LPVOID)Fiber->Context.Ebx);
#elif defined(_M_AMD64)
    PFIBER Fiber = GetFiberData();

    /* Call the Thread Startup Routine */
    DPRINT1("Starting Fiber\n");
    BaseThreadStartup((LPTHREAD_START_ROUTINE)Fiber->Context.Rax,
                      (LPVOID)Fiber->Context.Rbx);
#else
#warning Unknown architecture
    UNIMPLEMENTED;
    DbgBreakPoint();
#endif
}
Example #12
	static inline uthread_impl *current()
	{
		return (uthread_impl*)GetFiberData();
	}
Example #13
struct FiberData * Fbt_GetCurrent(VOID)
{
 return GetFiberData();
}
Example #14
void Fbt_Dispatch(struct FiberData * pfdCur, int bExit)
{
 UCHAR i;
 UCHAR n;
 struct FiberData * pfdNext;

 assert(pfdCur == GetFiberData());

 ++ nQuantum;

 /* Every ten quantums check for starving threads */
 /* FIXME: this implementation of starvation prevention isn't that great */
 if(nQuantum % 10 == 0)
 {
  int j;
  int k;
  int b;
  int bResume;
  PLIST_ENTRY ple = NULL;

  bResume = 0;
  i = 0;

  /* Pick up from where we left last time */
  if(pfdLastStarveScan)
  {
   unsigned nPrio;

   nPrio = pfdLastStarveScan->nPrio;

   /* The last fiber we scanned for starvation isn't queued anymore */
   if(IsListEmpty(&pfdLastStarveScan->leQueue))
    /* Scan the ready queue for its priority */
    i = nPrio;
   /* Last fiber for its priority level */
   else if(pfdLastStarveScan->leQueue.Flink == &a_leQueues[nPrio])
    /* Scan the ready queue for the next priority level */
    i = nPrio + 1;
   /* Scan the next fiber in the ready queue */
   else
   {
    i = nPrio;
    ple = pfdLastStarveScan->leQueue.Flink;
    bResume = 1;
   }

   /* Priority levels 15-31 are never checked for starvation */
   if(i >= 15)
   {
    if(bResume)
     bResume = 0;

    i = 0;
   }
  }

  /*
   Scan at most 16 threads, in the priority range 0-14, applying in total at
   most 10 boosts. This loop scales O(1)
  */
  for(j = 0, k = 0, b = 0; j < 16 && k < 15 && b < 10; ++ j)
  {
   unsigned nDiff;

   /* No previous state to resume from */
   if(!bResume)
   {
    int nQueue;

    /* Get the first element in the current queue */
    nQueue = (k + i) % 15;

    if(IsListEmpty(&a_leQueues[nQueue]))
    {
     ++ k;
     continue;
    }

    ple = (PLIST_ENTRY)a_leQueues[nQueue].Flink;
   }
   else
    bResume = 0;

   /* Get the current fiber */
   pfdLastStarveScan = CONTAINING_RECORD(ple, struct FiberData, leQueue);
   assert(pfdLastStarveScan->nMagic == 0x12345678);
   assert(pfdLastStarveScan != pfdCur);

   /* Calculate the number of quantums the fiber has been in the queue */
   if(nQuantum > pfdLastStarveScan->nQuantumQueued)
    nDiff = nQuantum - pfdLastStarveScan->nQuantumQueued;
   else
    nDiff = UINT_MAX - pfdLastStarveScan->nQuantumQueued + nQuantum;

   /* The fiber has been ready for more than 30 quantums: it's starving */
   if(nDiff > 30)
   {
    /* Plus one boost applied */
    ++ b;

    /* Apply the boost */
    pfdLastStarveScan->nBoost = 1;
    pfdLastStarveScan->nRealPrio = pfdLastStarveScan->nPrio;
    pfdLastStarveScan->nPrio = 15;

    /* Re-enqueue the fiber in the correct priority queue */
    RemoveEntryList(&pfdLastStarveScan->leQueue);
    InsertTailList(&a_leQueues[15], &pfdLastStarveScan->leQueue);
   }
  }
 }