//--------------------------------------------------------------------------- void Mutex_Release( Mutex_t *pstMutex_ ) { KERNEL_TRACE_1( STR_MUTEX_RELEASE_1, (K_USHORT)Thread_GetID( g_pstCurrent ) ); K_BOOL bSchedule = 0; // Disable the scheduler while we deal with internal data structures. Scheduler_SetScheduler( false ); // This thread had better be the one that owns the Mutex_t currently... KERNEL_ASSERT( (g_pstCurrent == pstMutex_->pstOwner) ); // If the owner had claimed the lock multiple times, decrease the lock // count and return immediately. if (pstMutex_->ucRecurse) { pstMutex_->ucRecurse--; Scheduler_SetScheduler( true ); return; } // Restore the thread's original priority if (Thread_GetCurPriority( g_pstCurrent ) != Thread_GetPriority( g_pstCurrent )) { Thread_SetPriority( g_pstCurrent, Thread_GetPriority(g_pstCurrent) ); // In this case, we want to reschedule bSchedule = 1; } // No threads are waiting on this Mutex_t? if ( LinkList_GetHead( (LinkList_t*)pstMutex_ ) == NULL) { // Re-initialize the Mutex_t to its default values pstMutex_->bReady = 1; pstMutex_->ucMaxPri = 0; pstMutex_->pstOwner = NULL; } else { // Wake the highest priority Thread_t pending on the Mutex_t if( Mutex_WakeNext( pstMutex_ ) ) { // Switch threads if it's higher or equal priority than the current thread bSchedule = 1; } } // Must enable the scheduler again in order to switch threads. Scheduler_SetScheduler( true ); if(bSchedule) { // Switch threads if a higher-priority thread was woken Thread_Yield(); } }
//--------------------------------------------------------------------------- static void Thread_Profiling() { K_USHORT i; for (i = 0; i < 100; i++) { // Profile the amount of time it takes to initialize a representative // test Thread_t, simulating an "average" system Thread_t. Create the // Thread_t at a higher priority than the current Thread_t. ProfileTimer_Start( &stThreadInitTimer ); Thread_Init( &stTestThread1, aucTestStack1, TEST_STACK1_SIZE, 2, (ThreadEntry_t)Thread_ProfilingThread, NULL); ProfileTimer_Stop( &stThreadInitTimer ); // Profile the time it takes from calling "start" to the time when the // Thread_t becomes active ProfileTimer_Start( &stThreadStartTimer ); Thread_Start( &stTestThread1 ); //-- Switch to the test Thread_t -- // Stop the Thread_t-exit profiling timer, which was started from the // test Thread_t ProfileTimer_Stop( &stThreadExitTimer ); } Scheduler_SetScheduler(0); for (i = 0; i < 100; i++) { // Context switch profiling - this is equivalent to what's actually // done within the AVR-implementation. ProfileTimer_Start( &stContextSwitchTimer ); { Thread_SaveContext(); g_pstNext = g_pstCurrent; Thread_RestoreContext(); } ProfileTimer_Stop( &stContextSwitchTimer ); } Scheduler_SetScheduler(1); }
//--------------------------------------------------------------------------- void ThreadPort_StartThreads() { KernelSWI_Config(); // configure the task switch SWI KernelTimer_Config(); // configure the kernel timer Scheduler_SetScheduler(1); // enable the scheduler Scheduler_Schedule(); // run the scheduler - determine the first Thread_t to run Thread_Switch(); // Set the next scheduled Thread_t to the current Thread_t KernelTimer_Start(); // enable the kernel timer KernelSWI_Start(); // enable the task switch SWI ThreadPort_StartFirstThread(); // Jump to the first Thread_t (does not return) }
//--------------------------------------------------------------------------- void ThreadPort_StartThreads() { KernelSWI_Config(); // configure the task switch SWI KernelTimer_Config(); // configure the kernel timer Scheduler_SetScheduler(1); // enable the scheduler Scheduler_Schedule(); // run the scheduler - determine the first thread to run Thread_Switch(); // Set the next scheduled thread to the current thread KernelTimer_Start(); // enable the kernel timer KernelSWI_Start(); // enable the task switch SWI // Restore the context... Thread_RestoreContext(); // restore the context of the first running thread ASM("reti"); // return from interrupt - will return to the first scheduled thread }
/* SchedulerType.tp_init */
/* __init__(scheduler, loop, callback, data=None, priority=0)
 *
 * Parses the constructor arguments, initializes the underlying Watcher
 * base object, then installs the Python scheduler callable.  Returns 0 on
 * success, -1 (with a Python exception set) on failure. */
static int Scheduler_tp_init(Scheduler *self, PyObject *args, PyObject *kwargs)
{
    static char *kwlist[] = {"scheduler", "loop", "callback", "data",
                             "priority", NULL};
    PyObject *sched_cb;
    Loop *watcher_loop;
    PyObject *cb;
    PyObject *cb_data = NULL;
    int prio = 0;

    /* "O!" enforces that the loop argument is a LoopType instance. */
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO!O|Oi:__init__", kwlist,
                                     &sched_cb, &LoopType, &watcher_loop,
                                     &cb, &cb_data, &prio)) {
        return -1;
    }

    /* Initialize the base watcher first; only then install the scheduler
     * callable on top of it. */
    if (Watcher_Init((Watcher *)self, watcher_loop, cb, cb_data, prio) != 0) {
        return -1;
    }
    return Scheduler_SetScheduler(self, sched_cb);
}
/* Setter for the Scheduler.scheduler attribute.
 *
 * PYEV_PROTECTED_ATTRIBUTE presumably rejects attribute deletion
 * (value == NULL) — verify against the macro definition.  On a valid
 * value, delegates to Scheduler_SetScheduler(); returns 0 on success,
 * -1 with a Python exception set on failure. */
static int Scheduler_scheduler_set(Scheduler *self, PyObject *value, void *closure)
{
    PYEV_PROTECTED_ATTRIBUTE(value);
    return Scheduler_SetScheduler(self, value);
}
/*!
 * Claim the Mutex_t on behalf of the current thread, blocking (optionally
 * with a timeout when KERNEL_USE_TIMEOUTS is set) if it is already owned
 * by another thread.  Supports recursive claims by the owner and applies
 * priority inheritance to avoid priority inversion.
 *
 * NOTE(review): "Mutex_Claii" looks like a typo for "Mutex_Claim" — verify
 * against the declaration in the header and the callers.
 *
 * NOTE(review): the matching #if KERNEL_USE_TIMEOUTS branch of this
 * signature is not visible here; ulWaitTimeMS_ and the K_BOOL return type
 * used below presumably come from that timed-claim variant declared above
 * the #endif — confirm against the full source.
 */
void Mutex_Claii( Mutex_t *pstMutex_ )
#endif
{
    KERNEL_TRACE_1( STR_MUTEX_CLAIM_1, (K_USHORT)Thread_GetID( g_pstCurrent ) );

#if KERNEL_USE_TIMEOUTS
    Timer_t stTimer;
    K_BOOL bUseTimer = false;
#endif

    // Disable the scheduler while claiming the Mutex_t - we're dealing with all
    // sorts of private thread data, can't have a thread switch while messing
    // with internal data structures.
    Scheduler_SetScheduler( false );

    // Check to see if the Mutex_t is claimed or not
    if (pstMutex_->bReady != 0)
    {
        // Mutex_t isn't claimed, claim it.
        pstMutex_->bReady = 0;
        pstMutex_->ucRecurse = 0;
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );
        pstMutex_->pstOwner = g_pstCurrent;

        Scheduler_SetScheduler( true );

#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // If the Mutex_t is already claimed, check to see if this is the owner thread,
    // since we allow the Mutex_t to be claimed recursively.
    if (g_pstCurrent == pstMutex_->pstOwner)
    {
        // Ensure that we haven't exceeded the maximum recursive-lock count
        KERNEL_ASSERT( (pstMutex_->ucRecurse < 255) );
        pstMutex_->ucRecurse++;

        // Increment the lock count and bail
        Scheduler_SetScheduler( true );
#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // The Mutex_t is claimed already - we have to block now. Move the
    // current thread to the list of threads waiting on the Mutex_t.
#if KERNEL_USE_TIMEOUTS
    if (ulWaitTimeMS_)
    {
        // Arm a one-shot timer that will unblock this thread and mark it
        // "expired" if the mutex isn't acquired in time.
        // (Note: "TimedMutex_Calback" spelling is the identifier as defined
        // elsewhere in the project.)
        Thread_SetExpired( g_pstCurrent, false );
        Timer_Init( &stTimer );
        Timer_Start( &stTimer, false, ulWaitTimeMS_,
                     (TimerCallback_t)TimedMutex_Calback, (void*)pstMutex_);
        bUseTimer = true;
    }
#endif
    BlockingObject_Block( (ThreadList_t*)pstMutex_, g_pstCurrent );

    // Check if priority inheritance is necessary. We do this in order
    // to ensure that we don't end up with priority inversions in case
    // multiple threads are waiting on the same resource.
    if(pstMutex_->ucMaxPri <= Thread_GetPriority( g_pstCurrent ) )
    {
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );

        // Boost every thread waiting on the mutex up to the new ceiling,
        // walking the wait list head-to-tail.
        Thread_t *pstTemp = (Thread_t*)(LinkList_GetHead( (LinkList_t*)pstMutex_ ));
        while(pstTemp)
        {
            Thread_InheritPriority( pstTemp, pstMutex_->ucMaxPri );
            if(pstTemp == (Thread_t*)(LinkList_GetTail( (LinkList_t*)pstMutex_ )) )
            {
                break;
            }
            pstTemp = (Thread_t*)LinkListNode_GetNext( (LinkListNode_t*)pstTemp );
        }
        // Boost the owner too, so it can run and release the mutex.
        Thread_InheritPriority( pstMutex_->pstOwner, pstMutex_->ucMaxPri );
    }

    // Done with thread data -reenable the scheduler
    Scheduler_SetScheduler( true );

    // Switch threads if this thread acquired the Mutex_t
    Thread_Yield();

#if KERNEL_USE_TIMEOUTS
    if (bUseTimer)
    {
        Timer_Stop( &stTimer );
        // Success if this thread's timeout did NOT expire before the wake.
        return ( Thread_GetExpired( g_pstCurrent ) == 0);
    }
    return true;
#endif
}