//--------------------------------------------------------------------------- int main(void) { Kernel_Init(); Thread_Init( &stMainThread, aucMainStack, MAIN_STACK_SIZE, 1, (ThreadEntry_t)AppMain, NULL ); Thread_Init( &stIdleThread, aucIdleStack, MAIN_STACK_SIZE, 0, (ThreadEntry_t)IdleMain, NULL ); Thread_Start( &stMainThread ); Thread_Start( &stIdleThread ); Driver_SetName( (Driver_t*) &stUART, "/dev/tty"); ATMegaUART_Init( &stUART ); DriverList_Add( (Driver_t*)&stUART ); Kernel_Start(); }
//--------------------------------------------------------------------------- int main(void) { // See the annotations in lab1. Kernel_Init(); // In this exercise, we create two threads at the same priority level. // As a result, the CPU will automatically swap between these threads // at runtime to ensure that each get a chance to execute. Thread_Init( &stApp1Thread, awApp1Stack, APP1_STACK_SIZE, 1, App1Main, 0); Thread_Init( &stApp2Thread, awApp2Stack, APP2_STACK_SIZE, 1, App2Main, 0); // Set the threads up so that Thread 1 can get 4ms of CPU time uninterrupted, // but Thread 2 can get 8ms of CPU time uninterrupted. This means that // in an ideal situation, Thread 2 will get to do twice as much work as // Thread 1 - even though they share the same scheduling priority. // Note that if SetQuantum() isn't called on a thread, a default value // is set such that each thread gets equal timeslicing in the same // priority group by default. You can play around with these values and // observe how it affects the execution of both threads. Thread_SetQuantum( &stApp1Thread, 4 ); Thread_SetQuantum( &stApp2Thread, 8 ); Thread_Start( &stApp1Thread ); Thread_Start( &stApp2Thread ); Kernel_Start(); return 0; }
//START: ThreadStartsAfterStart TEST(Thread, StartedThreadRunsBeforeItIsDestroyed) { thread = Thread_Create(threadEntry, 0); Thread_Start(thread); Thread_Destroy(thread); CHECK(TRUE == threadRan); }
//--------------------------------------------------------------------------- int main(void) { // See the annotations in previous labs for details on init. Kernel_Init(); Thread_Init( &stApp1Thread, awApp1Stack, APP1_STACK_SIZE, 1, App1Main, 0); Thread_Init( &stApp2Thread, awApp2Stack, APP2_STACK_SIZE, 1, App2Main, 0); Thread_Start( &stApp1Thread ); Thread_Start( &stApp2Thread ); EventFlag_Init( &stFlags ); Kernel_Start(); return 0; }
TEST(Thread, Join)
{
    // Join must block until the thread completes and hand back the value
    // the thread produced (42 here).
    void *result;

    thread = Thread_Create(threadEntry, 0);
    Thread_Start(thread);
    Thread_Join(thread, &result);
    Thread_Destroy(thread);

    LONGS_EQUAL(42, *((int *)result));
}
//--------------------------------------------------------------------------- int main(void) { // See the annotations in previous labs for details on init. Kernel_Init(); Thread_Init( &stApp1Thread, awApp1Stack, APP1_STACK_SIZE, 1, App1Main, 0); Thread_Init( &stApp2Thread, awApp2Stack, APP2_STACK_SIZE, 1, App2Main, 0); Thread_Start( &stApp1Thread ); Thread_Start( &stApp2Thread ); // Initialize the mutex used in this example. Mutex_Init( &stMyMutex ); Kernel_Start(); return 0; }
TEST_END

//===========================================================================
// Verify that a stopped thread can be restarted and still responds to
// synchronization objects afterwards.
TEST(ut_thread_stop)
{
    // Test point - stop and restart a Thread_t
    Thread_Stop( &stThread1 );
    Thread_Sleep(10);
    Thread_Start( &stThread1 );

    // Poke the Thread_t using a Semaphore_t, verify it's still responding
    Semaphore_Post( &stSem2 );
    Semaphore_TimedPend( &stSem1, 10 );

    // The timed pend above should have been satisfied within its 10-tick
    // window; presumably Thread_GetExpired() reports whether the current
    // thread's last blocking call timed out -- confirm against the kernel's
    // thread API.
    EXPECT_FALSE( Thread_GetExpired( Scheduler_GetCurrentThread() ) );
}
//--------------------------------------------------------------------------- static void Thread_Profiling() { K_USHORT i; for (i = 0; i < 100; i++) { // Profile the amount of time it takes to initialize a representative // test Thread_t, simulating an "average" system Thread_t. Create the // Thread_t at a higher priority than the current Thread_t. ProfileTimer_Start( &stThreadInitTimer ); Thread_Init( &stTestThread1, aucTestStack1, TEST_STACK1_SIZE, 2, (ThreadEntry_t)Thread_ProfilingThread, NULL); ProfileTimer_Stop( &stThreadInitTimer ); // Profile the time it takes from calling "start" to the time when the // Thread_t becomes active ProfileTimer_Start( &stThreadStartTimer ); Thread_Start( &stTestThread1 ); //-- Switch to the test Thread_t -- // Stop the Thread_t-exit profiling timer, which was started from the // test Thread_t ProfileTimer_Stop( &stThreadExitTimer ); } Scheduler_SetScheduler(0); for (i = 0; i < 100; i++) { // Context switch profiling - this is equivalent to what's actually // done within the AVR-implementation. ProfileTimer_Start( &stContextSwitchTimer ); { Thread_SaveContext(); g_pstNext = g_pstCurrent; Thread_RestoreContext(); } ProfileTimer_Stop( &stContextSwitchTimer ); } Scheduler_SetScheduler(1); }
//--------------------------------------------------------------------------- static void Semaphore_Profiling() { Semaphore_t stSem; K_USHORT i; for (i = 0; i < 100; i++) { ProfileTimer_Start( &stSemInitTimer ); Semaphore_Init( &stSem, 0, 1000); ProfileTimer_Stop( &stSemInitTimer ); } for (i = 0; i < 100; i++) { ProfileTimer_Start( &stSemPostTimer ); Semaphore_Post( &stSem ); ProfileTimer_Stop( &stSemPostTimer ); } for (i = 0; i < 100; i++) { ProfileTimer_Start( &stSemPendTimer ); Semaphore_Pend( &stSem ); ProfileTimer_Stop( &stSemPendTimer ); } Semaphore_Init( &stSem, 0, 1); for (i = 0; i < 100; i++) { Thread_Init( &stTestThread1, aucTestStack1, TEST_STACK1_SIZE, 2, (ThreadEntry_t)Semaphore_Flyback, (void*)&stSem); Thread_Start( &stTestThread1 ); Semaphore_Post( &stSem ); } return; }
TEST_END

//===========================================================================
// Verify that per-thread quanta (3/6/9 ticks) divide CPU time between
// equal-priority threads in proportion to each thread's quantum.
TEST(ut_quanta)
{
    K_ULONG ulAvg;
    K_ULONG ulMax;
    K_ULONG ulMin;
    K_ULONG ulRange;

    // Create three Thread_ts that only increment counters - similar to the
    // previous test. However, modify the Thread_t quanta such that each
    // Thread_t will get a different proportion of the CPU cycles.
    Thread_Init( &stThread1, aucStack1, TEST_STACK_SIZE, 1, RR_EntryPoint, (void*)&ulRR1);
    Thread_Init( &stThread2, aucStack2, TEST_STACK_SIZE, 1, RR_EntryPoint, (void*)&ulRR2);
    Thread_Init( &stThread3, aucStack3, TEST_STACK_SIZE, 1, RR_EntryPoint, (void*)&ulRR3);

    ulRR1 = 0;
    ulRR2 = 0;
    ulRR3 = 0;

    // Raise this Thread_t's priority before starting the test Thread_ts to
    // ensure they all start at the same time (when we hit the 1800ms sleep
    // below).
    Thread_SetPriority( Scheduler_GetCurrentThread(), 2);

    // Set a different execution quantum for each Thread_t.
    Thread_SetQuantum( &stThread1, 3);
    Thread_SetQuantum( &stThread2, 6);
    Thread_SetQuantum( &stThread3, 9);

    Thread_Start( &stThread1 );
    Thread_Start( &stThread2 );
    Thread_Start( &stThread3 );

    Thread_Sleep(1800);

    // When the sleep ends, this will preempt the Thread_t in progress,
    // allowing us to stop them, and drop priority.
    Thread_Stop( &stThread1 );
    Thread_Stop( &stThread2 );
    Thread_Stop( &stThread3 );
    Thread_SetPriority( Scheduler_GetCurrentThread(), 1);

    // Test point - make sure that Q3 > Q2 > Q1
    EXPECT_GT( ulRR2, ulRR1 );
    EXPECT_GT( ulRR3, ulRR2 );

    // Normalize the counters to counter 3's quantum: scale counter 1 by 3
    // (= 9/3) and counter 2 by 1.5 (= 9/6, computed as *3 then a rounded /2).
    ulRR1 *= 3;
    ulRR2 *= 3;
    ulRR2 = (ulRR2 + 1) / 2;

    // After scaling, they should be nearly identical (well, close at least).
    // Compute the max and min of the three normalized counters.
    if (ulRR1 > ulRR2) { ulMax = ulRR1; } else { ulMax = ulRR2; }
    if (ulMax < ulRR3) { ulMax = ulRR3; }
    if (ulRR1 < ulRR2) { ulMin = ulRR1; } else { ulMin = ulRR2; }
    if (ulMin > ulRR3) { ulMin = ulRR3; }

    ulRange = ulMax - ulMin;
    ulAvg = (ulRR1 + ulRR2 + ulRR3) / 3;

#if KERNEL_TIMERS_TICKLESS
    // Max-Min delta should not exceed 5% of average for this test
    EXPECT_LT( ulRange, ulAvg / 20);
#else
    // Max-Min delta should not exceed 20% of average for this test --
    // tick-based timers are coarse, and prone to Thread_t preference due to
    // phase.
    EXPECT_LT( ulRange, ulAvg / 5);
#endif

    // Make sure none of the component values are 0
    EXPECT_FAIL_EQUALS( ulRR1, 0 );
    EXPECT_FAIL_EQUALS( ulRR2, 0 );
    EXPECT_FAIL_EQUALS( ulRR3, 0 );
}