Example #1
static void taskEntryC0(uint16_t initCondition)
{
    /* A minor problem because of using one function for all tasks: The function needs to
       know which specific task executes it. We start the tasks with task-dependent
       general purpose events and let the function evaluate the parameter to find out. */
    uint8_t idxTask;
    if(initCondition == EVT_START_TASK_T0_C0)
        idxTask = 0;
    else if(initCondition == EVT_START_TASK_T1_C0)
        idxTask = 1;
    else    
    {
        ASSERT(initCondition == EVT_START_TASK_T2_C0);
        idxTask = 2;
    }

    /* Next problem: All tasks using this function use the round robin pattern in order to
       have permanent, unforeseen task switches. The idle task can set an event to start
       such a task, but then it'll never become active again and would fail to activate
       any of the remaining tasks. Therefore, we chain the initial activation: each task
       initiates the next one. */
    if(idxTask == 0)
        rtos_sendEvent(EVT_START_TASK_T1_C0);
    else if(idxTask == 1)
        rtos_sendEvent(EVT_START_TASK_T2_C0);
    
    /* Here, the actual task code begins. The next statement will never return. */
    taskC0(idxTask);
    
} /* End of taskEntryC0 */
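The call of taskC0 at the end never returns; its implementation is not part of this example. As a minimal, hypothetical sketch of what such a round robin task body could look like (the per-task loop counter below is an assumption, not taken from the original code):

static volatile uint16_t _cntLoopsC0[3]; /* Hypothetical per-task loop counters. */

static void taskC0(uint8_t idxTask)
{
    /* A round robin task doesn't need to suspend itself; it is preempted whenever its
       time slice elapses and resumed later on. The function must never return. */
    for(;;)
    {
        /* Do the cyclic work of the task identified by idxTask here. */
        ++ _cntLoopsC0[idxTask];
    }
} /* End of taskC0 */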
Example #2
void itq_writeElem(int16_t queuedElem)
{
    rtos_enterCriticalSection();    
    {
        _ringBuf[_writePos++] = queuedElem;
        
        /* A productive implementation would probably deny writing and return an error
           indication. We don't need such code as it simply is a requirement of our test
           case that an overrun must never happen. */
        ASSERT(_writePos != _readPos);
    }
    rtos_leaveCriticalSection();
    
    /* Only now, after completion of the actual queuing, is the semaphore incremented to
       signal the new data to possibly waiting consumer tasks. Queuing and signaling are
       not atomic; we have already left the critical section. This is however not
       dangerous: the meaning of the semaphore is not to indicate the precise number of
       queued elements but to guarantee that there's at least one element whenever the
       semaphore could be acquired.
         By the way, incrementing the semaphore by a call of rtos_sendEvent implicitly
       ends a critical section. We could make write and increment atomic by simply
       omitting the call of rtos_leaveCriticalSection - this would however make the code
       less readable and brings no benefit. */
    rtos_sendEvent(EVT_SEMAPHORE_ELEM_IN_QUEUE);
    
} /* End of itq_writeElem */
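For illustration only: a minimal sketch of the matching consumer operation, assuming the caller has already acquired the semaphore (i.e. returned from the suspend call waiting for EVT_SEMAPHORE_ELEM_IN_QUEUE), so at least one element is guaranteed to be in the buffer. The function name itq_readElem and the read index _readPos are assumptions mirroring the code above.

int16_t itq_readElem(void)
{
    int16_t queuedElem;

    rtos_enterCriticalSection();
    {
        /* The acquired semaphore guarantees that the buffer is not empty. */
        ASSERT(_readPos != _writePos);
        queuedElem = _ringBuf[_readPos++];
    }
    rtos_leaveCriticalSection();

    return queuedElem;

} /* End of itq_readElem */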
Example #3
static void task01_class00(uint16_t initCondition)

{
    for(;;)
    {
        uint16_t u;

        ++ _noLoopsTask01_C0;

        /* For test purpose only: This task consumes the CPU for about 50% of the cycle
           time. */
        delay(5 /*ms*/);

        /* Release high priority task for a single cycle. It should continue operation
           before we return from the suspend function sendEvent. Check it. */
        u = _noLoopsTask00_C1;
        rtos_sendEvent(/* eventVec */ RTOS_EVT_EVENT_00);
        ASSERT(u+1 == _noLoopsTask00_C1);

        /* Double-check that this task keeps in sync with the triggered task of higher
           priority. */
        ASSERT(_noLoopsTask01_C0 == _noLoopsTask00_C1);

        /* This task cycles with about 10 ms. This will succeed only if the other task in
           the same priority class does not use lengthy blocking operations. */
        rtos_suspendTaskTillTime(/* deltaTimeTillRelease */ 10 /*ms*/);
    }
} /* End of task01_class00 */
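A sketch of the counterpart task of higher priority that the assertions above imply: it performs one loop per received trigger event. The function name and the use and signature of the suspend call rtos_waitForEvent are assumptions here; the actual test case code is not shown in this example.

static void task00_class01(uint16_t initCondition)
{
    for(;;)
    {
        /* Suspend until the task of lower priority sends RTOS_EVT_EVENT_00. Because of
           the higher priority, this task resumes and completes its loop before the
           sender returns from rtos_sendEvent. (Assumed call and parameters.) */
        rtos_waitForEvent(/* eventMask */ RTOS_EVT_EVENT_00, /* all */ false, /* timeout */ 0);

        ++ _noLoopsTask00_C1;
    }
} /* End of task00_class01 */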
Example #4
static void task01_class00(uint16_t initCondition)

{
    uint32_t tiCycle0 = millis();
    for(;;)
    {
        uint16_t u;
        
        ++ _noLoopsTask01_C0;

        /* The next operation (Arduino delay function) takes the demanded world time in ms
           (as opposed to CPU time) even if it is interrupted because of an elapsed round
           robin counter.
             As this task has a round robin time slice of 4 ms, the delay operation will
           surely be interrupted by the other task - which may consume the CPU for up to 20
           ms. The delay operation may thus return after 24 ms. */
        uint32_t ti0 = millis();
        delay(8 /* ms */);
        uint16_t dT = (uint16_t)(millis() - ti0);
        ASSERT(dT >= 7);
        ASSERT(dT <= 25);

        /* Release the high priority task for a single cycle. It should continue operation
           before we leave the suspend function here. Check it. */
        ti0 = millis();
        u = _noLoopsTask00_C1;
        rtos_sendEvent(/* eventVec */ RTOS_EVT_EVENT_00);
        ASSERT(u+1 == _noLoopsTask00_C1);
        ASSERT(_noLoopsTask01_C0 == _noLoopsTask00_C1);
        dT = (uint16_t)(millis() - ti0);
        ASSERT(dT <= 2);
        
        /* The body of this task takes up to about 26 ms (see before). If it suspends here,
           the other round robin task will most often become active and consume the CPU the
           next 20 ms. This task wants to cycle with 40 ms. So it'll become due while the
           other round robin task is active. This task will become active only after the
           time slice of the other task has elapsed. Exact cycle time is impossible for
           this task.
             It can even be worse if the other round robin task should be suspended while
           this task suspends itself till the next multiple of 40 ms: Occasionally, the
           other task will resume just before this task and the activation of this task
           will be delayed by the full time slice duration of the other round robin task.
           Task overruns are unavoidable for this (ir-)regular task, but we can give an
           upper boundary for the cycle time, which is tested by assertion. */
        rtos_suspendTaskTillTime(/* deltaTimeTillRelease */ 20 /* unit 2 ms */);
        uint32_t tiCycleEnd = millis();
        dT = (uint16_t)(tiCycleEnd - tiCycle0);
        tiCycle0 = tiCycleEnd;
        ASSERT(dT <= 62);
    }
} /* End of task01_class00 */
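The bound tested by the final assertion follows from the timing explained in the comments: the nominal cycle time is 20 ticks of 2 ms = 40 ms, the activation may be delayed by the full 20 ms time slice of the other round robin task, and presumably one more 2 ms timer tick of granularity is allowed for, giving 40 + 20 + 2 = 62 ms as the worst case cycle time.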
Example #5
static void blink(uint8_t noFlashes)
{
#define TI_FLASH 150

    while(noFlashes-- > 0)
    {
        digitalWrite(LED, HIGH);  /* Turn the LED on. (HIGH is the voltage level.) */
        delay(TI_FLASH);          /* The flash time. */
        digitalWrite(LED, LOW);   /* Turn the LED off by making the voltage LOW. */
        delay(TI_FLASH);          /* Time between flashes. */

        /* Blink takes many hundreds of milliseconds. To prevent too many timeouts in
           task00_C0 we post the event also inside of blink. */
        rtos_sendEvent(/* eventVec */ RTOS_EVT_EVENT_03);
    }

    /* Wait for a second after the last flash - this command could easily be invoked
       immediately again and the series of flashes needs to be separated. */
    delay(500);
    rtos_sendEvent(/* eventVec */ RTOS_EVT_EVENT_03);
    delay(500-TI_FLASH);

#undef TI_FLASH
}
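For orientation, the timing of a call of blink(3) with TI_FLASH = 150 ms: each flash takes 2*150 = 300 ms and posts the event once, followed by 500 + (500-150) = 850 ms of trailing pause with one more posting in between - roughly 1750 ms in total, with no gap between two postings of RTOS_EVT_EVENT_03 much longer than half a second.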
Example #6
void loop()
{
    /* Give an alive sign. */
    blink(3);

#ifdef DEBUG
    printf("\nRTuinOS is idle\n");
#endif

    /* Share the result of the CPU load computation with the displaying idle follower
       task. No access synchronization is needed here for two reasons: writing a uint8_t
       is atomic and we have a strict coupling in time between the idle task and the data
       reading task: they become active one after another. */
    _cpuLoad = gsl_getSystemLoad();

#ifdef DEBUG
    cli();
    uint16_t adcResult       = adc_inputVoltage;
    uint16_t adcResultButton = adc_buttonVoltage;
    uint32_t noAdcResults = adc_noAdcResults;
    uint8_t hour = clk_noHour
          , min  = clk_noMin
          , sec  = clk_noSec;
    sei();

    printf("At %02u:%02u:%02u:\n", hour, min, sec);
    printf( "ADC result %7lu at %7.2f s: %.4f V (input), %.4f V (buttons)\n"
          , noAdcResults
          , 1e-3*millis()
          , ADC_SCALING_BIN_TO_V(adcResult)
          , ADC_SCALING_BIN_TO_V(adcResultButton)
          );
    printf("CPU load: %.1f %%\n", (double)_cpuLoad/2.0);
    ASSERT(rtos_getTaskOverrunCounter(/* idxTask */ idxTaskRTC, /* doReset */ false) == 0);
    
    uint8_t u;
    for(u=0; u<RTOS_NO_TASKS; ++u)
        printf("Unused stack area of task %u: %u Byte\n", u, rtos_getStackReserve(u));
#endif

    /* Trigger the follower task, which is capable of safely displaying the results. */
    rtos_sendEvent(EVT_TRIGGER_IDLE_FOLLOWER_TASK);

} /* End of loop */
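A hedged sketch of the idle follower task implied by the comments above: it is activated solely by the trigger event sent at the end of loop(), reads the shared _cpuLoad and displays it. The function name and the use and signature of rtos_waitForEvent are assumptions; only the event and the shared variable appear in the example above.

static void taskIdleFollower(uint16_t initCondition)
{
    for(;;)
    {
        /* Suspend until the idle task triggers the next display cycle. (Assumed call
           and parameters.) */
        rtos_waitForEvent(/* eventMask */ EVT_TRIGGER_IDLE_FOLLOWER_TASK, /* all */ false, /* timeout */ 0);

        /* Read the shared CPU load. No critical section is needed: the idle task
           completed the write before it sent the trigger event, and access to a
           uint8_t is atomic anyway. */
        uint8_t cpuLoad = _cpuLoad;

        /* Display cpuLoad here; the actual display code is not part of this example. */
        (void)cpuLoad;
    }
} /* End of taskIdleFollower */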
Example #7
void loop(void)
{
    /* Idle is used only to start the first round robin task. */
    rtos_sendEvent(EVT_START_TASK_T0_C0);

    /* In test case tc09 of RTuinOS we had written: "Since we have a pseudo mutex only,
       which is implemented by polling (try and suspend until next try) there are minor
       gaps in time where all tasks are suspended. To not run into the sendEvent again, we
       place an empty loop here. Having a true mutex implementation, we would place an
       ASSERT(false) instead."
         Now we have re-implemented the test case using a true mutex, but the statement
       turned out to be wrong; such an assertion fires occasionally. The reason is that
       the tasks also use the other suspend command, rtos_delay, so it may happen that
       all of them are suspended at the same time and idle becomes active. If all calls
       of rtos_delay are commented out the assertion is indeed safe. */
    //ASSERT(false);
    while(true)
        ;

} /* End of loop */
Example #8
inline void dpy_display_t::releaseMutex()
{
    /* Release the mutex. */
    rtos_sendEvent(EVT_MUTEX_LCD);

} /* End of dpy_display_t::releaseMutex */
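For completeness, a sketch of the counterpart operation that the release above implies: acquiring the mutex means suspending until the mutex event is obtained. The method name and the use and signature of rtos_waitForEvent are assumptions; only releaseMutex is shown in this example.

inline void dpy_display_t::acquireMutex()
{
    /* Acquire the mutex; suspend until it is granted. (Assumed call and parameters.) */
    rtos_waitForEvent(/* eventMask */ EVT_MUTEX_LCD, /* all */ false, /* timeout */ 0);

} /* End of dpy_display_t::acquireMutex */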
Example #9
static void releaseResource()
{
    /* Signal the availability of the resource to possibly waiting tasks. */
    rtos_sendEvent(EVT_MUTEX_OWNING_RESOURCE);
    
} /* End of releaseResource */