/**
 * \b atomSemPut
 *
 * Perform a put operation on a semaphore.
 *
 * This increments the current count value for the semaphore and returns.
 *
 * If the count value was previously zero and there are threads blocking on the
 * semaphore, the call will wake up the highest priority thread suspended. Only
 * one thread is woken per call to atomSemPut(). If multiple threads of the
 * same priority are suspended, they are woken in order of suspension (FIFO).
 *
 * Note: when a suspended thread is woken, the count is NOT incremented — the
 * woken thread consumes this put directly (see the suspQ branch below).
 *
 * This function can be called from interrupt context.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_OVF The semaphore count would have overflowed (>255)
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 */
uint8_t atomSemPut (ATOM_SEM * sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If any threads are blocking on the semaphore, wake up one */
        if (sem->suspQ)
        {
            /**
             * Threads are woken up in priority order, with a FIFO system
             * used on same priority threads. We always take the head,
             * ordering is taken care of by an ordered list enqueue.
             */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);
            if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
            {
                /* Exit critical region */
                CRITICAL_END ();

                /* There was a problem putting the thread on the ready queue */
                status = ATOM_ERR_QUEUE;
            }
            else
            {
                /* Set OK status to be returned to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_OK;

                /*
                 * If there's a timeout on this suspension, cancel it.
                 * Note the woken thread has already been placed on the
                 * ready queue at this point, even if the cancel fails and
                 * ATOM_ERR_TIMER is returned to the caller.
                 */
                if ((tcb_ptr->suspend_timo_cb != NULL)
                    && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                {
                    /* There was a problem cancelling a timeout on this semaphore */
                    status = ATOM_ERR_TIMER;
                }
                else
                {
                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                    /* Successful */
                    status = ATOM_OK;
                }

                /* Exit critical region */
                CRITICAL_END ();

                /**
                 * The scheduler may now make a policy decision to thread
                 * switch if we are currently in thread context. If we are
                 * in interrupt context it will be handled by atomIntExit().
                 */
                if (atomCurrentContext())
                    atomSched (FALSE);
            }
        }

        /* If no threads waiting, just increment the count and return */
        else
        {
            /* Check for count overflow (count is held in a uint8_t) */
            if (sem->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return success */
                sem->count++;
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
/**
 * \b atomSched
 *
 * This is an internal function not for use by application code.
 *
 * This is the main scheduler routine. It is called by the various OS
 * library routines to check if any threads should be scheduled in now.
 * If so, the context will be switched from the current thread to the
 * new one.
 *
 * The scheduler is priority-based with round-robin performed on threads
 * with the same priority. Round-robin is only performed on timer ticks
 * however. During reschedules caused by an OS operation (e.g. after
 * giving or taking a semaphore) we only allow the scheduling in of
 * threads with higher priority than current priority. On timer ticks we
 * also allow the scheduling of same-priority threads - in that case we
 * schedule in the head of the ready list for that priority and put the
 * current thread at the tail.
 *
 * @param[in] timer_tick Should be TRUE when called from the system tick
 *
 * @return None
 */
void atomSched (uint8_t timer_tick)
{
    CRITICAL_STORE;
    ATOM_TCB *new_tcb = NULL;
    /* Signed so that -1 can mean "no preemption allowed" (priorities are 0..255) */
    int16_t lowest_pri;

    /**
     * Check the OS has actually started. As long as the proper initialisation
     * sequence is followed there should be no calls here until the OS is
     * started, but we check to handle badly-behaved ports.
     */
    if (atomOSStarted == FALSE)
    {
        /* Don't schedule anything in until the OS is started */
        return;
    }

    /* Enter critical section */
    CRITICAL_START ();

    /**
     * If the current thread is going into suspension, then
     * unconditionally dequeue the next thread for execution.
     */
    if (curr_tcb->suspended == TRUE)
    {
        /**
         * Dequeue the next ready to run thread. There will always be
         * at least the idle thread waiting. Note that this could
         * actually be the suspending thread if it was unsuspended
         * before the scheduler was called.
         */
        new_tcb = tcbDequeueHead (&tcbReadyQ);

        /**
         * Don't need to add the current thread to any queue because
         * it was suspended by another OS mechanism and will be
         * sitting on a suspend queue or similar within one of the OS
         * primitive libraries (e.g. semaphore).
         */

        /* Switch to the new thread */
        atomThreadSwitch (curr_tcb, new_tcb);
    }

    /**
     * Otherwise the current thread is still ready, but check
     * if any other threads are ready.
     */
    else
    {
        /* Calculate which priority is allowed to be scheduled in */
        if (timer_tick == TRUE)
        {
            /* Same priority or higher threads can preempt */
            lowest_pri = (int16_t)curr_tcb->priority;
        }
        else if (curr_tcb->priority > 0)
        {
            /* Only higher priority threads can preempt, invalid for 0 (highest) */
            lowest_pri = (int16_t)(curr_tcb->priority - 1);
        }
        else
        {
            /**
             * Current priority is already highest (0), don't allow preempt by
             * threads of any priority because this is not a time-slice.
             */
            lowest_pri = -1;
        }

        /* Check if a reschedule is allowed */
        if (lowest_pri >= 0)
        {
            /* Check for a thread at the given minimum priority level or higher */
            new_tcb = tcbDequeuePriority (&tcbReadyQ, (uint8_t)lowest_pri);

            /* If a thread was found, schedule it in */
            if (new_tcb)
            {
                /*
                 * Add the current thread back to the ready queue. The
                 * ordered enqueue places it behind same-priority threads,
                 * which is what implements the round-robin rotation.
                 */
                (void)tcbEnqueuePriority (&tcbReadyQ, curr_tcb);

                /* Switch to the new thread */
                atomThreadSwitch (curr_tcb, new_tcb);
            }
        }
    }

    /* Exit critical section */
    CRITICAL_END ();
}
/**
 * \b atomSemDelete
 *
 * Deletes a semaphore object.
 *
 * Any threads currently suspended on the semaphore will be woken up with
 * return status ATOM_ERR_DELETED. If called at thread context then the
 * scheduler will be called during this function which may schedule in one
 * of the woken threads depending on relative priorities.
 *
 * This function can be called from interrupt context, but loops internally
 * waking up all threads blocking on the semaphore, so the potential
 * execution cycles cannot be determined in advance.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
 */
uint8_t atomSemDelete (ATOM_SEM *sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;
    uint8_t woken_threads = FALSE;

    /* Parameter check */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Default to success status unless errors occur during wakeup */
        status = ATOM_OK;

        /*
         * Wake up all suspended tasks. Each iteration dequeues and wakes
         * at most one thread inside its own critical section, so interrupts
         * are re-enabled between wakeups rather than held off for the
         * whole loop.
         */
        while (1)
        {
            /* Enter critical region */
            CRITICAL_START ();

            /* Check if any threads are suspended */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);

            /* A thread is suspended on the semaphore */
            if (tcb_ptr)
            {
                /* Return error status to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;

                /* Put the thread on the ready queue */
                if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Quit the loop, returning error */
                    status = ATOM_ERR_QUEUE;
                    break;
                }

                /* If there's a timeout on this suspension, cancel it */
                if (tcb_ptr->suspend_timo_cb)
                {
                    /* Cancel the callback */
                    if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Quit the loop, returning error */
                        status = ATOM_ERR_TIMER;
                        break;
                    }

                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;
                }

                /* Exit critical region */
                CRITICAL_END ();

                /* Request a reschedule */
                woken_threads = TRUE;
            }

            /* No more suspended threads */
            else
            {
                /* Exit critical region and quit the loop */
                CRITICAL_END ();
                break;
            }
        }

        /* Call scheduler if any threads were woken up */
        if (woken_threads == TRUE)
        {
            /**
             * Only call the scheduler if we are in thread context, otherwise
             * it will be called on exiting the ISR by atomIntExit().
             */
            if (atomCurrentContext())
                atomSched (FALSE);
        }
    }

    return (status);
}
/**
 * \b atomMutexPut
 *
 * Give back the lock on a mutex.
 *
 * This checks that the mutex is owned by the calling thread, and decrements
 * the recursive lock count. Once the lock count reaches zero, the lock is
 * considered relinquished and no longer owned by this thread.
 *
 * If the lock is relinquished and there are threads blocking on the mutex, the
 * call will wake up the highest priority thread suspended. Only one thread is
 * woken per call to atomMutexPut(). If multiple threads of the same priority
 * are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * NOTE(review): the thread-context-only contract is load-bearing — if called
 * from interrupt context (atomCurrentContext() returns NULL) on an unowned
 * mutex (owner == NULL), the ownership comparison below would pass and
 * mutex->count would underflow. Callers must honor the contract.
 *
 * @param[in] mutex Pointer to mutex object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 * @retval ATOM_ERR_OWNERSHIP Attempt to unlock mutex not owned by this thread
 */
uint8_t atomMutexPut (ATOM_MUTEX * mutex)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr, *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /* Check if the calling thread owns this mutex */
        if (mutex->owner != curr_tcb_ptr)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Attempt to unlock by non-owning thread */
            status = ATOM_ERR_OWNERSHIP;
        }
        else
        {
            /* Lock is owned by this thread, decrement the recursive lock count */
            mutex->count--;

            /* Once recursive lock count reaches zero, we relinquish ownership */
            if (mutex->count == 0)
            {
                /* Relinquish ownership */
                mutex->owner = NULL;

                /* If any threads are blocking on this mutex, wake them now */
                if (mutex->suspQ)
                {
                    /**
                     * Threads are woken up in priority order, with a FIFO system
                     * used on same priority threads. We always take the head,
                     * ordering is taken care of by an ordered list enqueue.
                     */
                    tcb_ptr = tcbDequeueHead (&mutex->suspQ);
                    if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was a problem putting the thread on the ready queue */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set OK status to be returned to the waiting thread */
                        tcb_ptr->suspend_wake_status = ATOM_OK;

                        /*
                         * Set this thread as the new owner of the mutex:
                         * ownership is handed directly to the woken thread
                         * (with its lock count left at the value it expects)
                         * rather than leaving the mutex free to be raced for.
                         */
                        mutex->owner = tcb_ptr;

                        /* If there's a timeout on this suspension, cancel it */
                        if ((tcb_ptr->suspend_timo_cb != NULL)
                            && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                        {
                            /* There was a problem cancelling a timeout on this mutex */
                            status = ATOM_ERR_TIMER;
                        }
                        else
                        {
                            /* Flag as no timeout registered */
                            tcb_ptr->suspend_timo_cb = NULL;

                            /* Successful */
                            status = ATOM_OK;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /**
                         * The scheduler may now make a policy decision to
                         * thread switch. We already know we are in thread
                         * context so can call the scheduler from here.
                         */
                        atomSched (FALSE);
                    }
                }
                else
                {
                    /**
                     * Relinquished ownership and no threads waiting.
                     * Nothing to do.
                     */

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Successful */
                    status = ATOM_OK;
                }
            }
            else
            {
                /**
                 * Decremented lock but still retain ownership due to
                 * recursion. Nothing to do.
                 */

                /* Exit critical region */
                CRITICAL_END ();

                /* Successful */
                status = ATOM_OK;
            }
        }
    }

    return (status);
}