/**
 * \b atomSemGet
 *
 * Perform a get operation on a semaphore.
 *
 * This decrements the current count value for the semaphore and returns.
 * If the count value is already zero then the call will block until the
 * count is incremented by another thread, or until the specified \c timeout
 * is reached. Blocking threads will also be woken if the semaphore is
 * deleted by another thread while blocking.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the count value is zero:
 *
 * \c timeout == 0 : Call will block until the count is non-zero \n
 * \c timeout > 0 : Call will block until non-zero up to the specified timeout \n
 * \c timeout == -1 : Return immediately if the count is zero \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until atomSemPut() or atomSemDelete() is called on the
 * semaphore.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from interrupt context if the \c timeout
 * parameter is -1 (in which case it does not block).
 *
 * @param[in] sem Pointer to semaphore object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Semaphore timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
 * @retval ATOM_ERR_DELETED Semaphore was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 */
uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    SEM_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If count is zero, block the calling thread */
        if (sem->count == 0)
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Count is zero, block the calling thread */

                /* Get the current TCB */
                curr_tcb_ptr = atomCurrentContext();

                /* Check we are actually in thread context */
                if (curr_tcb_ptr)
                {
                    /* Add current thread to the suspend list on this semaphore */
                    if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was an error putting this thread on the suspend list */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set suspended status for the current thread */
                        curr_tcb_ptr->suspended = TRUE;

                        /* Track errors */
                        status = ATOM_OK;

                        /* Register a timer callback if requested */
                        if (timeout)
                        {
                            /* Fill out the data needed by the callback to wake us up */
                            timer_data.tcb_ptr = curr_tcb_ptr;
                            timer_data.sem_ptr = sem;

                            /* Fill out the timer callback request structure */
                            timer_cb.cb_func = atomSemTimerCallback;
                            timer_cb.cb_data = (POINTER)&timer_data;
                            timer_cb.cb_ticks = timeout;

                            /**
                             * Store the timer details in the TCB so that we can
                             * cancel the timer callback if the semaphore is put
                             * before the timeout occurs.
                             */
                            curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                            /* Register a callback on timeout */
                            if (atomTimerRegister (&timer_cb) != ATOM_OK)
                            {
                                /* Timer registration failed */
                                status = ATOM_ERR_TIMER;

                                /* Clean up and return to the caller */
                                (void)tcbDequeueEntry (&sem->suspQ, curr_tcb_ptr);
                                curr_tcb_ptr->suspended = FALSE;
                                curr_tcb_ptr->suspend_timo_cb = NULL;
                            }
                        }

                        /* Set no timeout requested */
                        else
                        {
                            /* No need to cancel timeouts on this one */
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Check no errors have occurred */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Current thread now blocking, schedule in a new
                             * one. We already know we are in thread context
                             * so can call the scheduler from here.
                             */
                            atomSched (FALSE);

                            /**
                             * Normal atomSemPut() wakeups will set ATOM_OK status,
                             * while timeouts will set ATOM_TIMEOUT and semaphore
                             * deletions will set ATOM_ERR_DELETED.
                             */
                            status = curr_tcb_ptr->suspend_wake_status;

                            /**
                             * If we have been woken up with ATOM_OK then
                             * another thread incremented the semaphore and
                             * handed control to this thread. In theory the
                             * posting thread increments the counter and as
                             * soon as this thread wakes up we decrement the
                             * counter here, but to prevent another thread
                             * preempting this thread and decrementing the
                             * semaphore before this section was scheduled
                             * back in, we emulate the increment and decrement
                             * by not incrementing in atomSemPut() and not
                             * decrementing here. The count remains zero
                             * throughout, preventing other threads preempting
                             * before we decrement the count again.
                             */
                        }
                    }
                }
                else
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Not currently in thread context, can't suspend */
                    status = ATOM_ERR_CONTEXT;
                }
            }
            else
            {
                /* timeout == -1, requested not to block and count is zero */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Count is non-zero, just decrement it and return to calling thread */
            sem->count--;

            /* Exit critical region */
            CRITICAL_END ();

            /* Successful */
            status = ATOM_OK;
        }
    }

    return (status);
}
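/*
 * Illustrative usage sketch, not part of the kernel source and excluded
 * from the build: a consumer thread blocking on a counting semaphore with
 * a one-second timeout. The names consumer_sem and consumer_thread_func,
 * and the one-item-per-put protocol, are assumptions made for this example
 * only; SYSTEM_TICKS_PER_SEC is the tick-rate macro used by the test suite.
 */
#if 0
#include "atom.h"
#include "atomsem.h"

static ATOM_SEM consumer_sem;   /* Assumed created elsewhere with atomSemCreate (&consumer_sem, 0) */

static void consumer_thread_func (uint32_t param)
{
    uint8_t status;

    while (1)
    {
        /* Block for up to one second waiting for a producer to post the semaphore */
        status = atomSemGet (&consumer_sem, SYSTEM_TICKS_PER_SEC);

        if (status == ATOM_OK)
        {
            /* Count was non-zero, or atomSemPut() woke us: handle one item here */
        }
        else if (status == ATOM_TIMEOUT)
        {
            /* No put within one second: do idle housekeeping and wait again */
        }
        else
        {
            /* ATOM_ERR_DELETED or similar: semaphore is gone, stop waiting */
            break;
        }
    }
}
#endif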
/**
 * \b atomTimerDelay
 *
 * Suspend a thread for the given number of system ticks.
 *
 * Note that the wakeup time is the number of ticks from the current system
 * tick, therefore, for a one tick delay, the thread may be woken up at any
 * time between the atomTimerDelay() call and the next system tick. To
 * guarantee a minimum number of ticks, you should specify the minimum
 * number of ticks + 1.
 *
 * This function can only be called from thread context.
 *
 * @param[in] ticks Number of system ticks to delay (must be > 0)
 *
 * @retval ATOM_OK Successful delay
 * @retval ATOM_ERR_PARAM Bad parameter (ticks must be non-zero)
 * @retval ATOM_ERR_CONTEXT Not called from thread context
 * @retval ATOM_ERR_TIMER Problem registering the timer
 */
uint8_t atomTimerDelay (uint32_t ticks)
{
    ATOM_TCB *curr_tcb_ptr;
    ATOM_TIMER timer_cb;
    DELAY_TIMER timer_data;
    CRITICAL_STORE;
    uint8_t status;

    /* Get the current TCB */
    curr_tcb_ptr = atomCurrentContext();

    /* Parameter check */
    if (ticks == 0)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }

    /* Check we are actually in thread context */
    else if (curr_tcb_ptr == NULL)
    {
        /* Not currently in thread context, can't suspend */
        status = ATOM_ERR_CONTEXT;
    }

    /* Otherwise safe to proceed */
    else
    {
        /* Protect the system queues */
        CRITICAL_START ();

        /* Set suspended status for the current thread */
        curr_tcb_ptr->suspended = TRUE;

        /* Register the timer callback */

        /* Fill out the data needed by the callback to wake us up */
        timer_data.tcb_ptr = curr_tcb_ptr;

        /* Fill out the timer callback request structure */
        timer_cb.cb_func = atomTimerDelayCallback;
        timer_cb.cb_data = (POINTER)&timer_data;
        timer_cb.cb_ticks = ticks;

        /* Store the timeout callback details, though we don't use them */
        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

        /* Register the callback */
        if (atomTimerRegister (&timer_cb) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Timer registration didn't work, won't get a callback */
            status = ATOM_ERR_TIMER;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Successful timer registration */
            status = ATOM_OK;

            /* Current thread should now block, schedule in another */
            atomSched (FALSE);
        }
    }

    return (status);
}
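/*
 * Illustrative usage sketch, not part of the kernel source and excluded
 * from the build: delaying a thread for roughly 50 ms. One extra tick is
 * added because, as described above, the first tick may arrive at any
 * point within the current tick period. The blink_thread_func name and
 * the toggle_led() routine are assumptions made for this example only;
 * SYSTEM_TICKS_PER_SEC is the tick-rate macro used by the test suite.
 */
#if 0
#include "atom.h"
#include "atomtimer.h"

extern void toggle_led (void);   /* Hypothetical board-support routine */

static void blink_thread_func (uint32_t param)
{
    /* Round 50 ms up to whole ticks, plus one tick for the minimum-delay rule */
    uint32_t delay_ticks = ((50 * SYSTEM_TICKS_PER_SEC) + 999) / 1000 + 1;

    while (1)
    {
        toggle_led ();

        if (atomTimerDelay (delay_ticks) != ATOM_OK)
        {
            /* ATOM_ERR_PARAM, ATOM_ERR_CONTEXT or ATOM_ERR_TIMER: stop blinking */
            break;
        }
    }
}
#endif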
/**
 * \b test_start
 *
 * Start mutex test.
 *
 * This test exercises the atomMutexGet() and atomMutexPut() APIs, including
 * forcing the various error indications which can be returned from the
 * APIs, to ensure that handling for these corner cases has been correctly
 * implemented.
 *
 * @retval Number of failures
 */
uint32_t test_start (void)
{
    int failures;
    uint8_t status;
    ATOM_TIMER timer_cb;
    int count;

    /* Default to zero failures */
    failures = 0;

    /* Test parameter checks */
    if (atomMutexGet (NULL, 0) != ATOM_ERR_PARAM)
    {
        ATOMLOG (_STR("Get param failed\n"));
        failures++;
    }
    if (atomMutexPut (NULL) != ATOM_ERR_PARAM)
    {
        ATOMLOG (_STR("Put param failed\n"));
        failures++;
    }

    /* Test atomMutexGet() can not be called from interrupt context */
    g_result = 0;
    if (atomMutexCreate (&mutex1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating test mutex1\n"));
        failures++;
    }
    else
    {
        /* Fill out the timer callback request structure */
        timer_cb.cb_func = testCallback;
        timer_cb.cb_data = NULL;
        timer_cb.cb_ticks = SYSTEM_TICKS_PER_SEC;

        /* Request the timer callback to run in one second */
        if (atomTimerRegister (&timer_cb) != ATOM_OK)
        {
            ATOMLOG (_STR("Error registering timer\n"));
            failures++;
        }

        /* Wait two seconds for g_result to be set indicating success */
        else
        {
            atomTimerDelay (2 * SYSTEM_TICKS_PER_SEC);
            if (g_result != 1)
            {
                ATOMLOG (_STR("Context check failed\n"));
                failures++;
            }
        }

        /* Delete the test mutex */
        if (atomMutexDelete (&mutex1) != ATOM_OK)
        {
            ATOMLOG (_STR("Mutex1 delete failed\n"));
            failures++;
        }
    }

    /* Create mutex1 which will be owned by us */
    if (atomMutexCreate (&mutex1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating test mutex 1\n"));
        failures++;
    }

    /* Create mutex2 which will be owned by another thread */
    else if (atomMutexCreate (&mutex2) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating test mutex 2\n"));
        failures++;
    }

    /* Create a test thread, the sole purpose of which is to own mutex2 */
    g_owned = 0;
    if (atomThreadCreate(&tcb[0], TEST_THREAD_PRIO, test_thread_func, 0,
            &test_thread_stack[0][TEST_THREAD_STACK_SIZE - 1],
            TEST_THREAD_STACK_SIZE) != ATOM_OK)
    {
        /* Fail */
        ATOMLOG (_STR("Error creating test thread 1\n"));
        failures++;
    }

    /* Sleep until the test thread owns mutex2 */
    atomTimerDelay (SYSTEM_TICKS_PER_SEC);
    if (g_owned == 0)
    {
        ATOMLOG (_STR("Thread own fail\n"));
        failures++;
    }

    /* Test wait on mutex with timeout - should time out while owned by another thread */
    if ((status = atomMutexGet (&mutex2, SYSTEM_TICKS_PER_SEC)) != ATOM_TIMEOUT)
    {
        ATOMLOG (_STR("Get %d\n"), status);
        failures++;
    }
    else
    {
        /* Success */
    }

    /* Test wait on mutex with no blocking - should return that it is owned by another thread */
    if ((status = atomMutexGet (&mutex2, -1)) != ATOM_WOULDBLOCK)
    {
        ATOMLOG (_STR("Wouldblock err %d\n"), status);
        failures++;
    }

    /* Test wait on mutex with no blocking when mutex is available */
    if (atomMutexGet (&mutex1, -1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error taking mutex1\n"));
        failures++;
    }
    else
    {
        /* Relinquish ownership of mutex1 */
        if (atomMutexPut (&mutex1) != ATOM_OK)
        {
            ATOMLOG (_STR("Error posting mutex\n"));
            failures++;
        }
    }

    /* Test for lock count overflows with too many gets */
    count = 255;
    while (count--)
    {
        if (atomMutexGet (&mutex1, 0) != ATOM_OK)
        {
            ATOMLOG (_STR("Error getting mutex1\n"));
            failures++;
            break;
        }
    }

    /* The lock count should overflow this time */
    if (atomMutexGet (&mutex1, 0) != ATOM_ERR_OVF)
    {
        ATOMLOG (_STR("Error tracking overflow\n"));
        failures++;
    }
    else
    {
        /* Success */
    }

    /* Delete the test mutexes */
    if (atomMutexDelete (&mutex1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error deleting mutex1\n"));
        failures++;
    }
    if (atomMutexDelete (&mutex2) != ATOM_OK)
    {
        ATOMLOG (_STR("Error deleting mutex2\n"));
        failures++;
    }

    /* Check thread stack usage (if enabled) */
#ifdef ATOM_STACK_CHECKING
    {
        uint32_t used_bytes, free_bytes;
        int thread;

        /* Check all threads */
        for (thread = 0; thread < NUM_TEST_THREADS; thread++)
        {
            /* Check thread stack usage */
            if (atomThreadStackCheck (&tcb[thread], &used_bytes, &free_bytes) != ATOM_OK)
            {
                ATOMLOG (_STR("StackCheck\n"));
                failures++;
            }
            else
            {
                /* Check the thread did not use up to the end of stack */
                if (free_bytes == 0)
                {
                    ATOMLOG (_STR("StackOverflow %d\n"), thread);
                    failures++;
                }

                /* Log the stack usage */
#ifdef TESTS_LOG_STACK_USAGE
                ATOMLOG (_STR("StackUse:%d\n"), (int)used_bytes);
#endif
            }
        }
    }
#endif

    /* Quit */
    return failures;
}
/**
 * \b test_start
 *
 * Start mutex test.
 *
 * This tests the ownership checks of the mutex library. Only threads
 * which own a mutex can release it. It should not be possible to
 * release a mutex if it is not owned by any thread, is owned by a
 * different thread, or from interrupt context. We test here that all
 * three cases are trapped.
 *
 * @retval Number of failures
 */
uint32_t test_start (void)
{
    int failures;
    ATOM_TIMER timer_cb;

    /* Default to zero failures */
    failures = 0;

    /* Create mutex */
    if (atomMutexCreate (&mutex1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating mutex\n"));
        failures++;
    }
    else
    {
        /* Initialise the shared_data to zero */
        shared_data = 0;

        /* Attempt to release the mutex when not owned by any thread */
        if (atomMutexPut (&mutex1) != ATOM_ERR_OWNERSHIP)
        {
            ATOMLOG (_STR("Release error\n"));
            failures++;
        }

        /* Create second thread */
        else if (atomThreadCreate(&tcb[0], TEST_THREAD_PRIO, test_thread_func, 1,
                &test_thread_stack[0][TEST_THREAD_STACK_SIZE - 1],
                TEST_THREAD_STACK_SIZE) != ATOM_OK)
        {
            /* Fail */
            ATOMLOG (_STR("Error creating test thread\n"));
            failures++;
        }

        /*
         * The second thread has now been created and should take ownership
         * of the mutex. We wait a while and check that shared_data has been
         * modified, which proves to us that the thread has taken the mutex.
         */
        atomTimerDelay (SYSTEM_TICKS_PER_SEC/4);
        if (shared_data != 1)
        {
            ATOMLOG (_STR("Shared data unmodified\n"));
            failures++;
        }

        /* Check successful so far */
        if (failures == 0)
        {
            /*
             * Attempt to release the mutex again now that it is owned
             * by another thread.
             */
            if (atomMutexPut (&mutex1) != ATOM_ERR_OWNERSHIP)
            {
                ATOMLOG (_STR("Release error 2\n"));
                failures++;
            }

            /* Finally check that the mutex cannot be released from an ISR */

            /* Fill out the timer callback request structure */
            timer_cb.cb_func = testCallback;
            timer_cb.cb_data = NULL;
            timer_cb.cb_ticks = SYSTEM_TICKS_PER_SEC;

            /* Request the timer callback to run in one second */
            if (atomTimerRegister (&timer_cb) != ATOM_OK)
            {
                ATOMLOG (_STR("Error registering timer\n"));
                failures++;
            }

            /*
             * Wait two seconds for shared_data to be set to 2
             * indicating success. This happens if the timer
             * callback received the expected ownership error
             * when attempting to release the mutex.
             */
            else
            {
                atomTimerDelay (2 * SYSTEM_TICKS_PER_SEC);
                if (shared_data != 2)
                {
                    ATOMLOG (_STR("Context check failed\n"));
                    failures++;
                }
            }
        }

        /* Delete mutex, test finished */
        if (atomMutexDelete (&mutex1) != ATOM_OK)
        {
            ATOMLOG (_STR("Delete failed\n"));
            failures++;
        }
    }

    /* Check thread stack usage (if enabled) */
#ifdef ATOM_STACK_CHECKING
    {
        uint32_t used_bytes, free_bytes;
        int thread;

        /* Check all threads */
        for (thread = 0; thread < NUM_TEST_THREADS; thread++)
        {
            /* Check thread stack usage */
            if (atomThreadStackCheck (&tcb[thread], &used_bytes, &free_bytes) != ATOM_OK)
            {
                ATOMLOG (_STR("StackCheck\n"));
                failures++;
            }
            else
            {
                /* Check the thread did not use up to the end of stack */
                if (free_bytes == 0)
                {
                    ATOMLOG (_STR("StackOverflow %d\n"), thread);
                    failures++;
                }

                /* Log the stack usage */
#ifdef TESTS_LOG_STACK_USAGE
                ATOMLOG (_STR("StackUse:%d\n"), (int)used_bytes);
#endif
            }
        }
    }
#endif

    /* Quit */
    return failures;
}
/**
 * \b test_start
 *
 * Start timer test.
 *
 * This test exercises the atomTimerCancel() API, particularly its
 * behaviour when there are several timers registered. Four timers
 * are registered, two of which are cancelled, and the test confirms
 * that only the two which are not cancelled are called back.
 *
 * @retval Number of failures
 */
uint32_t test_start (void)
{
    int failures;
    int i;

    /* Default to zero failures */
    failures = 0;

    /* Clear down the ran flag for all four timers */
    for (i = 0; i < 4; i++)
    {
        callback_ran_flag[i] = FALSE;
    }

    /*
     * Fill out four timer request structures. Callbacks are
     * requested starting in one second, with the others
     * at 1 tick intervals thereafter.
     */
    for (i = 0; i < 4; i++)
    {
        /*
         * testCallback() is passed a pointer to the flag it
         * should set to notify that it has run.
         */
        timer_cb[i].cb_ticks = SYSTEM_TICKS_PER_SEC + i;
        timer_cb[i].cb_func = testCallback;
        timer_cb[i].cb_data = &callback_ran_flag[i];
    }

    /* Register all four timers */
    for (i = 0; i < 4; i++)
    {
        if (atomTimerRegister (&timer_cb[i]) != ATOM_OK)
        {
            ATOMLOG (_STR("TimerReg\n"));
            failures++;
        }
    }

    /* Check timers were successfully created */
    if (failures == 0)
    {
        /* Cancel two of the callbacks */
        if (atomTimerCancel (&timer_cb[1]) != ATOM_OK)
        {
            ATOMLOG (_STR("Cancel1\n"));
            failures++;
        }
        if (atomTimerCancel (&timer_cb[2]) != ATOM_OK)
        {
            ATOMLOG (_STR("Cancel2\n"));
            failures++;
        }

        /* Wait two seconds for callbacks to complete */
        if (atomTimerDelay(2 * SYSTEM_TICKS_PER_SEC) != ATOM_OK)
        {
            ATOMLOG (_STR("Wait\n"));
            failures++;
        }
        else
        {
            /*
             * We should now find that timer callbacks 0 and 3
             * have run, but 1 and 2 did not (due to cancellation).
             */
            if ((callback_ran_flag[0] != TRUE)
                || (callback_ran_flag[3] != TRUE)
                || (callback_ran_flag[1] != FALSE)
                || (callback_ran_flag[2] != FALSE))
            {
                ATOMLOG (_STR("Cancellations\n"));
                failures++;
            }
        }
    }

    /* Quit */
    return failures;
}
/**
 * \b atomMutexGet
 *
 * Take the lock on a mutex.
 *
 * This takes ownership of a mutex if it is not currently owned. Ownership
 * is held by this thread until a corresponding call to atomMutexPut() by
 * the same thread.
 *
 * Can be called recursively by the original locking thread (owner).
 * Recursive calls are counted, and ownership is not relinquished until
 * the number of unlock (atomMutexPut()) calls by the owner matches the
 * number of lock (atomMutexGet()) calls.
 *
 * No thread other than the owner can lock or unlock the mutex while it is
 * locked by another thread.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the mutex is already locked by another thread:
 *
 * \c timeout == 0 : Call will block until the mutex is available \n
 * \c timeout > 0 : Call will block until available up to the specified timeout \n
 * \c timeout == -1 : Return immediately if mutex is locked by another thread \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until the owning thread calls atomMutexPut(), or until
 * atomMutexDelete() is called on the mutex.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Mutex timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but the mutex is locked by another thread
 * @retval ATOM_ERR_DELETED Mutex was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 * @retval ATOM_ERR_OVF The recursive lock count would have overflowed (>255)
 */
uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    MUTEX_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /**
         * Check we are at thread context. Because mutexes have the concept of
         * owner threads, it is never valid to call here from an ISR,
         * regardless of whether we will block.
         */
        if (curr_tcb_ptr == NULL)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Not currently in thread context, can't suspend */
            status = ATOM_ERR_CONTEXT;
        }

        /* Otherwise if mutex is owned by another thread, block the calling thread */
        else if ((mutex->owner != NULL) && (mutex->owner != curr_tcb_ptr))
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Add current thread to the suspend list on this mutex */
                if (tcbEnqueuePriority (&mutex->suspQ, curr_tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* There was an error putting this thread on the suspend list */
                    status = ATOM_ERR_QUEUE;
                }
                else
                {
                    /* Set suspended status for the current thread */
                    curr_tcb_ptr->suspended = TRUE;

                    /* Track errors */
                    status = ATOM_OK;

                    /* Register a timer callback if requested */
                    if (timeout)
                    {
                        /* Fill out the data needed by the callback to wake us up */
                        timer_data.tcb_ptr = curr_tcb_ptr;
                        timer_data.mutex_ptr = mutex;

                        /* Fill out the timer callback request structure */
                        timer_cb.cb_func = atomMutexTimerCallback;
                        timer_cb.cb_data = (POINTER)&timer_data;
                        timer_cb.cb_ticks = timeout;

                        /**
                         * Store the timer details in the TCB so that we can
                         * cancel the timer callback if the mutex is put
                         * before the timeout occurs.
                         */
                        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                        /* Register a callback on timeout */
                        if (atomTimerRegister (&timer_cb) != ATOM_OK)
                        {
                            /* Timer registration failed */
                            status = ATOM_ERR_TIMER;

                            /* Clean up and return to the caller */
                            (void)tcbDequeueEntry (&mutex->suspQ, curr_tcb_ptr);
                            curr_tcb_ptr->suspended = FALSE;
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }
                    }

                    /* Set no timeout requested */
                    else
                    {
                        /* No need to cancel timeouts on this one */
                        curr_tcb_ptr->suspend_timo_cb = NULL;
                    }

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Check no errors have occurred */
                    if (status == ATOM_OK)
                    {
                        /**
                         * Current thread now blocking, schedule in a new
                         * one. We already know we are in thread context
                         * so can call the scheduler from here.
                         */
                        atomSched (FALSE);

                        /**
                         * Normal atomMutexPut() wakeups will set ATOM_OK status,
                         * while timeouts will set ATOM_TIMEOUT and mutex
                         * deletions will set ATOM_ERR_DELETED.
                         */
                        status = curr_tcb_ptr->suspend_wake_status;

                        /**
                         * If we were woken up by another thread relinquishing
                         * the mutex and handing this thread ownership, then
                         * the relinquishing thread will set status to ATOM_OK
                         * and will make this thread the owner. Setting the
                         * owner before waking the thread ensures that no other
                         * thread can preempt and take ownership of the mutex
                         * between this thread being made ready to run, and
                         * actually being scheduled back in here.
                         */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Since this thread has just gained ownership, the
                             * lock count is zero and should be incremented
                             * once for this call.
                             */
                            mutex->count++;
                        }
                    }
                }
            }
            else
            {
                /* timeout == -1, requested not to block and mutex is owned by another thread */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Mutex is not owned, or is already owned by us: we can claim ownership */

            /* Increment the lock count, checking for count overflow */
            if (mutex->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return to the calling thread */
                mutex->count++;

                /* If the mutex is not locked, mark the calling thread as the new owner */
                if (mutex->owner == NULL)
                {
                    mutex->owner = curr_tcb_ptr;
                }

                /* Successful */
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
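/*
 * Illustrative usage sketch, not part of the kernel source and excluded
 * from the build: protecting a shared counter with a mutex, including a
 * nested (recursive) lock by the owning thread. Every atomMutexGet() by
 * the owner must be balanced by an atomMutexPut() before ownership is
 * released. The names counter_mutex, shared_counter, update_counter and
 * worker_thread_func are assumptions made for this example only;
 * SYSTEM_TICKS_PER_SEC is the tick-rate macro used by the test suite.
 */
#if 0
#include "atom.h"
#include "atommutex.h"

static ATOM_MUTEX counter_mutex;   /* Assumed created elsewhere with atomMutexCreate (&counter_mutex) */
static uint32_t shared_counter;

static void update_counter (void)
{
    /* Nested get by the owning thread: just bumps the recursive lock count */
    if (atomMutexGet (&counter_mutex, 0) == ATOM_OK)
    {
        shared_counter++;
        (void)atomMutexPut (&counter_mutex);
    }
}

static void worker_thread_func (uint32_t param)
{
    /* Block for up to one second trying to take ownership */
    if (atomMutexGet (&counter_mutex, SYSTEM_TICKS_PER_SEC) == ATOM_OK)
    {
        /* We own the mutex here; the nested get/put inside is counted, not blocked */
        update_counter ();

        /* Final put by the owner: lock count returns to zero and ownership is released */
        (void)atomMutexPut (&counter_mutex);
    }
    /* ATOM_TIMEOUT: another thread held the mutex for the whole second */
}
#endif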