/**
 * @brief Test semaphore wake-up handshakes between task, fiber, and ISR.
 *
 * Checks the shared <semTestState> marker after each wake-up exchange on
 * <testSem>: first the task wakes the fiber, then the fiber wakes the task,
 * then an ISR wakes the task. Assumes the companion fiber has already run
 * and is pending on <testSem>.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int testSemWait(void)
{
	/* Bail out early if the fiber already reported a problem. */
	if (fiberDetectedFailure != 0) {
		TC_ERROR(" *** Failure detected in the fiber.");
		return TC_FAIL;
	}

	nano_task_sem_give(&testSem);    /* Wake the fiber. */

	if (semTestState != STS_TASK_WOKE_FIBER) {
		TC_ERROR(" *** Expected task to wake fiber. It did not.\n");
		return TC_FAIL;
	}

	TC_PRINT("Semaphore from the task woke the fiber\n");

	nano_task_sem_take_wait(&testSem);    /* Wait on <testSem> */

	if (semTestState != STS_FIBER_WOKE_TASK) {
		TC_ERROR(" *** Expected fiber to wake task. It did not.\n");
		return TC_FAIL;
	}

	TC_PRINT("Semaphore from the fiber woke the task\n");

	nano_task_sem_take_wait(&testSem);    /* Wait on <testSem> again. */

	if (semTestState != STS_ISR_WOKE_TASK) {
		TC_ERROR(" *** Expected ISR to wake task. It did not.\n");
		return TC_FAIL;
	}

	TC_PRINT("Semaphore from the ISR woke the task.\n");
	return TC_PASS;
}
/**
 * @brief Test task FIFO get-with-wait interfaces.
 *
 * Primes <nanoFifoObj2> with one item, activates fiber2, then blocks on
 * <nanoFifoObj> until fiber2 puts data there; finally hands fiber2 another
 * item via <nanoFifoObj2>. Sets the global <retCode> on failure.
 */
void testTaskFifoGetW(void)
{
	void *pGetData;    /* pointer to FIFO object get from the queue */
	void *pPutData;    /* pointer to FIFO object to put to the queue */

	PRINT_LINE;
	TC_PRINT("Test Task FIFO Get Wait Interfaces\n\n");
	pPutData = pMyFifoData1;
	TC_PRINT("TASK FIFO Put to queue2: %p\n", pPutData);
	nano_task_fifo_put(&nanoFifoObj2, pPutData);

	/* Activate fiber2 */
	nano_task_sem_give(&nanoSemObj2);

	/* Block until fiber2 puts an item into queue1. */
	pGetData = nano_task_fifo_get(&nanoFifoObj, TICKS_UNLIMITED);
	TC_PRINT("TASK FIFO Get from queue1: %p\n", pGetData);

	/* Verify results */
	if (pGetData != pMyFifoData2) {
		retCode = TC_FAIL;
		TCERR2;
		return;
	}

	pPutData = pMyFifoData3;
	TC_PRINT("TASK FIFO Put to queue2: %p\n", pPutData);
	nano_task_fifo_put(&nanoFifoObj2, pPutData);

	TC_END_RESULT(retCode);
} /* testTaskFifoGetW */
/**
 * @brief Give and take a semaphore in a task without blocking.
 *
 * Gives <testSem> a fixed number of times, then verifies that it can be
 * taken exactly that many times and no more.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int testSemTaskNoWait(void)
{
	/* Number of times to give (and then take) the semaphore; was a
	 * magic number repeated in three places.
	 */
	enum { SEM_GIVE_COUNT = 32 };
	int i;    /* loop counter */

	TC_PRINT("Giving and taking a semaphore in a task (non-blocking)\n");

	/*
	 * Give the semaphore many times and then make sure that it can only be
	 * taken that many times.
	 */
	for (i = 0; i < SEM_GIVE_COUNT; i++) {
		nano_task_sem_give(&testSem);
	}

	for (i = 0; i < SEM_GIVE_COUNT; i++) {
		if (nano_task_sem_take(&testSem) != 1) {
			TC_ERROR(" *** Expected nano_task_sem_take() to succeed, not fail\n");
			goto errorReturn;
		}
	}

	/* One more take must fail: the count is now exhausted. */
	if (nano_task_sem_take(&testSem) != 0) {
		TC_ERROR(" *** Expected nano_task_sem_take() to fail, not succeed!\n");
		goto errorReturn;
	}

	return TC_PASS;

errorReturn:
	return TC_FAIL;
}
/**
 * @brief Nanokernel entry point.
 *
 * @details Starts the kernel event data collector fiber, initializes the
 * philosopher fork semaphores and fibers, starts the fork manager fiber,
 * then idles forever.
 *
 * @return No return value.
 */
int main(void)
{
	int i;

#ifdef CONFIG_MICROKERNEL
	tmon_index = 0;
#endif

	kernel_event_logger_fiber_start();

	/* initialize philosopher semaphores (each fork starts available) */
	for (i = 0; i < N_PHILOSOPHERS; i++) {
		nano_sem_init(&forks[i]);
		nano_task_sem_give(&forks[i]);
	}

	/* create philosopher fibers */
	for (i = 0; i < N_PHILOSOPHERS; i++) {
		task_fiber_start(&philStack[i][0], STSIZE,
				 (nano_fiber_entry_t) philEntry, 0, 0, 6, 0);
	}

	/* start the fork manager fiber on its own (extra) stack slot */
	task_fiber_start(&philStack[N_PHILOSOPHERS][0], STSIZE,
			 (nano_fiber_entry_t) fork_manager_entry, 0, 0, 6, 0);

	/* wait forever */
	while (1) {
		extern void nano_cpu_idle(void);
		nano_cpu_idle();
	}
}
/**
 * Initialize the resources used by the framework's sync services.
 *
 * IMPORTANT: this function must be called during the initialization
 * of the OS abstraction layer.
 * This function shall only be called once after reset, otherwise
 * it may cause the queue services to fail.
 */
void framework_init_queue (void)
{
#if defined (CONFIG_NANOKERNEL) && ! defined (ZEPHYR_MICRO_OS_ABSTRACTION_USE_SINGLE_POOL_LOCK)
	/* Binary-semaphore style lock for the queue pool: init to 0 then
	 * give once so the first taker does not block.
	 */
	nano_sem_init (&QueuePoolLockSem);
	nano_task_sem_give (&QueuePoolLockSem);
#endif
	; /* Nothing to do in other configurations */
}
/**
 * Give a semaphore using the API variant matching the current execution
 * context (fiber or task).
 *
 * Deliberately does nothing when called from an ISR or an unrecognized
 * context, mirroring the original behavior.
 */
static void context_sem_give(struct nano_sem *chan)
{
	int ctx = sys_execution_context_type_get();

	if (ctx == NANO_CTX_FIBER) {
		nano_fiber_sem_give(chan);
	} else if (ctx == NANO_CTX_TASK) {
		nano_task_sem_give(chan);
	}
	/* NANO_CTX_ISR or invalid context type: no valid give operation */
}
/**
 * @brief Entry point for the nanokernel sleep/wakeup API test.
 *
 * Starts a test fiber and a helper fiber, exchanges wake-ups with the test
 * fiber, then verifies that task_sleep() sleeps for exactly the requested
 * number of ticks.
 */
void main(void)
{
	int status = TC_FAIL;
	uint32_t start_tick;
	uint32_t end_tick;

	TC_START("Test Nanokernel Sleep and Wakeup APIs\n");

	test_objects_init();

	test_fiber_id = task_fiber_start(test_fiber_stack, FIBER_STACKSIZE,
					 test_fiber, 0, 0,
					 TEST_FIBER_PRIORITY, 0);
	TC_PRINT("Test fiber started: id = 0x%x\n", test_fiber_id);

	helper_fiber_id = task_fiber_start(helper_fiber_stack, FIBER_STACKSIZE,
					   helper_fiber, 0, 0,
					   HELPER_FIBER_PRIORITY, 0);
	TC_PRINT("Helper fiber started: id = 0x%x\n", helper_fiber_id);

	/* Activate test_fiber */
	nano_task_sem_give(&test_fiber_sem);

	/* Wait for test_fiber to activate us */
	nano_task_sem_take(&task_sem, TICKS_UNLIMITED);

	/* Wake the test fiber (it is in fiber_sleep() at this point) */
	task_fiber_wakeup(test_fiber_id);

	if (test_failure) {
		goto done_tests;
	}

	TC_PRINT("Testing nanokernel task_sleep()\n");
	align_to_tick_boundary();
	start_tick = sys_tick_get_32();
	task_sleep(ONE_SECOND);
	end_tick = sys_tick_get_32();

	/* sleep duration must match exactly (we aligned to a tick boundary) */
	if (end_tick - start_tick != ONE_SECOND) {
		TC_ERROR("task_sleep() slept for %d ticks, not %d\n",
			 end_tick - start_tick, ONE_SECOND);
		goto done_tests;
	}

	status = TC_PASS;

done_tests:
	TC_END_REPORT(status);
}
int taskLifoWaitTest(void) { void *data; /* ptr to data retrieved from LIFO */ /* Wait on <taskWaitSem> in case fiber's print message blocked */ nano_fiber_sem_take(&taskWaitSem, TICKS_UNLIMITED); /* The fiber is waiting on the LIFO. Wake it. */ nano_task_lifo_put(&test_lifo, &lifoItem[0]); /* * The fiber ran, but is now blocked on the semaphore. Add an item to the * LIFO before giving the semaphore that wakes the fiber so that we can * cover the path of nano_fiber_lifo_get(TICKS_UNLIMITED) not waiting on * the LIFO. */ nano_task_lifo_put(&test_lifo, &lifoItem[2]); nano_task_sem_give(&fiberWaitSem); /* Check that the fiber got the correct item (lifoItem[0]) */ if (fiberDetectedFailure) { TC_ERROR(" *** nano_task_lifo_put()/nano_fiber_lifo_get() failure\n"); return TC_FAIL; } /* The LIFO is empty. This time the task will wait for the item. */ TC_PRINT("Task waiting on an empty LIFO\n"); data = nano_task_lifo_get(&test_lifo, TICKS_UNLIMITED); if (data != (void *) &lifoItem[1]) { TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n"); return TC_FAIL; } data = nano_task_lifo_get(&test_lifo, TICKS_UNLIMITED); if (data != (void *) &lifoItem[3]) { TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n"); return TC_FAIL; } /* Waiting on an empty LIFO passed for both fiber and task. */ return TC_PASS; }
/* the task spins fibers that get the sem in time, except the last one */ static int test_multiple_fibers_get_sem(struct timeout_order_data *test_data, int test_data_size) { struct timeout_order_data *data; int ii; for (ii = 0; ii < test_data_size-1; ii++) { task_fiber_start(timeout_stacks[ii], FIBER_STACKSIZE, test_fiber_pend_and_get_sem, (int)&test_data[ii], 0, FIBER_PRIORITY, 0); } task_fiber_start(timeout_stacks[ii], FIBER_STACKSIZE, test_fiber_pend_and_timeout, (int)&test_data[ii], 0, FIBER_PRIORITY, 0); for (ii = 0; ii < test_data_size-1; ii++) { nano_task_sem_give(test_data[ii].sem); data = nano_task_fifo_get_wait(&timeout_order_fifo); if (data->q_order == ii) { TC_PRINT(" got fiber (q order: %d, t/o: %d, sem: %p) as expected\n", data->q_order, data->timeout, data->sem); } else { TC_ERROR(" *** fiber %d woke up, expected %d\n", data->q_order, ii); return TC_FAIL; } } data = nano_task_fifo_get_wait(&timeout_order_fifo); if (data->q_order == ii) { TC_PRINT(" got fiber (q order: %d, t/o: %d, sem: %p) as expected\n", data->q_order, data->timeout, data->sem); } else { TC_ERROR(" *** fiber %d woke up, expected %d\n", data->timeout_order, ii); return TC_FAIL; } return TC_PASS; }
static void test_nano_timers(int unused1, int unused2) { struct nano_timer timer; ARG_UNUSED(unused1); ARG_UNUSED(unused2); nano_timer_init(&timer, (void *)0xdeadbeef); TC_PRINT("starting nano timer to expire in %d seconds\n", TEST_NANO_TIMERS_DELAY); nano_fiber_timer_start(&timer, SECONDS(TEST_NANO_TIMERS_DELAY)); TC_PRINT("fiber pending on timer\n"); nano_fiber_timer_wait(&timer); TC_PRINT("fiber back from waiting on timer: giving semaphore.\n"); nano_task_sem_give(&test_nano_timers_sem); TC_PRINT("fiber semaphore given.\n"); /* on failure, don't give semaphore, main test will not obtain it */ }
/**
 * @brief Test several fibers waiting on the same semaphore.
 *
 * Pends NUM_WAITERS fibers on <multi_waiters>, wakes them one give at a
 * time, then verifies each fiber replied exactly once and both semaphores
 * end up empty.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
static int do_test_multiple_waiters(void)
{
	int ii;

	/* pend all fibers on the same semaphore */
	for (ii = 0; ii < NUM_WAITERS; ii++) {
		task_fiber_start(fiber_multi_waiters_stacks[ii], FIBER_STACKSIZE,
				 fiber_multi_waiters, ii, 0, FIBER_PRIORITY, 0);
	}

	/* wake up all the fibers: the task is preempted each time */
	for (ii = 0; ii < NUM_WAITERS; ii++) {
		nano_task_sem_give(&multi_waiters);
	}

	/* reply_multi_waiters will have been given once for each fiber */
	for (ii = 0; ii < NUM_WAITERS; ii++) {
		if (!nano_task_sem_take(&reply_multi_waiters)) {
			TC_ERROR(" *** Cannot take sem supposedly given by waiters.\n");
			return TC_FAIL;
		}
	}

	TC_PRINT("Task took multi-waiter reply semaphore %d times, as expected.\n",
		 NUM_WAITERS);

	/* both semaphores must now be exhausted */
	if (nano_task_sem_take(&multi_waiters)) {
		TC_ERROR(" *** multi_waiters should have been empty.\n");
		return TC_FAIL;
	}
	if (nano_task_sem_take(&reply_multi_waiters)) {
		TC_ERROR(" *** reply_multi_waiters should have been empty.\n");
		return TC_FAIL;
	}

	return TC_PASS;
}
/**
 * @brief Entry point for the fibers-only philosophers demo.
 *
 * Makes every fork semaphore available, spawns one fiber per philosopher,
 * then parks the background task in the idle loop forever.
 */
int main(void)
{
	int phil;

	PRINTF(DEMO_DESCRIPTION, "fibers", "nanokernel");

	/* each fork starts available: init to zero, then give once */
	for (phil = 0; phil < N_PHILOSOPHERS; phil++) {
		nano_sem_init(&forks[phil]);
		nano_task_sem_give(&forks[phil]);
	}

	/* spawn one fiber per philosopher */
	for (phil = 0; phil < N_PHILOSOPHERS; phil++) {
		task_fiber_start(&philStack[phil][0], STSIZE,
				 (nano_fiber_entry_t) philEntry, 0, 0, 6, 0);
	}

	/* the background task has nothing further to do */
	for (;;) {
		extern void nano_cpu_idle(void);
		nano_cpu_idle();
	}
}
/**
 * @brief Task entry: alternates console output with a companion fiber.
 *
 * Starts the fiber, then loops forever: print, sleep one timer period,
 * give the fiber its turn, and wait to be given a turn back.
 */
void main(void)
{
	struct nano_timer timer;
	uint32_t data[2] = {0, 0};    /* backing storage passed to the timer */

	task_fiber_start(&fiberStack[0], STACKSIZE,
			 (nano_fiber_entry_t) fiberEntry, 0, 0, 7, 0);

	nano_sem_init(&nanoSemTask);
	nano_timer_init(&timer, data);

	while (1) {
		/* say "hello" */
		PRINT("%s: Hello Screen!\n", __FUNCTION__);

		/* wait a while, then let fiber have a turn */
		nano_task_timer_start(&timer, SLEEPTICKS);
		nano_task_timer_wait(&timer);
		nano_task_sem_give(&nanoSemFiber);

		/* now wait for fiber to let us have a turn */
		nano_task_sem_take_wait(&nanoSemTask);
	}
}
/**
 * @brief Fiber that exercises fiber_sleep() and the three wakeup variants.
 *
 * First verifies a normal fiber_sleep() expiry, then verifies that
 * fiber_fiber_wakeup(), isr_fiber_wakeup(), and task_fiber_wakeup() each
 * cut a sleep short before a tick elapses. Clears the global <test_failure>
 * flag only if every check passes.
 */
static void test_fiber(int arg1, int arg2)
{
	uint32_t start_tick;
	uint32_t end_tick;

	/* wait until the main task activates us */
	nano_fiber_sem_take(&test_fiber_sem, TICKS_UNLIMITED);

	TC_PRINT("Testing normal expiration of fiber_sleep()\n");
	align_to_tick_boundary();
	start_tick = sys_tick_get_32();
	fiber_sleep(ONE_SECOND);
	end_tick = sys_tick_get_32();

	if (end_tick != start_tick + ONE_SECOND) {
		TC_ERROR(" *** fiber_sleep() slept for %d ticks not %d.",
			 end_tick - start_tick, ONE_SECOND);
		return;
	}

	TC_PRINT("Testing fiber_sleep() + fiber_fiber_wakeup()\n");
	nano_fiber_sem_give(&helper_fiber_sem);    /* Activate helper fiber */
	align_to_tick_boundary();
	start_tick = sys_tick_get_32();
	fiber_sleep(ONE_SECOND);
	end_tick = sys_tick_get_32();

	/* the wakeup must arrive before a single tick elapses */
	if (end_tick > start_tick) {
		TC_ERROR(" *** fiber_fiber_wakeup() took too long (%d ticks)\n",
			 end_tick - start_tick);
		return;
	}

	TC_PRINT("Testing fiber_sleep() + isr_fiber_wakeup()\n");
	nano_fiber_sem_give(&helper_fiber_sem);    /* Activate helper fiber */
	align_to_tick_boundary();
	start_tick = sys_tick_get_32();
	fiber_sleep(ONE_SECOND);
	end_tick = sys_tick_get_32();

	if (end_tick > start_tick) {
		TC_ERROR(" *** isr_fiber_wakeup() took too long (%d ticks)\n",
			 end_tick - start_tick);
		return;
	}

	TC_PRINT("Testing fiber_sleep() + task_fiber_wakeup()\n");
	nano_task_sem_give(&task_sem);    /* Activate task */
	align_to_tick_boundary();
	start_tick = sys_tick_get_32();
	fiber_sleep(ONE_SECOND);    /* Task will execute */
	end_tick = sys_tick_get_32();

	if (end_tick > start_tick) {
		TC_ERROR(" *** task_fiber_wakeup() took too long (%d ticks)\n",
			 end_tick - start_tick);
		return;
	}

	/* all checks passed */
	test_failure = false;
}
/**
 * @brief Test LIFO operations that do not block, from task context.
 *
 * Populates the LIFO for the fiber to drain without waiting, then drains
 * items the fiber put (LIFO order: last in, first out) with TICKS_NONE,
 * and finally has ISRs populate the LIFO for the fiber to retrieve.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int taskLifoNonWaitTest(void)
{
	void *data;    /* ptr to data retrieved from LIFO */

	/*
	 * The fiber is presently waiting for <fiberWaitSem>. Populate the LIFO
	 * before waking the fiber.
	 */
	TC_PRINT("Fiber to get LIFO items without waiting\n");
	nano_task_lifo_put(&test_lifo, &lifoItem[2]);
	nano_task_lifo_put(&test_lifo, &lifoItem[3]);
	nano_task_sem_give(&fiberWaitSem);    /* Wake the fiber */

	/* Check that fiber received the items correctly */
	if (fiberDetectedFailure) {
		TC_ERROR(" *** nano_task_lifo_put()/nano_fiber_lifo_get() failure\n");
		return TC_FAIL;
	}

	/* Wait for the fiber to be ready */
	nano_task_sem_take(&taskWaitSem, TICKS_UNLIMITED);

	/* Drain in LIFO order: item[1] was pushed last by the fiber. */
	data = nano_task_lifo_get(&test_lifo, TICKS_NONE);
	if (data != (void *) &lifoItem[1]) {
		TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n");
		return TC_FAIL;
	}

	data = nano_task_lifo_get(&test_lifo, TICKS_NONE);
	if (data != (void *) &lifoItem[0]) {
		TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n");
		return TC_FAIL;
	}

	/* LIFO is now empty; a non-waiting get must return NULL. */
	data = nano_task_lifo_get(&test_lifo, TICKS_NONE);
	if (data != NULL) {
		TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n");
		return TC_FAIL;
	}

	/*
	 * Software interrupts have been configured so that when invoked,
	 * the ISR will add an item to the LIFO. The fiber (when unblocked)
	 * triggers software interrupts to get the items from the LIFO from
	 * within an ISR.
	 *
	 * Populate the LIFO.
	 */
	TC_PRINT("ISR to get LIFO items without waiting\n");
	isrLifoInfo.data = &lifoItem[3];
	_trigger_nano_isr_lifo_put();
	isrLifoInfo.data = &lifoItem[1];
	_trigger_nano_isr_lifo_put();

	isrLifoInfo.data = NULL;    /* Force NULL to ensure [data] changes */

	nano_task_sem_give(&fiberWaitSem);    /* Wake the fiber */

	if (fiberDetectedFailure) {
		TC_ERROR(" *** nano_isr_lifo_put()/nano_isr_lifo_get() failure\n");
		return TC_FAIL;
	}

	return TC_PASS;
}
/**
 * @brief Entry point for the nanokernel FIFO test.
 *
 * Starts three fibers, drives them through semaphore activations and FIFO
 * puts/gets on <nanoFifoObj> and <nanoFifoObj2>, then runs the get-wait,
 * ISR, and timeout sub-tests. Sets and reports the global <retCode>.
 */
void main(void)
{
	void *pData;      /* pointer to FIFO object get from the queue */
	int count = 0;    /* counter */

	TC_START("Test Nanokernel FIFO");

	/* Initialize the FIFO queues and semaphore */
	initNanoObjects();

	/* Create and start the three (3) fibers. */
	task_fiber_start(&fiberStack1[0], FIBER_STACKSIZE,
			 (nano_fiber_entry_t) fiber1, 0, 0, 7, 0);
	task_fiber_start(&fiberStack2[0], FIBER_STACKSIZE,
			 (nano_fiber_entry_t) fiber2, 0, 0, 7, 0);
	task_fiber_start(&fiberStack3[0], FIBER_STACKSIZE,
			 (nano_fiber_entry_t) fiber3, 0, 0, 7, 0);

	/*
	 * The three fibers have each blocked on a different semaphore. Giving
	 * the semaphore nanoSemObjX will unblock fiberX (where X = {1, 2, 3}).
	 *
	 * Activate fibers #1 and #2. They will each block on nanoFifoObj.
	 */
	nano_task_sem_give(&nanoSemObj1);
	nano_task_sem_give(&nanoSemObj2);

	/* Put two items into <nanoFifoObj> to unblock fibers #1 and #2. */
	nano_task_fifo_put(&nanoFifoObj, pPutList1[0]);    /* Wake fiber1 */
	nano_task_fifo_put(&nanoFifoObj, pPutList1[1]);    /* Wake fiber2 */

	/* Activate fiber #3 */
	nano_task_sem_give(&nanoSemObj3);

	/*
	 * All three fibers should be blocked on their semaphores. Put data into
	 * <nanoFifoObj2>. Fiber #3 will read it after it is reactivated.
	 */
	nano_task_fifo_put(&nanoFifoObj2, pPutList2[0]);
	nano_task_sem_give(&nanoSemObj3);    /* Reactivate fiber #3 */

	/* Drain queue2 and check the expected ordering of the items. */
	for (int i = 0; i < 4; i++) {
		pData = nano_task_fifo_get(&nanoFifoObj2, TICKS_UNLIMITED);
		if (pData != pPutList2[i]) {
			TC_ERROR("nano_task_fifo_get() expected 0x%x, got 0x%x\n",
				 pPutList2[i], pData);
			goto exit;
		}
	}

	/* Add items to <nanoFifoObj> for fiber #2 */
	for (int i = 0; i < 4; i++) {
		nano_task_fifo_put(&nanoFifoObj, pPutList1[i]);
	}

	nano_task_sem_give(&nanoSemObj2);    /* Activate fiber #2 */

	/* Wait for fibers to finish */
	nano_task_sem_take(&nanoSemObjTask, TICKS_UNLIMITED);

	if (retCode == TC_FAIL) {
		goto exit;
	}

	/*
	 * Entries in the FIFO queue have to be unique.
	 * Put data to queue.
	 */
	TC_PRINT("Test Task FIFO Put\n");
	TC_PRINT("\nTASK FIFO Put Order: ");
	for (int i = 0; i < NUM_FIFO_ELEMENT; i++) {
		nano_task_fifo_put(&nanoFifoObj, pPutList1[i]);
		TC_PRINT(" %p,", pPutList1[i]);
	}
	TC_PRINT("\n");
	PRINT_LINE;

	nano_task_sem_give(&nanoSemObj1);    /* Activate fiber1 */

	if (retCode == TC_FAIL) {
		goto exit;
	}

	/*
	 * Wait for fiber1 to complete execution. (Using a semaphore gives
	 * the fiber the freedom to do blocking-type operations if it wants to.)
	 */
	nano_task_sem_take(&nanoSemObjTask, TICKS_UNLIMITED);

	TC_PRINT("Test Task FIFO Get\n");

	/* Get all FIFOs */
	while ((pData = nano_task_fifo_get(&nanoFifoObj, TICKS_NONE)) != NULL) {
		TC_PRINT("TASK FIFO Get: count = %d, ptr is %p\n", count, pData);
		if ((count >= NUM_FIFO_ELEMENT) || (pData != pPutList2[count])) {
			TCERR1(count);
			retCode = TC_FAIL;
			goto exit;
		}
		count++;
	}

	/* Test FIFO Get Wait interfaces*/
	testTaskFifoGetW();
	PRINT_LINE;

	testIsrFifoFromTask();
	PRINT_LINE;

	/* test timeouts */
	if (test_fifo_timeout() != TC_PASS) {
		retCode = TC_FAIL;
		goto exit;
	}
	PRINT_LINE;

exit:
	TC_END_RESULT(retCode);
	TC_END_REPORT(retCode);
}
/*
 * The timeout test entry point.
 *
 * Exercises nano_task_sem_take_wait_timeout() with: a plain timeout, a zero
 * timeout, a fiber giving the sem before the timeout, TICKS_NONE and
 * TICKS_UNLIMITED with available/unavailable sems, fiber-side special tick
 * values, and multiple fibers with staggered timeouts.
 *
 * NOTE: a non-zero return from nano_task_sem_take_wait_timeout() means the
 * semaphore was obtained; zero means the call timed out.
 *
 * Returns TC_PASS on success, TC_FAIL on failure.
 */
static int test_timeout(void)
{
	int64_t orig_ticks;
	int32_t timeout;
	int rv;
	int test_data_size;
	struct reply_packet reply_packet;

	nano_sem_init(&sem_timeout[0]);
	nano_sem_init(&sem_timeout[1]);
	nano_fifo_init(&timeout_order_fifo);

	/* test nano_task_sem_take_wait_timeout() with timeout */
	timeout = 10;
	orig_ticks = nano_tick_get();
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
	if (rv) {
		/* rv != 0 means the sem was obtained, which must not happen */
		TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
		return TC_FAIL;
	}
	if ((nano_tick_get() - orig_ticks) < timeout) {
		TC_ERROR(" *** task did not wait long enough on timeout of %d.\n",
			 timeout);
		return TC_FAIL;
	}

	/* test nano_task_sem_take_wait_timeout with timeout of 0 */
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], 0);
	if (rv) {
		TC_ERROR(" *** timeout of 0 did not time out.\n");
		return TC_FAIL;
	}

	/* test nano_task_sem_take_wait_timeout with timeout > 0 */
	TC_PRINT("test nano_task_sem_take_wait_timeout with timeout > 0\n");
	timeout = 3;
	orig_ticks = nano_tick_get();
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
	if (rv) {
		TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
		return TC_FAIL;
	}
	if (!is_timeout_in_range(orig_ticks, timeout)) {
		return TC_FAIL;
	}
	TC_PRINT("nano_task_sem_take_wait_timeout timed out as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with a timeout and fiber that gives
	 * the semaphore on time
	 */
	timeout = 5;
	orig_ticks = nano_tick_get();
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_give_timeout, (int)&sem_timeout[0],
			 timeout, FIBER_PRIORITY, 0);
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0],
					     (int)(timeout + 5));
	if (!rv) {
		TC_ERROR(" *** timed out even if semaphore was given in time.\n");
		return TC_FAIL;
	}
	if (!is_timeout_in_range(orig_ticks, timeout)) {
		return TC_FAIL;
	}
	TC_PRINT("nano_task_sem_take_wait_timeout got sem in time, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_NONE and the
	 * semaphore unavailable.
	 */
	if (nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_NONE)) {
		TC_ERROR("task with TICKS_NONE got sem, but shouldn't have\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_NONE did not get sem, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_NONE and the
	 * semaphore available.
	 */
	nano_task_sem_give(&sem_timeout[0]);
	if (!nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_NONE)) {
		TC_ERROR("task with TICKS_NONE did not get available sem\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_NONE got available sem, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_UNLIMITED and the
	 * semaphore available.
	 */
	TC_PRINT("Trying to take available sem with TICKS_UNLIMITED:\n"
		 " will hang the test if it fails.\n");
	nano_task_sem_give(&sem_timeout[0]);
	if (!nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_UNLIMITED)) {
		TC_ERROR(" *** This will never be hit!!! .\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_UNLIMITED got available sem, as expected\n");

	/* test fiber with timeout of TICKS_NONE not getting empty semaphore */
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_NONE, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 0) {
		TC_ERROR(" *** fiber should not have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_NONE did not get sem, as expected\n");

	/* test fiber with timeout of TICKS_NONE getting full semaphore */
	nano_task_sem_give(&sem_timeout[0]);
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_NONE, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 1) {
		TC_ERROR(" *** fiber should have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_NONE got available sem, as expected\n");

	/* test fiber with timeout of TICKS_UNLIMITED getting full semaphore */
	nano_task_sem_give(&sem_timeout[0]);
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_UNLIMITED, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 1) {
		TC_ERROR(" *** fiber should have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_UNLIMITED got available sem, as expected\n");

	/* test multiple fibers pending on the same sem with different timeouts */
	test_data_size = ARRAY_SIZE(timeout_order_data);
	TC_PRINT("testing timeouts of %d fibers on same sem\n", test_data_size);
	rv = test_multiple_fibers_pending(timeout_order_data, test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not time out in the right order\n");
		return TC_FAIL;
	}

	/* test multiple fibers pending on different sems with different timeouts */
	test_data_size = ARRAY_SIZE(timeout_order_data_mult_sem);
	TC_PRINT("testing timeouts of %d fibers on different sems\n",
		 test_data_size);
	rv = test_multiple_fibers_pending(timeout_order_data_mult_sem,
					  test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not time out in the right order\n");
		return TC_FAIL;
	}

	/*
	 * test multiple fibers pending on same sem with different timeouts, but
	 * getting the semaphore in time, except the last one.
	 */
	test_data_size = ARRAY_SIZE(timeout_order_data);
	TC_PRINT("testing %d fibers timing out, but obtaining the sem in time\n"
		 "(except the last one, which times out)\n", test_data_size);
	rv = test_multiple_fibers_get_sem(timeout_order_data, test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not get the sem in the right order\n");
		return TC_FAIL;
	}

	return TC_PASS;
}
/**
 * @brief Entry point for the nanokernel CPU and thread routine tests.
 *
 * Runs the sub-tests in sequence (cpu idle, interrupt lock/unlock, irq
 * enable/disable, context queries, fiber spawning/yield, timeouts, and --
 * on non-Cortex-M3/M4 targets -- the divide-by-zero exception handler),
 * bailing to the report on the first failure.
 */
void main(void)
{
	int rv;    /* return value from tests */

	TC_START("Test Nanokernel CPU and thread routines");

	TC_PRINT("Initializing nanokernel objects\n");
	rv = initNanoObjects();
	if (rv != TC_PASS) {
		goto doneTests;
	}

	TC_PRINT("Testing nano_cpu_idle()\n");
	rv = nano_cpu_idleTest();
	if (rv != TC_PASS) {
		goto doneTests;
	}

	TC_PRINT("Testing interrupt locking and unlocking\n");
	rv = nanoCpuDisableInterruptsTest(irq_lockWrapper,
					  irq_unlockWrapper, -1);
	if (rv != TC_PASS) {
		goto doneTests;
	}

	/*
	 * The Cortex-M3/M4 use the SYSTICK exception for the system timer, which
	 * is not considered an IRQ by the irq_enable/Disable APIs.
	 */
#if !defined(CONFIG_CPU_CORTEX_M3_M4)
	/* Disable interrupts coming from the timer. */
	TC_PRINT("Testing irq_disable() and irq_enable()\n");
	rv = nanoCpuDisableInterruptsTest(irq_disableWrapper,
					  irq_enableWrapper, TICK_IRQ);
	if (rv != TC_PASS) {
		goto doneTests;
	}
#endif

	rv = nanoCtxTaskTest();
	if (rv != TC_PASS) {
		goto doneTests;
	}

	TC_PRINT("Spawning a fiber from a task\n");
	fiberEvidence = 0;
	task_fiber_start(fiberStack1, FIBER_STACKSIZE, fiberEntry,
			 (int) sys_thread_self_get(), 0, FIBER_PRIORITY, 0);

	if (fiberEvidence != 1) {
		rv = TC_FAIL;
		TC_ERROR(" - fiber did not execute as expected!\n");
		goto doneTests;
	}

	/*
	 * The fiber ran, now wake it so it can test sys_thread_self_get and
	 * sys_execution_context_type_get.
	 */
	TC_PRINT("Fiber to test sys_thread_self_get() and sys_execution_context_type_get\n");
	nano_task_sem_give(&wakeFiber);

	if (fiberDetectedError != 0) {
		rv = TC_FAIL;
		TC_ERROR(" - failure detected in fiber; fiberDetectedError = %d\n",
			 fiberDetectedError);
		goto doneTests;
	}

	TC_PRINT("Fiber to test fiber_yield()\n");
	nano_task_sem_give(&wakeFiber);

	if (fiberDetectedError != 0) {
		rv = TC_FAIL;
		TC_ERROR(" - failure detected in fiber; fiberDetectedError = %d\n",
			 fiberDetectedError);
		goto doneTests;
	}

	/* let the fiber run to completion */
	nano_task_sem_give(&wakeFiber);

	rv = test_timeout();
	if (rv != TC_PASS) {
		goto doneTests;
	}

	/* Cortex-M3/M4 does not implement connecting non-IRQ exception handlers */
#if !defined(CONFIG_CPU_CORTEX_M3_M4)
	/*
	 * Test divide by zero exception handler.
	 *
	 * WARNING: This code has been very carefully crafted so that it does
	 * what it is supposed to. Both "error" and "excHandlerExecuted" must be
	 * volatile to prevent the compiler from issuing a "divide by zero"
	 * warning (since otherwise it knows "excHandlerExecuted" is zero),
	 * and to ensure the compiler issues the two byte "idiv" instruction
	 * that the exception handler is designed to deal with.
	 */
	volatile int error;    /* used to create a divide by zero error */

	TC_PRINT("Verifying exception handler installed\n");
	excHandlerExecuted = 0;
	error = error / excHandlerExecuted;
	TC_PRINT("excHandlerExecuted: %d\n", excHandlerExecuted);

	rv = (excHandlerExecuted == 1) ? TC_PASS : TC_FAIL;
#endif

doneTests:
	TC_END_RESULT(rv);
	TC_END_REPORT(rv);
}