void testFiberStackPopW(void) { uint32_t data; /* data used to put and get from the stack queue */ int rc; TC_PRINT("Test Fiber STACK Pop Wait Interfaces\n\n"); rc = nano_fiber_stack_pop(&nanoStackObj2, &data, TICKS_UNLIMITED); TC_PRINT("FIBER STACK Pop from queue2: %d\n", data); /* Verify results */ if ((rc == 0) || (data != myData[0])) { retCode = TC_FAIL; TCERR2; return; } data = myData[1]; TC_PRINT("FIBER STACK Push to queue1: %d\n", data); nano_fiber_stack_push(&nanoStackObj, data); rc = nano_fiber_stack_pop(&nanoStackObj2, &data, TICKS_UNLIMITED); TC_PRINT("FIBER STACK Pop from queue2: %d\n", data); /* Verify results */ if ((rc == 0) || (data != myData[2])) { retCode = TC_FAIL; TCERR2; return; } data = myData[3]; TC_PRINT("FIBER STACK Push to queue1: %d\n", data); nano_fiber_stack_push(&nanoStackObj, data); TC_END_RESULT(retCode); } /* testFiberStackPopW */
void fiber1(void) { uint32_t data; /* data used to put and get from the stack queue */ int count = 0; /* counter */ TC_PRINT("Test Fiber STACK Pop\n\n"); /* Get all data */ while (nano_fiber_stack_pop(&nanoStackObj, &data, TICKS_NONE) != 0) { TC_PRINT("FIBER STACK Pop: count = %d, data is %d\n", count, data); if ((count >= NUM_STACK_ELEMENT) || (data != myData[NUM_STACK_ELEMENT - 1 - count])) { TCERR1(count); retCode = TC_FAIL; return; } count++; } TC_END_RESULT(retCode); PRINT_LINE; /* Put data */ TC_PRINT("Test Fiber STACK Push\n"); TC_PRINT("\nFIBER STACK Put Order: "); for (int i=NUM_STACK_ELEMENT; i>0; i--) { nano_fiber_stack_push(&nanoStackObj, myData[i-1]); TC_PRINT(" %d,", myData[i-1]); } TC_PRINT("\n"); PRINT_LINE; /* Give semaphore to allow the main task to run */ nano_fiber_sem_give(&nanoSemObj); } /* fiber1 */
/**
 *
 * @brief Stack test fiber
 *
 * Consumes value pairs (even then odd) from nano_stack_1, validating each
 * one, and echoes them onto nano_stack_2. Stops early on a mismatch.
 *
 * @param par1 Ignored parameter.
 * @param par2 Number of test loops.
 *
 * @return N/A
 *
 */
void stack_fiber1(int par1, int par2)
{
	uint32_t value;
	int loop;

	ARG_UNUSED(par1);

	for (loop = 0; loop < par2 / 2; loop++) {
		/* even half of the pair */
		nano_fiber_stack_pop(&nano_stack_1, &value, TICKS_UNLIMITED);
		if (value != 2 * loop) {
			break;
		}
		value = 2 * loop;
		nano_fiber_stack_push(&nano_stack_2, value);

		/* odd half of the pair */
		nano_fiber_stack_pop(&nano_stack_1, &value, TICKS_UNLIMITED);
		if (value != 2 * loop + 1) {
			break;
		}
		value = 2 * loop + 1;
		nano_fiber_stack_push(&nano_stack_2, value);
	}
}
/**
 *
 * @brief Stack test fiber
 *
 * Sends successive values on nano_stack_1, blocks for the echoed reply on
 * nano_stack_2, and bumps the caller-supplied counter once per verified
 * round trip.
 *
 * @param par1 Address of the counter.
 * @param par2 Number of test cycles.
 *
 * @return N/A
 *
 */
void stack_fiber2(int par1, int par2)
{
	uint32_t value;
	int cycle;
	int *counter = (int *) par1;  /* round trips completed successfully */

	for (cycle = 0; cycle < par2; cycle++) {
		value = cycle;
		nano_fiber_stack_push(&nano_stack_1, value);

		/* block until the peer echoes the value back */
		nano_fiber_stack_pop(&nano_stack_2, &value, TICKS_UNLIMITED);
		if (value != cycle) {
			break;
		}
		(*counter)++;
	}
}
void stack_fiber3(int par1, int par2) { int i; uint32_t data; int * pcounter = (int *) par1; for (i = 0; i < par2; i++) { data = i; nano_fiber_stack_push(&nano_stack_1, data); data = 0xffffffff; while (!nano_fiber_stack_pop(&nano_stack_2, &data)) { fiber_yield(); } if (data != i) { break; } (*pcounter)++; } }
/**
 *
 * @brief The microkernel thread entry point
 *
 * This function implements the microkernel fiber. It waits for command
 * packets to arrive on its command stack. It executes all commands on the
 * stack and then sets up the next task that is ready to run. Next it
 * goes to wait on further inputs on the command stack.
 *
 * Each entry popped from the command stack is a tagged pointer: the low
 * bits (KERNEL_CMD_TYPE_MASK) encode whether it is a command packet, an
 * event to signal, or a semaphore to give.
 *
 * @return Does not return.
 */
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
	struct k_args *pArgs;      /* current command stack entry */
	struct k_task *pNextTask;  /* task selected to run next */

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	/* indicate that failure of this fiber may be fatal to the entire system */
	_nanokernel.current->flags |= ESSENTIAL;

	while (1) { /* forever */
		/* block until at least one command arrives */
		(void) nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
						TICKS_UNLIMITED); /* will schedule */
		do {
			/* low bits of the pointer carry the entry's type tag */
			int cmd_type = (int)pArgs & KERNEL_CMD_TYPE_MASK;

			if (cmd_type == KERNEL_CMD_PACKET_TYPE) {
				/* process command packet */
#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_KSERV) {
					_k_task_monitor_args(pArgs);
				}
#endif
				/* dispatch to the packet's command handler */
				(*pArgs->Comm)(pArgs);
			} else if (cmd_type == KERNEL_CMD_EVENT_TYPE) {
				/* give event */
#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_EVENT) {
					_k_task_monitor_args(pArgs);
				}
#endif
				/* strip the type tag to recover the event id */
				kevent_t event = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_do_event_signal(event);
			} else { /* cmd_type == KERNEL_CMD_SEMAPHORE_TYPE */
				/* give semaphore */
#ifdef CONFIG_TASK_MONITOR
				/* task monitoring for giving semaphore not implemented */
#endif
				/* strip the type tag to recover the semaphore pointer */
				ksem_t sem = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_sem_struct_value_update(1, (struct _k_sem_struct *)sem);
			}

			/*
			 * check if another fiber (of equal or greater priority)
			 * needs to run
			 */
			if (_nanokernel.fiber) {
				fiber_yield();
			}
		} while (nano_fiber_stack_pop(&_k_command_stack,
						(uint32_t *)&pArgs, TICKS_NONE));

		/* command stack drained: pick the next runnable task */
		pNextTask = next_task_select();

		if (_k_current_task != pNextTask) {
			/*
			 * switch from currently selected task to a different
			 * one
			 */
#ifdef CONFIG_WORKLOAD_MONITOR
			/* id 0x00000000 is the idle task */
			if (pNextTask->id == 0x00000000) {
				_k_workload_monitor_idle_start();
			} else if (_k_current_task->id == 0x00000000) {
				_k_workload_monitor_idle_end();
			}
#endif
			_k_current_task = pNextTask;
			_nanokernel.task = (struct tcs *)pNextTask->workspace;
#ifdef CONFIG_TASK_MONITOR
			if (_k_monitor_mask & MON_TSWAP) {
				_k_task_monitor(_k_current_task, 0);
			}
#endif
		}
	}

	/*
	 * Code analyzers may complain that _k_server() uses an infinite loop
	 * unless we indicate that this is intentional
	 */
	CODE_UNREACHABLE;
}