void RegressionTaskEntry(void) { int tcRC; nano_sem_init(&test_nano_timers_sem); PRINT_DATA("Starting timer tests\n"); PRINT_LINE; task_fiber_start(test_nano_timers_stack, 512, test_nano_timers, 0, 0, 5, 0); /* Test the task_timer_alloc() API */ TC_PRINT("Test the allocation of timers\n"); tcRC = testLowTimerGet(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the one shot feature of a timer\n"); tcRC = testLowTimerOneShot(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test that a timer does not start\n"); tcRC = testLowTimerDoesNotStart(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the periodic feature of a timer\n"); tcRC = testLowTimerPeriodicity(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the stopping of a timer\n"); tcRC = testLowTimerStop(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Verifying the nanokernel timer fired\n"); if (!nano_task_sem_take(&test_nano_timers_sem)) { tcRC = TC_FAIL; goto exitRtn; } TC_PRINT("Verifying the nanokernel timeouts worked\n"); tcRC = task_sem_take_wait_timeout(test_nano_timeouts_sem, SECONDS(5)); tcRC = tcRC == RC_OK ? TC_PASS : TC_FAIL; exitRtn: TC_END_RESULT(tcRC); TC_END_REPORT(tcRC); }
void RegressionTaskEntry(void) { int tc_result; /* test result code */ uint32_t rnd_values[N_VALUES]; int i; PRINT_DATA("Starting random number tests\n"); PRINT_LINE; /* * Test subsequently calls sys_rand32_get(), checking * that two values are not equal. */ PRINT_DATA("Generating random numbers\n"); /* * Get several subsequent numbers as fast as possible. * If random number generator is based on timer, check * the situation when random number generator is called * faster than timer clock ticks. * In order to do this, make several subsequent calls * and save results in an array to verify them on the * next step */ for (i = 0; i < N_VALUES; i++) { rnd_values[i] = sys_rand32_get(); } for (tc_result = TC_PASS, i = 1; i < N_VALUES; i++) { if (rnd_values[i - 1] == rnd_values[i]) { tc_result = TC_FAIL; break; } } if (tc_result == TC_FAIL) { TC_ERROR("random number subsequent calls\n" "returned same value %d\n", rnd_values[i]); } else { PRINT_DATA("Generated %d values with expected randomness\n", N_VALUES); } TC_END_RESULT(tc_result); TC_END_REPORT(tc_result); }
void print_data(uint8_t type, void *data, uint8_t data_length) { // Depending on the type ID stored, print the data as a different type switch (type) { case TYPE_INT16: PRINT_DATA(data, data_length, int16_t, "%d"); break; case TYPE_INT32: PRINT_DATA(data, data_length, int32_t, "%d"); break; case TYPE_FLOAT32: PRINT_DATA(data, data_length, float, "%.7f"); break; case TYPE_FLOAT64: PRINT_DATA(data, data_length, double, "%.15f"); break; case TYPE_ASCII: PRINT_DATA(data, data_length, char, "%c"); break; } }
/*
 * /proc read callback: format the micro-controller link statistics into
 * 'page' and return the number of bytes available at 'off'.
 *
 * NOTE(review): PRINT_DATA appears to be a macro that appends one
 * "name : value" line via the local 'p' and a g_statistics field named
 * by its first argument — confirm against the macro definition; the
 * local names 'p'/'page' must not be changed without checking it.
 */
int h3600_micro_proc_stats_read(char *page, char **start, off_t off,
				int count, int *eof, void *data)
{
	char *p = page;		/* running write cursor into the page buffer */
	int len;
	int i;

	/* One counter line per statistic */
	PRINT_DATA(isr, "Interrupts");
	PRINT_DATA(tx, "Bytes transmitted");
	PRINT_DATA(rx, "Bytes received");
	PRINT_DATA(frame, "Frame errors");
	PRINT_DATA(overrun, "Overrun errors");
	PRINT_DATA(parity, "Parity errors");
	PRINT_DATA(pass_limit, "Pass limit");
	PRINT_DATA(missing_sof, "Missing SOF");
	PRINT_DATA(bad_checksum, "Bad checksums");
	PRINT_DATA(timeouts, "ACK timeouts");

	/* Per-message-type table: 16 entries indexed by message ID */
	p += sprintf(p,"\nMessages Sent Rcvd Jiffs Timeout\n");
	for ( i = 0 ; i < 16 ; i++ )
		p += sprintf(p,"%-18s %6d %6d %6d %6d\n",
			     g_handlers[i].name,
			     g_statistics.msg[i].sent,
			     g_statistics.msg[i].received,
			     g_statistics.msg[i].total_ack_time,
			     g_statistics.msg[i].timeouts);

	/* Standard read_proc bookkeeping: report how much data lies at
	 * or beyond 'off', clamp negative lengths, and flag EOF when the
	 * remainder fits in the caller's buffer.
	 */
	len = (p - page) - off;
	if (len < 0)
		len = 0;
	*eof = (len <= count) ? 1 : 0;
	*start = page + off;
	return len;
}
/*
 * /proc read callback: format the Stowaway keyboard link statistics into
 * 'page' and return the number of bytes available at 'off'.
 *
 * NOTE(review): PRINT_DATA presumably appends one "name : value" line via
 * the local 'p' and a statistics field named by its first argument —
 * confirm against the macro definition before renaming locals.
 */
int h3600_stowaway_proc_stats_read(char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	char *p = page;		/* running write cursor into the page buffer */
	int len;

	/* One counter line per statistic */
	PRINT_DATA(dcd, "DCD interrupts");
	PRINT_DATA(isr, "Keyboard interrupts");
	PRINT_DATA(rx, "Bytes received");
	PRINT_DATA(frame, "Frame errors");
	PRINT_DATA(overrun, "Overrun errors");
	PRINT_DATA(parity, "Parity errors");
	p += sprintf(p,"%-20s : %d\n", "Usage count",
		     g_keyboard.usage_count);

	/* Standard read_proc bookkeeping: report how much data lies at
	 * or beyond 'off', clamp negative lengths, and flag EOF when the
	 * remainder fits in the caller's buffer.
	 */
	len = (p - page) - off;
	if (len < 0)
		len = 0;
	*eof = (len <= count) ? 1 : 0;
	*start = page + off;
	return len;
}
void load_store_high(void) { unsigned int i; unsigned char init_byte; unsigned char *reg_set_ptr = (unsigned char *)&float_reg_set; /* test until the specified time limit, or until an error is detected */ while (1) { /* * Initialize the float_reg_set structure by treating it as * a simple array of bytes (the arrangement and actual number * of registers is not important for this generic C code). The * structure is initialized by using the byte value specified * by the constant FIBER_FLOAT_REG_CHECK_BYTE, and then * incrementing the value for each successive location in the * float_reg_set structure. * * The initial byte value, and thus the contents of the entire * float_reg_set structure, must be different for each * thread to effectively test the kernel's ability to * properly save/restore the floating point values during a * context switch. */ init_byte = FIBER_FLOAT_REG_CHECK_BYTE; for (i = 0; i < SIZEOF_FP_REGISTER_SET; i++) { reg_set_ptr[i] = init_byte++; } /* * Utilize an architecture specific function to load all the * floating point registers with the contents of the * float_reg_set structure. * * The goal of the loading all floating point registers with * values that differ from the values used in other threads is * to help determine whether the floating point register * save/restore mechanism in the kernel's context switcher * is operating correctly. * * When a subsequent k_timer_test() invocation is * performed, a (cooperative) context switch back to the * preempted task will occur. This context switch should result * in restoring the state of the task's floating point * registers when the task was swapped out due to the * occurrence of the timer tick. */ _load_then_store_all_float_registers(&float_reg_set); /* * Relinquish the processor for the remainder of the current * system clock tick, so that lower priority threads get a * chance to run. 
* * This exercises the ability of the kernel to restore the * FPU state of a low priority thread _and_ the ability of the * kernel to provide a "clean" FPU state to this thread * once the sleep ends. */ k_sleep(1); /* periodically issue progress report */ if ((++load_store_high_count % 100) == 0) { PRINT_DATA("Load and store OK after %u (high) " "+ %u (low) tests\n", load_store_high_count, load_store_low_count); } /* terminate testing if specified limit has been reached */ if (load_store_high_count == MAX_TESTS) { TC_END_RESULT(TC_PASS); TC_END_REPORT(TC_PASS); return; } } }
/*
 * Low priority thread of the FPU sharing test: loads all FP registers
 * with a known pattern, deliberately spins so the high priority thread
 * preempts while the registers are live, then stores the registers back
 * and verifies the pattern survived the context switches.
 *
 * NOTE: the load -> spin -> store ordering is the test itself; no FP
 * operations may be inserted between those steps.
 */
void load_store_low(void)
{
	unsigned int i;
	unsigned char init_byte;
	unsigned char *store_ptr = (unsigned char *)&float_reg_set_store;
	unsigned char *load_ptr = (unsigned char *)&float_reg_set_load;

	volatile char volatile_stack_var = 0;

	PRINT_DATA("Floating point sharing tests started\n");
	PRINT_LINE;

	/*
	 * The high priority thread has a sleep to get this (low pri) thread
	 * running and here (low priority) we enable slicing and waste cycles
	 * to run hi pri thread in between fp ops.
	 *
	 * Enable round robin scheduling to allow both the low priority pi
	 * computation and load/store tasks to execute. The high priority pi
	 * computation and load/store tasks will preempt the low priority tasks
	 * periodically.
	 */
	k_sched_time_slice_set(10, LO_PRI);

	/*
	 * Initialize floating point load buffer to known values;
	 * these values must be different than the value used in other threads.
	 */
	init_byte = MAIN_FLOAT_REG_CHECK_BYTE;
	for (i = 0; i < SIZEOF_FP_REGISTER_SET; i++) {
		load_ptr[i] = init_byte++;
	}

	/* Keep cranking forever, or until an error is detected. */
	for (load_store_low_count = 0; ; load_store_low_count++) {

		/*
		 * Clear store buffer to erase all traces of any previous
		 * floating point values that have been saved.
		 */
		memset(&float_reg_set_store, 0, SIZEOF_FP_REGISTER_SET);

		/*
		 * Utilize an architecture specific function to load all the
		 * floating point registers with known values.
		 */
		_load_all_float_registers(&float_reg_set_load);

		/*
		 * Waste some cycles to give the high priority load/store
		 * thread an opportunity to run when the low priority thread is
		 * using the floating point registers.
		 *
		 * IMPORTANT: This logic requires that sys_tick_get_32() not
		 * perform any floating point operations!
		 */
		while ((_tick_get_32() % 5) != 0) {
			/*
			 * Use a volatile variable to prevent compiler
			 * optimizing out the spin loop.
			 */
			++volatile_stack_var;
		}

		/*
		 * Utilize an architecture specific function to dump the
		 * contents of all floating point registers to memory.
		 */
		_store_all_float_registers(&float_reg_set_store);

		/*
		 * Compare each byte of buffer to ensure the expected value is
		 * present, indicating that the floating point registers weren't
		 * impacted by the operation of the high priority thread(s).
		 *
		 * Display error message and terminate if discrepancies are
		 * detected.
		 */
		init_byte = MAIN_FLOAT_REG_CHECK_BYTE;
		for (i = 0; i < SIZEOF_FP_REGISTER_SET; i++) {
			if (store_ptr[i] != init_byte) {
				TC_ERROR("load_store_low found 0x%x instead "
					 "of 0x%x @ offset 0x%x\n",
					 store_ptr[i], init_byte, i);
				TC_ERROR("Discrepancy found during "
					 "iteration %d\n",
					 load_store_low_count);
				fpu_sharing_error = 1;
			}
			init_byte++;
		}

		/*
		 * Terminate if a test error has been reported.
		 */
		if (fpu_sharing_error) {
			TC_END_RESULT(TC_FAIL);
			TC_END_REPORT(TC_FAIL);
			return;
		}

#if defined(CONFIG_ISA_IA32)
		/*
		 * After every 1000 iterations (arbitrarily chosen), explicitly
		 * disable floating point operations for the task. The
		 * subsequent execution of _load_all_float_registers() will
		 * result in an exception to automatically re-enable
		 * floating point support for the task.
		 *
		 * The purpose of this part of the test is to exercise the
		 * k_float_disable() API, and to also continue exercising
		 * the (exception based) floating enabling mechanism.
		 */
		if ((load_store_low_count % 1000) == 0) {
			k_float_disable(k_current_get());
		}
#elif defined(CONFIG_CPU_CORTEX_M4)
		/*
		 * The routine k_float_disable() allows for thread-level
		 * granularity for disabling floating point. Furthermore, it
		 * is useful for testing on the fly thread enabling of floating
		 * point. Neither of these capabilities are currently supported
		 * for ARM.
		 */
#endif
	}
}