int pipePutTest(void) { int rv; /* return value from pipePutTestWork() */ rv = pipePutTestWork(all_N, ARRAY_SIZE(all_N), many_all_N, ARRAY_SIZE(many_all_N)); if (rv != TC_PASS) { TC_ERROR("Failed on _ALL_N/many_ALL_N test\n"); return TC_FAIL; } rv = pipePutTestWork(one_to_N, ARRAY_SIZE(one_to_N), many_one_to_N, ARRAY_SIZE(many_one_to_N)); if (rv != TC_PASS) { TC_ERROR("Failed on _1_TO_N/many_1_TO_N test\n"); return TC_FAIL; } rv = pipePutTestWork(zero_to_N, ARRAY_SIZE(zero_to_N), many_zero_to_N, ARRAY_SIZE(many_zero_to_N)); if (rv != TC_PASS) { TC_ERROR("Failed on _0_TO_N/many_0_TO_N test\n"); return TC_FAIL; } return TC_PASS; }
int HighPriTask(void) { int status; /* Wait until task is activated */ status = task_sem_take(hpSem, TICKS_UNLIMITED); if (status != RC_OK) { TC_ERROR("%s priority task failed to wait on %s: %d\n", "High", "HIGH_PRI_SEM", status); return TC_FAIL; } /* Wait on a semaphore along with other tasks */ status = task_sem_take(manyBlockSem, TICKS_UNLIMITED); if (status != RC_OK) { TC_ERROR("%s priority task failed to wait on %s: %d\n", "High", "MANY_BLOCKED_SEM", status); return TC_FAIL; } /* Inform Regression test HP task is no longer blocked on MANY_BLOCKED_SEM*/ task_sem_give(blockHpSem); return TC_PASS; }
/**
 * @brief Test allocation and freeing of microkernel timers
 *
 * Allocates all NTIMERS timers (twice), verifying that each allocated timer
 * ID is unique and (via a whitebox peek at the free list) that the pool is
 * fully drained before the timers are freed again.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int testLowTimerGet(void)
{
	int i;
	int j;
	int k;

	for (j = 0; j < 2; j++) {
		for (i = 0; i < NTIMERS; i++) {
			pTimer[i] = task_timer_alloc();
			/* Each timer ID must differ from all earlier ones */
			for (k = 0; k < i; k++) {
				if (pTimer[i] == pTimer[k]) {
					TC_ERROR("** task_timer_alloc() did not return a unique "
						 "timer ID.\n");
					return TC_FAIL;
				}
			}
		}

		/* Whitebox test to ensure that all timers were allocated. */
		if (_k_timer_free.list != NULL) {
			TC_ERROR("** Not all timers were allocated!\n");
			/* BUG FIX: previously fell through and could still
			 * report TC_PASS despite the detected failure.
			 */
			return TC_FAIL;
		}

		for (i = 0; i < NTIMERS; i++) {
			task_timer_free(pTimer[i]);
		}
	}

	return TC_PASS;
}
/**
 * @brief Test non-blocking semaphore give/take from fiber context
 *
 * Gives the test semaphore 32 times, then verifies it can be taken exactly
 * 32 times and that the 33rd take fails.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int testSemFiberNoWait(void)
{
	int count;

	TC_PRINT("Giving and taking a semaphore in a fiber (non-blocking)\n");

	/* Give the semaphore a fixed number of times... */
	for (count = 0; count < 32; count++) {
		nano_fiber_sem_give(&testSem);
	}

	/* ...then confirm it can be taken exactly that many times */
	for (count = 0; count < 32; count++) {
		if (nano_fiber_sem_take(&testSem) != 1) {
			TC_ERROR(" *** Expected nano_fiber_sem_take() to succeed, not fail\n");
			goto errorReturn;
		}
	}

	/* The count is now exhausted, so one more take must fail */
	if (nano_fiber_sem_take(&testSem) != 0) {
		TC_ERROR(" *** Expected nano_fiber_sem_take() to fail, not succeed\n");
		goto errorReturn;
	}

	return TC_PASS;

errorReturn:
	fiberDetectedFailure = 1;
	return TC_FAIL;
}
/**
 * @brief Test non-blocking semaphore give/take from task context
 *
 * Gives the test semaphore 32 times, then verifies it can be taken exactly
 * 32 times and that the 33rd take fails.
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int testSemTaskNoWait(void)
{
	int count;

	TC_PRINT("Giving and taking a semaphore in a task (non-blocking)\n");

	/* Give the semaphore a fixed number of times... */
	for (count = 0; count < 32; count++) {
		nano_task_sem_give(&testSem);
	}

	/* ...then confirm it can be taken exactly that many times */
	for (count = 0; count < 32; count++) {
		if (nano_task_sem_take(&testSem) != 1) {
			TC_ERROR(" *** Expected nano_task_sem_take() to succeed, not fail\n");
			goto errorReturn;
		}
	}

	/* The count is now exhausted, so one more take must fail */
	if (nano_task_sem_take(&testSem) != 0) {
		TC_ERROR(" *** Expected nano_task_sem_take() to fail, not succeed!\n");
		goto errorReturn;
	}

	return TC_PASS;

errorReturn:
	return TC_FAIL;
}
int idt_stub_test(void) { struct segment_descriptor *p_idt_entry; u32_t offset; /* Check for the interrupt stub */ p_idt_entry = (struct segment_descriptor *) (_idt_base_address + (TEST_SOFT_INT << 3)); offset = (u32_t)(&int_stub); if (DTE_OFFSET(p_idt_entry) != offset) { TC_ERROR("Failed to find offset of int_stub (0x%x) at vector %d\n", offset, TEST_SOFT_INT); return TC_FAIL; } /* Check for the exception stub */ p_idt_entry = (struct segment_descriptor *) (_idt_base_address + (IV_DIVIDE_ERROR << 3)); offset = (u32_t)(&_EXCEPTION_STUB_NAME(exc_divide_error_handler, 0)); if (DTE_OFFSET(p_idt_entry) != offset) { TC_ERROR("Failed to find offset of exc stub (0x%x) at vector %d\n", offset, IV_DIVIDE_ERROR); return TC_FAIL; } /* * If the other fields are wrong, the system will crash when the exception * and software interrupt are triggered so we don't check them. */ return TC_PASS; }
static int do_test_multiple_waiters(void) { int ii; /* pend all fibers one the same lifo */ for (ii = 0; ii < NUM_WAITERS; ii++) { task_fiber_start(fiber_multi_waiters_stacks[ii], FIBER_STACKSIZE, fiber_multi_waiters, ii, 0, FIBER_PRIORITY, 0); } /* wake up all the fibers: the task is preempted each time */ for (ii = 0; ii < NUM_WAITERS; ii++) { nano_task_lifo_put(&multi_waiters, &multi_waiters_items[ii]); } /* reply_multi_waiters will have been given once for each fiber */ for (ii = 0; ii < NUM_WAITERS; ii++) { if (!nano_task_sem_take(&reply_multi_waiters, TICKS_NONE)) { TC_ERROR(" *** Cannot take sem supposedly given by waiters.\n"); return TC_FAIL; } } TC_PRINT("Task took multi-waiter reply semaphore %d times, as expected.\n", NUM_WAITERS); if (nano_task_lifo_get(&multi_waiters, TICKS_NONE)) { TC_ERROR(" *** multi_waiters should have been empty.\n"); return TC_FAIL; } return TC_PASS; }
int testSemWait(void) { if (fiberDetectedFailure != 0) { TC_ERROR(" *** Failure detected in the fiber."); return TC_FAIL; } nano_task_sem_give(&testSem); /* Wake the fiber. */ if (semTestState != STS_TASK_WOKE_FIBER) { TC_ERROR(" *** Expected task to wake fiber. It did not.\n"); return TC_FAIL; } TC_PRINT("Semaphore from the task woke the fiber\n"); nano_task_sem_take_wait(&testSem); /* Wait on <testSem> */ if (semTestState != STS_FIBER_WOKE_TASK) { TC_ERROR(" *** Expected fiber to wake task. It did not.\n"); return TC_FAIL; } TC_PRINT("Semaphore from the fiber woke the task\n"); nano_task_sem_take_wait(&testSem); /* Wait on <testSem> again. */ if (semTestState != STS_ISR_WOKE_TASK) { TC_ERROR(" *** Expected ISR to wake task. It did not.\n"); return TC_FAIL; } TC_PRINT("Semaphore from the ISR woke the task.\n"); return TC_PASS; }
int pool_block_get_wait_test(void) { int rv; rv = k_mem_pool_alloc(&POOL_ID, &block_list[0], 3000, K_FOREVER); if (rv != 0) { TC_ERROR("k_mem_pool_alloc(3000) expected %d, got %d\n", 0, rv); return TC_FAIL; } k_sem_give(&ALTERNATE_SEM); /* Wake alternate_task */ evidence = 0; rv = k_mem_pool_alloc(&POOL_ID, &block_list[1], 128, K_FOREVER); if (rv != 0) { TC_ERROR("k_mem_pool_alloc(128) expected %d, got %d\n", 0, rv); return TC_FAIL; } switch (evidence) { case 0: TC_ERROR("k_mem_pool_alloc(128) did not block!\n"); return TC_FAIL; case 1: break; case 2: default: TC_ERROR("Rescheduling did not occur " "after k_mem_pool_free()\n"); return TC_FAIL; } k_mem_pool_free(&block_list[1]); return TC_PASS; }
void fiber1(void) { void *pData; /* pointer to FIFO object get from the queue */ int count = 0; /* counter */ /* Wait for fiber1 to be activated. */ nano_fiber_sem_take_wait(&nanoSemObj1); /* Wait for data to be added to <nanoFifoObj> by task */ pData = nano_fiber_fifo_get_wait(&nanoFifoObj); if (pData != pPutList1[0]) { TC_ERROR("fiber1 (1) - expected 0x%x, got 0x%x\n", pPutList1[0], pData); retCode = TC_FAIL; return; } /* Wait for data to be added to <nanoFifoObj2> by fiber3 */ pData = nano_fiber_fifo_get_wait(&nanoFifoObj2); if (pData != pPutList2[0]) { TC_ERROR("fiber1 (2) - expected 0x%x, got 0x%x\n", pPutList2[0], pData); retCode = TC_FAIL; return; } nano_fiber_sem_take_wait(&nanoSemObj1); /* Wait for fiber1 to be reactivated */ TC_PRINT("Test Fiber FIFO Get\n\n"); /* Get all FIFOs */ while ((pData = nano_fiber_fifo_get(&nanoFifoObj)) != NULL) { TC_PRINT("FIBER FIFO Get: count = %d, ptr is %p\n", count, pData); if ((count >= NUM_FIFO_ELEMENT) || (pData != pPutList1[count])) { TCERR1(count); retCode = TC_FAIL; return; } count++; } TC_END_RESULT(retCode); PRINT_LINE; /* * Entries in the FIFO queue have to be unique. * Put data. */ TC_PRINT("Test Fiber FIFO Put\n"); TC_PRINT("\nFIBER FIFO Put Order: "); for (int i=0; i<NUM_FIFO_ELEMENT; i++) { nano_fiber_fifo_put(&nanoFifoObj, pPutList2[i]); TC_PRINT(" %p,", pPutList2[i]); } TC_PRINT("\n"); PRINT_LINE; /* Give semaphore to allow the main task to run */ nano_fiber_sem_give(&nanoSemObjTask); } /* fiber1 */
int pipeGetWaitTest(void) { int rv; /* return code from pipeGetWaitTestWork() */ int bytesRead; /* # of bytes read from task_pipe_get_waitait() */ task_sem_give(altSem); /* Wake AlternateTask */ rv = pipeGetWaitTestWork(wait_all_N, ARRAY_SIZE(wait_all_N)); if (rv != TC_PASS) { TC_ERROR("Failed on _ALL_N test\n"); return TC_FAIL; } rv = pipeGetWaitTestWork(wait_one_to_N, ARRAY_SIZE(wait_one_to_N)); if (rv != TC_PASS) { TC_ERROR("Failed on _1_TO_N test\n"); return TC_FAIL; } rv = task_pipe_get_wait(pipeId, rxBuffer, PIPE_SIZE, &bytesRead, _0_TO_N); if (rv != RC_FAIL) { TC_ERROR("Expected return code of %d, not %d\n", RC_FAIL, rv); return TC_FAIL; } return TC_PASS; }
int pipePutTimeoutHelper(void) { int i; /* loop counter */ int rv; /* return value from task_pipe_get_wait_timeout() */ int bytesRead; /* # of bytes read from task_pipe_get_wait_timeout() */ (void)task_sem_take_wait(altSem); /* Wait until test is ready */ /* 1. task_pipe_get_wait_timeout() will force a context switch to RegressionTask() */ rv = task_pipe_get_wait_timeout(pipeId, rxBuffer, PIPE_SIZE, &bytesRead, _ALL_N, ONE_SECOND); if ((rv != RC_OK) || (bytesRead != PIPE_SIZE)) { TC_ERROR("Expected return code %d, not %d\n" "Expected %d bytes to be read, not %d\n", RC_OK, rv, PIPE_SIZE, bytesRead); return TC_FAIL; } /* 2. task_pipe_get_wait_timeout() will force a context switch to RegressionTask(). */ rv = task_pipe_get_wait_timeout(pipeId, rxBuffer, PIPE_SIZE, &bytesRead, _1_TO_N, ONE_SECOND); if ((rv != RC_OK) || (bytesRead != PIPE_SIZE)) { TC_ERROR("Expected return code %d, not %d\n" "Expected %d bytes to be read, not %d\n", RC_OK, rv, PIPE_SIZE, bytesRead); return TC_FAIL; } /* * Before emptying the pipe, check that task_pipe_get_wait_timeout() fails when * using the _0_TO_N option. */ rv = task_pipe_get_wait_timeout(pipeId, rxBuffer, PIPE_SIZE / 2, &bytesRead, _0_TO_N, ONE_SECOND); if (rv != RC_FAIL) { TC_ERROR("Expected return code %d, not %d\n", RC_FAIL, rv); return TC_FAIL; } /* 3. Empty the pipe in two reads */ for (i = 0; i < 2; i++) { rv = task_pipe_get(pipeId, rxBuffer, PIPE_SIZE / 2, &bytesRead, _0_TO_N); if ((rv != RC_OK) || (bytesRead != PIPE_SIZE / 2)) { TC_ERROR("Expected return code %d, not %d\n" "Expected %d bytes to be read, not %d\n", RC_OK, rv, PIPE_SIZE / 2, bytesRead); return TC_FAIL; } } task_sem_give(regSem); return TC_PASS; }
int testMapGetAllBlocks(void **p) { int retValue; /* task_mem_map_xxx interface return value */ void *errPtr; /* Pointer to block */ TC_PRINT("Function %s\n", __func__); /* Number of blocks in the map is defined in MDEF file */ for (int i = 0; i < NUMBLOCKS; i++) { /* Verify number of used blocks in the map */ retValue = task_mem_map_used_get(MAP_LgBlks); if (verifyRetValue(i, retValue)) { TC_PRINT("MAP_LgBlks used %d blocks\n", retValue); } else { TC_ERROR("Failed task_mem_map_used_get for MAP_LgBlks, i=%d, retValue=%d\n", i, retValue); return TC_FAIL; } /* Get memory block */ retValue = task_mem_map_alloc(MAP_LgBlks, &p[i], TICKS_NONE); if (verifyRetValue(RC_OK, retValue)) { TC_PRINT(" task_mem_map_alloc OK, p[%d] = %p\n", i, p[i]); } else { TC_ERROR("Failed task_mem_map_alloc, i=%d, retValue %d\n", i, retValue); return TC_FAIL; } } /* for */ /* Verify number of used blocks in the map - expect all blocks are used */ retValue = task_mem_map_used_get(MAP_LgBlks); if (verifyRetValue(NUMBLOCKS, retValue)) { TC_PRINT("MAP_LgBlks used %d blocks\n", retValue); } else { TC_ERROR("Failed task_mem_map_used_get for MAP_LgBlks, retValue %d\n", retValue); return TC_FAIL; } /* Try to get one more block and it should fail */ retValue = task_mem_map_alloc(MAP_LgBlks, &errPtr, TICKS_NONE); if (verifyRetValue(RC_FAIL, retValue)) { TC_PRINT(" task_mem_map_alloc RC_FAIL expected as all (%d) blocks are used.\n", NUMBLOCKS); } else { TC_ERROR("Failed task_mem_map_alloc, expect RC_FAIL, got %d\n", retValue); return TC_FAIL; } PRINT_LINE; return TC_PASS; } /* testMapGetAllBlocks */
void RegressionTask(void) { uint32_t nCalls = 0; int status; TC_START("Test Microkernel Critical Section API\n"); task_sem_give(ALT_SEM); /* Activate AlternateTask() */ nCalls = criticalLoop(nCalls); /* Wait for AlternateTask() to complete */ status = task_sem_take_wait_timeout(REGRESS_SEM, TEST_TIMEOUT); if (status != RC_OK) { TC_ERROR("Timed out waiting for REGRESS_SEM\n"); goto errorReturn; } if (criticalVar != nCalls + altTaskIterations) { TC_ERROR("Unexpected value for <criticalVar>. Expected %d, got %d\n", nCalls + altTaskIterations, criticalVar); goto errorReturn; } TC_PRINT("Obtained expected <criticalVar> value of %u\n", criticalVar); TC_PRINT("Enabling time slicing ...\n"); sys_scheduler_time_slice_set(1, 10); task_sem_give(ALT_SEM); /* Re-activate AlternateTask() */ nCalls = criticalLoop(nCalls); /* Wait for AlternateTask() to finish */ status = task_sem_take_wait_timeout(REGRESS_SEM, TEST_TIMEOUT); if (status != RC_OK) { TC_ERROR("Timed out waiting for REGRESS_SEM\n"); goto errorReturn; } if (criticalVar != nCalls + altTaskIterations) { TC_ERROR("Unexpected value for <criticalVar>. Expected %d, got %d\n", nCalls + altTaskIterations, criticalVar); goto errorReturn; } TC_PRINT("Obtained expected <criticalVar> value of %u\n", criticalVar); TC_END_RESULT(TC_PASS); TC_END_REPORT(TC_PASS); return; errorReturn: TC_END_RESULT(TC_FAIL); TC_END_REPORT(TC_FAIL); }
void main(void) { /* Fake personalization and additional_input * (replace by appropriate values) * e.g.: hostname+timestamp */ uint8_t additional_input[] = "additional input"; uint8_t personalization[] = "HOSTNAME"; uint32_t size = (1 << 15); uint32_t result = TC_PASS; struct tc_hmac_prng_struct h; uint8_t random[size]; uint8_t seed[128]; uint32_t i; TC_START("Performing HMAC-PRNG tests:"); TC_PRINT("HMAC-PRNG test#1 (init, reseed, generate):\n"); /* Fake seed (replace by a a truly random seed): */ for (i = 0; i < (uint32_t)sizeof(seed); ++i) { seed[i] = i; } TC_PRINT("HMAC-PRNG test#1 (init):\n"); if (tc_hmac_prng_init(&h, personalization, sizeof(personalization)) == 0) { TC_ERROR("HMAC-PRNG initialization failed.\n"); result = TC_FAIL; goto exitTest; } TC_END_RESULT(result); TC_PRINT("HMAC-PRNG test#1 (reseed):\n"); if (tc_hmac_prng_reseed(&h, seed, sizeof(seed), additional_input, sizeof(additional_input)) == 0) { TC_ERROR("HMAC-PRNG reseed failed.\n"); result = TC_FAIL; goto exitTest; } TC_END_RESULT(result); TC_PRINT("HMAC-PRNG test#1 (generate):\n"); if (tc_hmac_prng_generate(random, size, &h) < 1) { TC_ERROR("HMAC-PRNG generate failed.\n"); result = TC_FAIL; goto exitTest; } TC_END_RESULT(result); TC_PRINT("All HMAC tests succeeded!\n"); exitTest: TC_END_RESULT(result); TC_END_REPORT(result); }
void main(void) { u32_t result = TC_PASS; struct tc_cmac_struct state; struct tc_aes_key_sched_struct sched; const u8_t key[BUF_LEN] = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }; u8_t K1[BUF_LEN], K2[BUF_LEN]; TC_START("Performing CMAC tests:"); (void) tc_cmac_setup(&state, key, &sched); result = verify_gf_2_128_double(K1, K2, state); if (result == TC_FAIL) { /* terminate test */ TC_ERROR("CMAC test #1 (128 double) failed.\n"); goto exitTest; } (void) tc_cmac_setup(&state, key, &sched); result = verify_cmac_null_msg(&state); if (result == TC_FAIL) { /* terminate test */ TC_ERROR("CMAC test #2 (null msg) failed.\n"); goto exitTest; } (void) tc_cmac_setup(&state, key, &sched); result = verify_cmac_1_block_msg(&state); if (result == TC_FAIL) { /* terminate test */ TC_ERROR("CMAC test #3 (1 block msg)failed.\n"); goto exitTest; } (void) tc_cmac_setup(&state, key, &sched); result = verify_cmac_320_bit_msg(&state); if (result == TC_FAIL) { /* terminate test */ TC_ERROR("CMAC test #4 (320 bit msg) failed.\n"); goto exitTest; } (void) tc_cmac_setup(&state, key, &sched); result = verify_cmac_512_bit_msg(&state); if (result == TC_FAIL) { /* terminate test */ TC_ERROR("CMAC test #5 (512 bit msg)failed.\n"); goto exitTest; } TC_PRINT("All CMAC tests succeeded!\n"); exitTest: TC_END_RESULT(result); TC_END_REPORT(result); }
int simpleGroupWaitTest(void) { int i; ksem_t sema; task_sem_give(altSem); /* Wake the alternate task */ /* * Wait for a semaphore to be signalled by the alternate task. * Each semaphore in the group will be tested. */ for (i = 0; semList[i] != ENDLIST; i++) { sema = task_sem_group_take(semList, TICKS_UNLIMITED); if (sema != semList[i]) { TC_ERROR("task_sem_group_take() error. Expected %d, not %d\n", (int) semList[i], (int) sema); return TC_FAIL; } } /* * The Alternate Task will signal the semaphore group once. Note that * when the semaphore group is signalled, the last semaphore in the * group is the value that is returned, not the first. */ for (i = 3; i >= 0; i--) { sema = task_sem_group_take(semList, TICKS_UNLIMITED); if (sema != semList[i]) { TC_ERROR("task_sem_group_take() error. Expected %d, not %d\n", (int) semList[3], (int) sema); return TC_FAIL; } } /* * Again wait for a semaphore to be signalled. This time, the alternate * task will trigger an interrupt that signals the semaphore. */ for (i = 0; semList[i] != ENDLIST; i++) { sema = task_sem_group_take(semList, TICKS_UNLIMITED); if (sema != semList[i]) { TC_ERROR("task_sem_group_take() error. Expected %d, not %d\n", (int) semList[i], (int) sema); return TC_FAIL; } } return TC_PASS; }
int pipePutWaitTest(void) { int rv; /* return code from task_pipe_put_wait() */ int bytesWritten; /* # of bytes written to pipe */ /* 1. Fill the pipe */ rv = task_pipe_put_wait(pipeId, txBuffer, PIPE_SIZE, &bytesWritten, _ALL_N); if ((rv != RC_OK) || (bytesWritten != PIPE_SIZE)) { TC_ERROR("Return code: expected %d, got %d\n" "Bytes written: expected %d, got %d\n", RC_OK, rv, PIPE_SIZE, bytesWritten); return TC_FAIL; } task_sem_give(altSem); /* Wake the alternate task */ /* 2. task_pipe_put_wait() will force a context switch to AlternateTask(). */ rv = task_pipe_put_wait(pipeId, txBuffer, PIPE_SIZE, &bytesWritten, _ALL_N); if ((rv != RC_OK) || (bytesWritten != PIPE_SIZE)) { TC_ERROR("Return code: expected %d, got %d\n" "Bytes written: expected %d, got %d\n", RC_OK, rv, PIPE_SIZE, bytesWritten); return TC_FAIL; } /* 3. task_pipe_put_wait() will force a context switch to AlternateTask(). */ rv = task_pipe_put_wait(pipeId, txBuffer, PIPE_SIZE, &bytesWritten, _1_TO_N); if ((rv != RC_OK) || (bytesWritten != PIPE_SIZE)) { TC_ERROR("Return code: expected %d, got %d\n" "Bytes written: expected %d, got %d\n", RC_OK, rv, PIPE_SIZE, bytesWritten); return TC_FAIL; } /* This should return immediately as _0_TO_N with a wait is an error. */ rv = task_pipe_put_wait(pipeId, txBuffer, PIPE_SIZE, &bytesWritten, _0_TO_N); if ((rv != RC_FAIL) || (bytesWritten != 0)) { TC_ERROR("Return code: expected %d, got %d\n" "Bytes written: expected %d, got %d\n", RC_FAIL, rv, 0, bytesWritten); return TC_FAIL; } /* Wait for AlternateTask()'s pipePutWaitHelper() to finish */ (void)task_sem_take_wait(regSem); return TC_PASS; }
void fiber2(void) { void *pData; /* pointer to FIFO object from the queue */ /* Wait for fiber2 to be activated */ nano_fiber_sem_take(&nanoSemObj2, TICKS_UNLIMITED); /* Wait for data to be added to <nanoFifoObj> */ pData = nano_fiber_fifo_get(&nanoFifoObj, TICKS_UNLIMITED); if (pData != pPutList1[1]) { TC_ERROR("fiber2 (1) - expected 0x%x, got 0x%x\n", pPutList1[1], pData); retCode = TC_FAIL; return; } /* Wait for data to be added to <nanoFifoObj2> by fiber3 */ pData = nano_fiber_fifo_get(&nanoFifoObj2, TICKS_UNLIMITED); if (pData != pPutList2[1]) { TC_ERROR("fiber2 (2) - expected 0x%x, got 0x%x\n", pPutList2[1], pData); retCode = TC_FAIL; return; } /* Wait for fiber2 to be reactivated */ nano_fiber_sem_take(&nanoSemObj2, TICKS_UNLIMITED); /* Fiber #2 has been reactivated by main task */ for (int i = 0; i < 4; i++) { pData = nano_fiber_fifo_get(&nanoFifoObj, TICKS_UNLIMITED); if (pData != pPutList1[i]) { TC_ERROR("fiber2 (3) - iteration %d expected 0x%x, got 0x%x\n", i, pPutList1[i], pData); retCode = TC_FAIL; return; } } nano_fiber_sem_give(&nanoSemObjTask); /* Wake main task */ /* Wait for fiber2 to be reactivated */ nano_fiber_sem_take(&nanoSemObj2, TICKS_UNLIMITED); testFiberFifoGetW(); PRINT_LINE; testIsrFifoFromFiber(); TC_END_RESULT(retCode); } /* fiber2 */
int simpleSemaWaitTest(void) { int status; int i; for (i = 0; i < 5; i++) { /* Wait one second for SIMPLE_SEM. Timeout is expected. */ status = task_sem_take(simpleSem, OBJ_TIMEOUT); if (status != RC_TIME) { TC_ERROR("task_sem_take() error. Expected %d, got %d\n", RC_TIME, status); return TC_FAIL; } } /* * Signal the semaphore upon which the alternate task is waiting. The * alternate task (which is at a lower priority) will cause SIMPLE_SEM * to be signalled, thus waking this task. */ task_sem_give(altSem); status = task_sem_take(simpleSem, OBJ_TIMEOUT); if (status != RC_OK) { TC_ERROR("task_sem_take() error. Expected %d, got %d\n", RC_OK, status); return TC_FAIL; } /* * Note that task_sem_take(TICKS_UNLIMITED) has been tested when waking up * the alternate task. Since previous tests had this task waiting, the * alternate task must have had the time to enter the state where it is * waiting for the ALTTASK_SEM semaphore to be given. Thus, we do not need * to test for it here. * * Now wait on SIMPLE_SEM again. This time it will be woken up by an * ISR signalling the semaphore. */ status = task_sem_take(simpleSem, OBJ_TIMEOUT); if (status != RC_OK) { TC_ERROR("task_sem_take() error. Expected %d, got %d\n", RC_OK, status); return TC_FAIL; } return TC_PASS; }
/**
 * @brief CMAC test #1: verify GF(2^128) doubling (subkey derivation)
 *
 * Encrypts the all-zero block to obtain L, then checks gf_double() against
 * the known K1 (msb = 0 case) and K2 (msb = 1 case) vectors.
 *
 * @param K1  output buffer for the first derived subkey
 * @param K2  output buffer for the second derived subkey
 * @param s   CMAC state (by value) whose key schedule is used
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
static u32_t verify_gf_2_128_double(u8_t *K1, u8_t *K2, struct tc_cmac_struct s)
{
	u32_t result = TC_PASS;

	TC_PRINT("Performing CMAC test #1 (GF(2^128) double):\n");

	u8_t zero_block[BUF_LEN];
	u8_t enc[BUF_LEN];
	/* Expected encryption of the zero block */
	const u8_t l[BUF_LEN] = {
		0x7d, 0xf7, 0x6b, 0x0c, 0x1a, 0xb8, 0x99, 0xb3,
		0x3e, 0x42, 0xf0, 0x47, 0xb9, 0x1b, 0x54, 0x6f
	};
	/* Expected subkeys for the msb = 0 and msb = 1 doubling cases */
	const u8_t k1[BUF_LEN] = {
		0xfb, 0xee, 0xd6, 0x18, 0x35, 0x71, 0x33, 0x66,
		0x7c, 0x85, 0xe0, 0x8f, 0x72, 0x36, 0xa8, 0xde
	};
	const u8_t k2[BUF_LEN] = {
		0xf7, 0xdd, 0xac, 0x30, 0x6a, 0xe2, 0x66, 0xcc,
		0xf9, 0x0b, 0xc1, 0x1e, 0xe4, 0x6d, 0x51, 0x3b
	};

	(void) memset(zero_block, '\0', sizeof(zero_block));
	tc_aes_encrypt(enc, zero_block, s.sched);
	if (memcmp(enc, l, BUF_LEN) != 0) {
		TC_ERROR("%s: AES encryption failed\n", __func__);
		show("expected L =", l, sizeof(l));
		show("computed L =", enc, sizeof(enc));
		return TC_FAIL;
	}

	gf_double(K1, enc);
	if (memcmp(K1, k1, BUF_LEN) != 0) {
		TC_ERROR("%s: gf_2_128_double failed when msb = 0\n", __func__);
		show("expected K1 =", k1, sizeof(k1));
		show("computed K1 =", K1, sizeof(k1));
		return TC_FAIL;
	}

	gf_double(K2, K1);
	if (memcmp(K2, k2, BUF_LEN) != 0) {
		TC_ERROR("%s: gf_2_128_double failed when msb = 1\n", __func__);
		show("expected K2 =", k2, sizeof(k2));
		show("computed K2 =", K2, sizeof(k2));
		return TC_FAIL;
	}

	TC_END_RESULT(result);
	return result;
}
/**
 * @brief CMAC test #3: one-block message (SP 800-38B test vector #2)
 *
 * Computes the CMAC over a single 16-byte block and compares it with the
 * published tag.
 *
 * @param s  initialized CMAC state
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
static u32_t verify_cmac_1_block_msg(TCCmacState_t s)
{
	u32_t result = TC_PASS;

	TC_PRINT("Performing CMAC test #3 (SP 800-38B test vector #2):\n");

	const u8_t msg[BUF_LEN] = {
		0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
		0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a
	};
	const u8_t tag[BUF_LEN] = {
		0x07, 0x0a, 0x16, 0xb4, 0x6b, 0x4d, 0x41, 0x44,
		0xf7, 0x9b, 0xdd, 0x9d, 0xd0, 0x4a, 0x28, 0x7c
	};
	u8_t computed[BUF_LEN];

	(void) tc_cmac_init(s);
	(void) tc_cmac_update(s, msg, sizeof(msg));
	(void) tc_cmac_final(computed, s);

	if (memcmp(computed, tag, BUF_LEN) != 0) {
		TC_ERROR("%s: aes_cmac failed with 1 block msg\n", __func__);
		show("aes_cmac failed with 1 block msg =", msg, sizeof(msg));
		show("expected Tag =", tag, sizeof(tag));
		show("computed Tag =", computed, sizeof(computed));
		return TC_FAIL;
	}

	TC_END_RESULT(result);
	return result;
}
int testLowTimerDoesNotStart(void) { int32_t ticks; int status; int Ti[3] = {-1, 1, 0}; int Tr[3] = {1, -1, 0}; int i; pTimer[0] = task_timer_alloc(); for (i = 0; i < 3; i++) { /* Align to a tick */ ticks = task_tick_get_32(); while (task_tick_get_32() == ticks) { } task_timer_start(pTimer[0], Ti[i], Tr[i], TIMER_SEM); status = task_sem_take_wait_timeout(TIMER_SEM, 200); if (status != RC_TIME) { TC_ERROR("** Timer appears to have fired unexpectedly\n"); return TC_FAIL; /* Return failure, do not "clean up" */ } } task_timer_free(pTimer[0]); return TC_PASS; }
/**
 * @brief Run a series of blocking pipe reads and check the results
 *
 * For each entry in <items>, performs a task_pipe_get_wait() with the
 * entry's size and options and verifies both the return code and the
 * number of bytes actually read.
 *
 * @param items   table of sizes/options and their expected results
 * @param nItems  number of entries in <items>
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int pipeGetWaitTestWork(SIZE_EXPECT *items, int nItems)
{
	int i;           /* loop counter */
	int rv;          /* return code from task_pipe_get_wait() */
	int bytesRead;   /* # of bytes read from task_pipe_get_wait() */

	for (i = 0; i < nItems; i++) {
		/*
		 * Pipe should be empty.  Most calls to task_pipe_get_wait() should
		 * block until the call to task_pipe_put() is performed in the routine
		 * pipeGetWaitHelperWork().
		 */
		rv = task_pipe_get_wait(pipeId, rxBuffer, items[i].size,
					&bytesRead, items[i].options);
		if ((rv != items[i].rcode) || (bytesRead != items[i].sent)) {
			/* BUG FIX: report the expected byte count
			 * (items[i].sent), not a hard-coded 0.
			 */
			TC_ERROR("Expected return value %d, got %d\n"
				 "Expected bytesRead = %d, got %d\n",
				 items[i].rcode, rv, items[i].sent, bytesRead);
			return TC_FAIL;
		}
	}

	return TC_PASS;
}
/** * * @brief Test the k_cpu_idle() routine * * This tests the k_cpu_idle() routine. The first thing it does is align to * a tick boundary. The only source of interrupts while the test is running is * expected to be the tick clock timer which should wake the CPU. Thus after * each call to k_cpu_idle(), the tick count should be one higher. * * @return TC_PASS on success * @return TC_FAIL on failure */ static int test_kernel_cpu_idle(int atomic) { int tms, tms2;; /* current time in millisecond */ int i; /* loop variable */ /* Align to a "ms boundary". */ tms = k_uptime_get_32(); while (tms == k_uptime_get_32()) { } tms = k_uptime_get_32(); for (i = 0; i < 5; i++) { /* Repeat the test five times */ if (atomic) { unsigned int key = irq_lock(); k_cpu_atomic_idle(key); } else { k_cpu_idle(); } /* calculating milliseconds per tick*/ tms += sys_clock_us_per_tick / USEC_PER_MSEC; tms2 = k_uptime_get_32(); if (tms2 < tms) { TC_ERROR("Bad ms per tick value computed, got %d which is less than %d\n", tms2, tms); return TC_FAIL; } } return TC_PASS; }
/**
 * @brief CMAC test #2: empty message (SP 800-38B test vector #1)
 *
 * Computes the CMAC over a zero-length message and compares it with the
 * published tag.
 *
 * @param s  initialized CMAC state
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
static u32_t verify_cmac_null_msg(TCCmacState_t s)
{
	u32_t result = TC_PASS;

	TC_PRINT("Performing CMAC test #2 (SP 800-38B test vector #1):\n");

	const u8_t tag[BUF_LEN] = {
		0xbb, 0x1d, 0x69, 0x29, 0xe9, 0x59, 0x37, 0x28,
		0x7f, 0xa3, 0x7d, 0x12, 0x9b, 0x75, 0x67, 0x46
	};
	u8_t computed[BUF_LEN];

	(void) tc_cmac_init(s);
	(void) tc_cmac_update(s, (const u8_t *) 0, 0);
	(void) tc_cmac_final(computed, s);

	if (memcmp(computed, tag, BUF_LEN) != 0) {
		TC_ERROR("%s: aes_cmac failed with null msg = 1\n", __func__);
		show("expected Tag =", tag, sizeof(tag));
		show("computed Tag =", computed, sizeof(computed));
		return TC_FAIL;
	}

	TC_END_RESULT(result);
	return result;
}
/**
 * @brief CMAC test #4: 320-bit message (SP 800-38B test vector #3)
 *
 * Computes the CMAC over a 40-byte message and compares it with the
 * published tag.
 *
 * @param s  initialized CMAC state
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
static u32_t verify_cmac_320_bit_msg(TCCmacState_t s)
{
	u32_t result = TC_PASS;

	TC_PRINT("Performing CMAC test #4 (SP 800-38B test vector #3):\n");

	const u8_t msg[40] = {
		0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
		0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
		0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
		0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
		0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11
	};
	const u8_t tag[BUF_LEN] = {
		0xdf, 0xa6, 0x67, 0x47, 0xde, 0x9a, 0xe6, 0x30,
		0x30, 0xca, 0x32, 0x61, 0x14, 0x97, 0xc8, 0x27
	};
	u8_t computed[BUF_LEN];

	(void) tc_cmac_init(s);
	(void) tc_cmac_update(s, msg, sizeof(msg));
	(void) tc_cmac_final(computed, s);

	if (memcmp(computed, tag, BUF_LEN) != 0) {
		TC_ERROR("%s: aes_cmac failed with 320 bit msg\n", __func__);
		show("aes_cmac failed with 320 bit msg =", msg, sizeof(msg));
		show("expected Tag =", tag, sizeof(tag));
		show("computed Tag =", computed, sizeof(computed));
		return TC_FAIL;
	}

	TC_END_RESULT(result);
	return result;
}
/* the task spins several fibers that pend and timeout on lifos */ static int test_multiple_fibers_pending(struct timeout_order_data *test_data, int test_data_size) { int ii; for (ii = 0; ii < test_data_size; ii++) { task_fiber_start(timeout_stacks[ii], FIBER_STACKSIZE, test_fiber_pend_and_timeout, (int)&test_data[ii], 0, FIBER_PRIORITY, 0); } for (ii = 0; ii < test_data_size; ii++) { struct timeout_order_data *data = nano_task_fifo_get(&timeout_order_fifo, TICKS_UNLIMITED); if (data->timeout_order == ii) { TC_PRINT(" got fiber (q order: %d, t/o: %d, lifo %p) as expected\n", data->q_order, data->timeout, data->lifo); } else { TC_ERROR(" *** fiber %d woke up, expected %d\n", data->timeout_order, ii); return TC_FAIL; } } return TC_PASS; }
/**
 * @brief Feed the pipe for the blocking-get test
 *
 * For each entry in <items>, performs a task_pipe_put() with the entry's
 * size and options and verifies both the return code and the number of
 * bytes actually sent; these puts unblock the reader in
 * pipeGetWaitTestWork().
 *
 * @param items   table of sizes/options and their expected results
 * @param nItems  number of entries in <items>
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
int pipeGetWaitHelperWork(SIZE_EXPECT *items, int nItems)
{
	int i;           /* loop counter */
	int rv;          /* return value from task_pipe_put() */
	int bytesSent;   /* # of bytes sent to task_pipe_put() */

	for (i = 0; i < nItems; i++) {
		/*
		 * Pipe should be empty.  Most calls to task_pipe_get(TICKS_UNLIMITED)
		 * should block until the call to task_pipe_put() is performed in the
		 * routine pipeGetWaitHelperWork().
		 */
		bytesSent = 0;
		rv = task_pipe_put(pipeId, rxBuffer, items[i].size,
				   &bytesSent, items[i].options, TICKS_UNLIMITED);
		if ((rv != items[i].rcode) || (bytesSent != items[i].sent)) {
			/* BUG FIX: report the expected byte count
			 * (items[i].sent), not a hard-coded 0.
			 */
			TC_ERROR("Expected return value %d, got %d\n"
				 "Expected bytesSent = %d, got %d\n",
				 items[i].rcode, rv, items[i].sent, bytesSent);
			return TC_FAIL;
		}
	}

	return TC_PASS;
}
int taskLifoWaitTest(void) { void *data; /* ptr to data retrieved from LIFO */ /* Wait on <taskWaitSem> in case fiber's print message blocked */ nano_fiber_sem_take(&taskWaitSem, TICKS_UNLIMITED); /* The fiber is waiting on the LIFO. Wake it. */ nano_task_lifo_put(&test_lifo, &lifoItem[0]); /* * The fiber ran, but is now blocked on the semaphore. Add an item to the * LIFO before giving the semaphore that wakes the fiber so that we can * cover the path of nano_fiber_lifo_get(TICKS_UNLIMITED) not waiting on * the LIFO. */ nano_task_lifo_put(&test_lifo, &lifoItem[2]); nano_task_sem_give(&fiberWaitSem); /* Check that the fiber got the correct item (lifoItem[0]) */ if (fiberDetectedFailure) { TC_ERROR(" *** nano_task_lifo_put()/nano_fiber_lifo_get() failure\n"); return TC_FAIL; } /* The LIFO is empty. This time the task will wait for the item. */ TC_PRINT("Task waiting on an empty LIFO\n"); data = nano_task_lifo_get(&test_lifo, TICKS_UNLIMITED); if (data != (void *) &lifoItem[1]) { TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n"); return TC_FAIL; } data = nano_task_lifo_get(&test_lifo, TICKS_UNLIMITED); if (data != (void *) &lifoItem[3]) { TC_ERROR(" *** nano_task_lifo_get()/nano_fiber_lifo_put() failure\n"); return TC_FAIL; } /* Waiting on an empty LIFO passed for both fiber and task. */ return TC_PASS; }