void RegressionTask(void *arg1, void *arg2, void *arg3)
{
	u32_t nCalls = 0;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	k_sem_give(&ALT_SEM);	/* Activate AlternateTask() */

	nCalls = criticalLoop(nCalls);

	/* Wait for AlternateTask() to complete */
	zassert_true(k_sem_take(&REGRESS_SEM, TEST_TIMEOUT) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(criticalVar, nCalls + altTaskIterations,
		      "Unexpected value for <criticalVar>");

	k_sched_time_slice_set(10, 10);

	k_sem_give(&ALT_SEM);	/* Re-activate AlternateTask() */

	nCalls = criticalLoop(nCalls);

	/* Wait for AlternateTask() to finish */
	zassert_true(k_sem_take(&REGRESS_SEM, TEST_TIMEOUT) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(criticalVar, nCalls + altTaskIterations,
		      "Unexpected value for <criticalVar>");

	k_sem_give(&TEST_SEM);
}
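/* criticalLoop(), AlternateTask() and the shared counters are defined
 * elsewhere in this test. The sketch below only illustrates the assumed
 * shape of the critical section being exercised (bump a shared counter
 * while the scheduler is locked); the original helper may protect the
 * counter by a different mechanism, so treat this as a hedged example,
 * not the original code.
 */
static volatile u32_t criticalVar;

static u32_t criticalLoop(u32_t nCalls)
{
	s64_t done = k_uptime_get() + 100;	/* assumed run window in ms */

	while (k_uptime_get() < done) {
		k_sched_lock();			/* protect criticalVar */
		criticalVar++;
		nCalls++;
		k_sched_unlock();
	}

	return nCalls;
}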
static void tmutex_test_lock_unlock(struct k_mutex *pmutex)
{
	k_mutex_init(pmutex);
	zassert_true(k_mutex_lock(pmutex, K_FOREVER) == 0,
		     "fail to lock K_FOREVER");
	k_mutex_unlock(pmutex);
	zassert_true(k_mutex_lock(pmutex, K_NO_WAIT) == 0,
		     "fail to lock K_NO_WAIT");
	k_mutex_unlock(pmutex);
	zassert_true(k_mutex_lock(pmutex, TIMEOUT) == 0,
		     "fail to lock TIMEOUT");
	k_mutex_unlock(pmutex);
}
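/* A minimal sketch (not the original suite file) of how the helper above
 * could be driven from ztest. The suite name, test name and the two mutex
 * objects are assumptions introduced for illustration only.
 */
#include <ztest.h>

K_MUTEX_DEFINE(static_mutex);		/* statically defined mutex */
static struct k_mutex dynamic_mutex;	/* initialized inside the helper */

static void test_mutex_lock_unlock(void)
{
	tmutex_test_lock_unlock(&static_mutex);
	tmutex_test_lock_unlock(&dynamic_mutex);
}

void test_main(void)
{
	ztest_test_suite(mutex_api,
			 ztest_unit_test(test_mutex_lock_unlock));
	ztest_run_test_suite(mutex_api);
}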
static void tcoop_ctx(void *p1, void *p2, void *p3)
{
	/** TESTPOINT: The thread's priority is in the cooperative range. */
	zassert_false(k_is_preempt_thread(), NULL);
	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(1));
	/** TESTPOINT: The thread's priority is in the preemptible range. */
	zassert_true(k_is_preempt_thread(), NULL);
	k_sched_lock();
	/** TESTPOINT: The thread has locked the scheduler. */
	zassert_false(k_is_preempt_thread(), NULL);
	k_sched_unlock();
	/** TESTPOINT: The thread has not locked the scheduler. */
	zassert_true(k_is_preempt_thread(), NULL);
	k_sem_give(&end_sema);
}
static void threads_suspend_resume(int prio)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set the current thread's priority */
	last_prio = prio;
	k_thread_priority_set(k_current_get(), last_prio);

	/* create a thread with lower priority */
	int create_prio = last_prio + 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      create_prio, 0, 0);

	/* checkpoint: suspend the created thread */
	k_thread_suspend(tid);
	k_sleep(100);
	/* checkpoint: the created thread must not run while suspended */
	zassert_false(last_prio == create_prio, NULL);

	k_thread_resume(tid);
	k_sleep(100);
	/* checkpoint: the created thread must run after being resumed */
	zassert_true(last_prio == create_prio, NULL);

	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
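/* The priority, suspend/resume and abort tests above and below rely on a
 * shared thread_entry() and the tdata/tstack/last_prio objects defined
 * elsewhere in the test source. The sketch below shows the assumed support
 * code (names and stack size are assumptions): the spawned thread simply
 * records its own priority so the parent can tell whether, and when, it ran.
 */
#define STACK_SIZE 512			/* assumed stack size */

K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
static struct k_thread tdata;
static int last_prio;

static void thread_entry(void *p1, void *p2, void *p3)
{
	/* record our own priority for the parent thread's checkpoints */
	last_prio = k_thread_priority_get(k_current_get());
}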
void test_priority_preemptible(void)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set the current thread to a non-negative (preemptible) priority */
	last_prio = 2;
	k_thread_priority_set(k_current_get(), last_prio);

	/* spawn a thread with higher priority */
	int spawn_prio = last_prio - 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      spawn_prio, 0, 0);
	/* checkpoint: the current thread is preempted by the
	 * higher-priority thread
	 */
	zassert_true(last_prio == spawn_prio, NULL);
	k_sleep(100);
	k_thread_abort(tid);

	/* spawn a thread with lower priority */
	spawn_prio = last_prio + 1;
	tid = k_thread_create(&tdata, tstack, STACK_SIZE,
			      thread_entry, NULL, NULL, NULL,
			      spawn_prio, 0, 0);
	/* checkpoint: the current thread is not preempted by the
	 * lower-priority thread
	 */
	zassert_false(last_prio == spawn_prio, NULL);
	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
void test_arm_irq_vector_table(void)
{
	printk("Test Cortex-M3 IRQ installed directly in vector table\n");

	for (int ii = 0; ii < 3; ii++) {
		irq_enable(ii);
		_irq_priority_set(ii, 0, 0);
		k_sem_init(&sem[ii], 0, UINT_MAX);
	}

	/* No ISR has run yet, so none of the semaphores can be taken */
	zassert_true((k_sem_take(&sem[0], K_NO_WAIT) ||
		      k_sem_take(&sem[1], K_NO_WAIT) ||
		      k_sem_take(&sem[2], K_NO_WAIT)), NULL);

	for (int ii = 0; ii < 3; ii++) {
#if defined(CONFIG_SOC_TI_LM3S6965_QEMU)
		/* QEMU does not simulate the STIR register: pend the IRQ
		 * through the NVIC instead as a workaround.
		 */
		NVIC_SetPendingIRQ(ii);
#else
		NVIC->STIR = ii;
#endif
	}

	/* Each ISR gave its semaphore, so every take must now succeed */
	zassert_false((k_sem_take(&sem[0], K_NO_WAIT) ||
		       k_sem_take(&sem[1], K_NO_WAIT) ||
		       k_sem_take(&sem[2], K_NO_WAIT)), NULL);
}
void test_critical(void)
{
	init_objects();
	start_threads();
	zassert_true(k_sem_take(&TEST_SEM, TEST_TIMEOUT * 2) == 0,
		     "Timed out waiting for TEST_SEM");
}
static bool send_ipv4_udp_msg(struct net_if *iface,
			      struct in_addr *src,
			      struct in_addr *dst,
			      u16_t src_port, u16_t dst_port,
			      struct ud *ud,
			      bool expect_failure)
{
	struct net_pkt *pkt;
	struct net_buf *frag;
	int ret;

	pkt = net_pkt_get_reserve_tx(0, K_FOREVER);
	frag = net_pkt_get_frag(pkt, K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	net_pkt_set_iface(pkt, iface);
	net_pkt_set_ll_reserve(pkt, net_buf_headroom(frag));

	setup_ipv4_udp(pkt, src, dst, src_port, dst_port);

	ret = net_recv_data(iface, pkt);
	if (ret < 0) {
		printk("Cannot recv pkt %p, ret %d\n", pkt, ret);
		zassert_true(0, "exiting");
	}

	if (k_sem_take(&recv_lock, TIMEOUT)) {
		/**TESTPOINT: Check for failure*/
		zassert_true(expect_failure, "Timeout, packet not received");
		return true;
	}

	/* Check that the returned user data is the same as what was given
	 * as a parameter.
	 */
	if (ud != returned_ud && !expect_failure) {
		printk("IPv4 wrong user data %p returned, expected %p\n",
		       returned_ud, ud);
		zassert_true(0, "exiting");
	}

	return !fail;
}
static void thread_entry_abort(void *p1, void *p2, void *p3)
{
	/**TESTPOINT: abort the current thread*/
	execute_flag = 1;
	k_thread_abort(k_current_get());
	/* unreachable */
	execute_flag = 2;
	zassert_true(1 == 0, NULL);
}
static void send_iface1(void)
{
	bool ret;

	DBG("Sending data to iface 1 %p\n", iface1);

	ret = send_iface(iface1, 1, false);

	zassert_true(ret, "iface 1");
}

static void send_iface2(void)
{
	bool ret;

	DBG("Sending data to iface 2 %p\n", iface2);

	ret = send_iface(iface2, 2, false);

	zassert_true(ret, "iface 2");
}

static void send_iface3(void)
{
	bool ret;

	DBG("Sending data to iface 3 %p\n", iface3);

	ret = send_iface(iface3, 3, false);

	zassert_true(ret, "iface 3");
}
void test_threads_abort_self(void)
{
	execute_flag = 0;
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry_abort, NULL, NULL, NULL,
				      0, 0, 0);
	k_sleep(100);
	/**TESTPOINT: the spawned thread ran but aborted itself*/
	zassert_true(execute_flag == 1, NULL);
	k_thread_abort(tid);
}
static void send_iface1_up(void)
{
	bool ret;

	DBG("Sending data to iface 1 %p again\n", iface1);

	net_if_up(iface1);

	ret = send_iface(iface1, 1, false);

	zassert_true(ret, "iface 1 up again");
}

static void send_iface1_down(void)
{
	bool ret;

	DBG("Sending data to iface 1 %p while down\n", iface1);

	net_if_down(iface1);

	ret = send_iface(iface1, 1, true);

	zassert_true(ret, "iface 1 down");
}
/* test cases */
void test_mpool_alloc_merge_failed_diff_size(void)
{
	struct k_mem_block block[BLK_NUM_MIN], block_fail;
	size_t block_size[] = {
		BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
		BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID,
		BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
		BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID};
	int block_count = sizeof(block_size)/sizeof(size_t);

	/**
	 * TESTPOINT: the merging algorithm cannot combine adjacent free
	 * blocks of different sizes
	 * Test steps: 1. allocate 14 blocks of different sizes
	 *             2. free blocks [2~8], which have different sizes
	 *             3. request a big block
	 *                verify that blocks [2~8] can't be merged
	 *             4. tear down, free blocks [0, 1, 9~13]
	 */
	for (int i = 0; i < block_count; i++) {
		/* 1. allocate blocks of different sizes */
		zassert_true(k_mem_pool_alloc(&mpool3, &block[i],
					      block_size[i],
					      K_NO_WAIT) == 0, NULL);
	}
	/* 2. free blocks [2~8], which have different sizes */
	for (int i = 2; i < 9; i++) {
		k_mem_pool_free(&block[i]);
	}
	/* 3. request a big block; the merge is expected to fail */
	k_mem_pool_defrag(&mpool3);
	zassert_true(k_mem_pool_alloc(&mpool3, &block_fail, BLK_SIZE_MAX,
				      TIMEOUT) == -EAGAIN, NULL);
	/* 4. test case tear down */
	k_mem_pool_free(&block[0]);
	k_mem_pool_free(&block[1]);
	for (int i = 9; i < block_count; i++) {
		k_mem_pool_free(&block[i]);
	}
}
void test_threads_abort_others(void)
{
	execute_flag = 0;
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      0, 0, 0);

	k_thread_abort(tid);
	k_sleep(100);
	/**TESTPOINT: check that a not-yet-started thread is aborted*/
	zassert_true(execute_flag == 0, NULL);

	tid = k_thread_create(&tdata, tstack, STACK_SIZE,
			      thread_entry, NULL, NULL, NULL, 0, 0, 0);
	k_sleep(50);
	k_thread_abort(tid);
	/**TESTPOINT: check that a running thread is aborted*/
	zassert_true(execute_flag == 1, NULL);
	k_sleep(1000);
	zassert_true(execute_flag == 1, NULL);
}
static void tpipe_put(struct k_pipe *ppipe)
{
	size_t to_wt, wt_byte = 0;

	for (int i = 0; i < PIPE_LEN; i += wt_byte) {
		/**TESTPOINT: pipe put*/
		to_wt = (PIPE_LEN - i) >= BYTES_TO_WRITE ?
			BYTES_TO_WRITE : (PIPE_LEN - i);
		zassert_false(k_pipe_put(ppipe, &data[i], to_wt, &wt_byte,
					 1, K_NO_WAIT), NULL);
		zassert_true(wt_byte == to_wt || wt_byte == 1, NULL);
	}
}
/* test cases */
void test_priority_cooperative(void)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set the current thread to a negative (cooperative) priority */
	last_prio = -1;
	k_thread_priority_set(k_current_get(), last_prio);

	/* spawn a thread with higher priority */
	int spawn_prio = last_prio - 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      spawn_prio, 0, 0);
	/* checkpoint: the current thread must not be preempted by the
	 * higher-priority thread while it is cooperative
	 */
	zassert_true(last_prio == k_thread_priority_get(k_current_get()),
		     NULL);
	k_sleep(100);
	/* checkpoint: the spawned thread has now been executed */
	zassert_true(last_prio == spawn_prio, NULL);
	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
static void tpipe_get(struct k_pipe *ppipe)
{
	unsigned char rx_data[PIPE_LEN];
	size_t to_rd, rd_byte = 0;

	/* read back the data written by tpipe_put() */
	for (int i = 0; i < PIPE_LEN; i += rd_byte) {
		/**TESTPOINT: pipe get*/
		to_rd = (PIPE_LEN - i) >= BYTES_TO_READ ?
			BYTES_TO_READ : (PIPE_LEN - i);
		zassert_false(k_pipe_get(ppipe, &rx_data[i], to_rd, &rd_byte,
					 1, K_FOREVER), NULL);
		zassert_true(rd_byte == to_rd || rd_byte == 1, NULL);
	}
	for (int i = 0; i < PIPE_LEN; i++) {
		zassert_equal(rx_data[i], data[i], NULL);
	}
}
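/* Illustrative sketch (assumed object and test names, not the original test
 * body) of how the two pipe helpers above are typically paired: one thread
 * fills the pipe with tpipe_put() while the other context drains it with
 * tpipe_get() and verifies the data.
 */
K_PIPE_DEFINE(test_pipe, PIPE_LEN, 4);
K_THREAD_STACK_DEFINE(pipe_stack, 512);
static struct k_thread pipe_thread;

static void pipe_putter(void *p1, void *p2, void *p3)
{
	tpipe_put((struct k_pipe *)p1);
}

static void test_pipe_thread2thread(void)
{
	/* spawn the writer, then consume from the current thread */
	k_thread_create(&pipe_thread, pipe_stack, 512, pipe_putter,
			&test_pipe, NULL, NULL, K_PRIO_PREEMPT(0), 0, 0);
	tpipe_get(&test_pipe);
}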
static void test_ipv6_multi_frags(void)
{
	struct net_pkt *pkt;
	struct net_buf *frag;
	struct ipv6_hdr *ipv6;
	struct udp_hdr *udp;
	int bytes, remaining = strlen(example_data), pos = 0;

	/* Example of multi fragment scenario with IPv6 */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	frag = net_pkt_get_reserve_rx_data(LL_RESERVE, K_FOREVER);

	/* Place the IP + UDP header in the first fragment */
	if (!net_buf_tailroom(frag)) {
		ipv6 = (struct ipv6_hdr *)(frag->data);
		udp = (struct udp_hdr *)((void *)ipv6 + sizeof(*ipv6));
		if (net_buf_tailroom(frag) < sizeof(ipv6)) {
			printk("Not enough space for IPv6 header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(ipv6), net_buf_tailroom(frag));
			zassert_true(false, "No space for IPv6 header");
		}
		net_buf_add(frag, sizeof(ipv6));

		if (net_buf_tailroom(frag) < sizeof(udp)) {
			printk("Not enough space for UDP header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(udp), net_buf_tailroom(frag));
			zassert_true(false, "No space for UDP header");
		}

		net_pkt_set_appdata(pkt, (void *)udp + sizeof(*udp));
		net_pkt_set_appdatalen(pkt, 0);
	}

	net_pkt_frag_add(pkt, frag);

	/* Put some data to rest of the fragments */
	frag = net_pkt_get_reserve_rx_data(LL_RESERVE, K_FOREVER);
	if (net_buf_tailroom(frag) -
	    (CONFIG_NET_BUF_DATA_SIZE - LL_RESERVE)) {
		printk("Invalid number of bytes available in the buf, "
		       "should be 0 but was %zd - %d\n",
		       net_buf_tailroom(frag),
		       CONFIG_NET_BUF_DATA_SIZE - LL_RESERVE);
		zassert_true(false, "Invalid byte count");
	}

	if (((int)net_buf_tailroom(frag) - remaining) > 0) {
		printk("We should have been out of space now, "
		       "tailroom %zd user data len %zd\n",
		       net_buf_tailroom(frag), strlen(example_data));
		zassert_true(false, "Still space");
	}

	while (remaining > 0) {
		int copy;

		bytes = net_buf_tailroom(frag);
		copy = remaining > bytes ? bytes : remaining;
		memcpy(net_buf_add(frag, copy), &example_data[pos], copy);

		DBG("Remaining %d left %d copy %d\n", remaining, bytes, copy);

		pos += bytes;
		remaining -= bytes;

		if (net_buf_tailroom(frag) - (bytes - copy)) {
			printk("There should have not been any tailroom left, "
			       "tailroom %zd\n",
			       net_buf_tailroom(frag) - (bytes - copy));
			zassert_true(false, "There is still tailroom left");
		}

		net_pkt_frag_add(pkt, frag);
		if (remaining > 0) {
			frag = net_pkt_get_reserve_rx_data(LL_RESERVE,
							   K_FOREVER);
		}
	}

	bytes = net_pkt_get_len(pkt);
	if (bytes != strlen(example_data)) {
		printk("Invalid number of bytes in message, %zd vs %d\n",
		       strlen(example_data), bytes);
		zassert_true(false, "Invalid number of bytes");
	}

	/* Normally one should not unref the fragment list like this
	 * because it will leave the pkt->frags pointing to already
	 * freed fragment.
	 */
	net_pkt_frag_unref(pkt->frags);

	zassert_not_null(pkt->frags, "Frag list empty");

	pkt->frags = NULL; /* to prevent double free */

	net_pkt_unref(pkt);
}
static void tThread_entry_lock_timeout_fail(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1, TIMEOUT - 100) != 0,
		     NULL);
	TC_PRINT("bypass locked resource from spawn thread\n");
}

static void tThread_entry_lock_no_wait(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1, K_NO_WAIT) != 0, NULL);
	TC_PRINT("bypass locked resource from spawn thread\n");
}
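/* Sketch of the parent side these entry functions assume (the mutex object,
 * thread objects and test name below are illustrative, not the originals):
 * the main thread holds the mutex while the spawned thread tries, and must
 * fail, to take it; the mutex is released afterwards.
 */
static struct k_mutex tmutex;

static void test_mutex_lock_no_wait_fail(void)
{
	k_mutex_init(&tmutex);
	k_mutex_lock(&tmutex, K_NO_WAIT);

	/* spawned thread runs tThread_entry_lock_no_wait() and must fail */
	k_thread_create(&tdata, tstack, STACK_SIZE,
			tThread_entry_lock_no_wait, &tmutex, NULL, NULL,
			K_PRIO_PREEMPT(0), 0, 0);
	k_sleep(100);		/* let the spawned thread run */

	k_mutex_unlock(&tmutex);
}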
static void test_fragment_copy(void)
{
	struct net_pkt *pkt, *new_pkt;
	struct net_buf *frag, *new_frag;
	struct ipv6_hdr *ipv6;
	struct udp_hdr *udp;
	size_t orig_len, reserve;
	int pos;

	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	frag = net_pkt_get_reserve_rx_data(LL_RESERVE, K_FOREVER);

	/* Place the IP + UDP header in the first fragment */
	if (net_buf_tailroom(frag)) {
		ipv6 = (struct ipv6_hdr *)(frag->data);
		udp = (struct udp_hdr *)((void *)ipv6 + sizeof(*ipv6));
		if (net_buf_tailroom(frag) < sizeof(*ipv6)) {
			printk("Not enough space for IPv6 header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(ipv6), net_buf_tailroom(frag));
			zassert_true(false, "No space for IPv6 header");
		}
		net_buf_add(frag, sizeof(*ipv6));

		if (net_buf_tailroom(frag) < sizeof(*udp)) {
			printk("Not enough space for UDP header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(udp), net_buf_tailroom(frag));
			zassert_true(false, "No space for UDP header");
		}

		net_buf_add(frag, sizeof(*udp));

		memcpy(net_buf_add(frag, 15), example_data, 15);

		net_pkt_set_appdata(pkt, (void *)udp + sizeof(*udp) + 15);
		net_pkt_set_appdatalen(pkt, 0);
	}

	net_pkt_frag_add(pkt, frag);

	orig_len = net_pkt_get_len(pkt);

	DBG("Total copy data len %zd\n", orig_len);

	linearize(pkt, buf_orig, orig_len);

	/* Then copy a fragment list to a new fragment list.
	 * Reserve some space in front of the buffers.
	 */
	reserve = sizeof(struct ipv6_hdr) + sizeof(struct icmp_hdr);
	new_frag = net_pkt_copy_all(pkt, reserve, K_FOREVER);
	zassert_not_null(new_frag, "Cannot copy fragment list");

	new_pkt = net_pkt_get_reserve_tx(0, K_FOREVER);
	new_pkt->frags = net_buf_frag_add(new_pkt->frags, new_frag);

	DBG("Total new data len %zd\n", net_pkt_get_len(new_pkt));

	if ((net_pkt_get_len(pkt) + reserve) != net_pkt_get_len(new_pkt)) {
		int diff;

		diff = net_pkt_get_len(new_pkt) - reserve -
			net_pkt_get_len(pkt);

		printk("Fragment list missing data, %d bytes not copied "
		       "(%zd vs %zd)\n", diff,
		       net_pkt_get_len(pkt) + reserve,
		       net_pkt_get_len(new_pkt));
		zassert_true(false, "Frag list missing");
	}

	if (net_pkt_get_len(new_pkt) != (orig_len + sizeof(struct ipv6_hdr) +
					 sizeof(struct icmp_hdr))) {
		printk("Fragment list missing data, new pkt len %zd "
		       "should be %zd\n", net_pkt_get_len(new_pkt),
		       orig_len + sizeof(struct ipv6_hdr) +
		       sizeof(struct icmp_hdr));
		zassert_true(false, "Frag list missing data");
	}

	linearize(new_pkt, buf_copy, sizeof(buf_copy));

	zassert_true(memcmp(buf_orig, buf_copy, sizeof(buf_orig)),
		     "Buffer copy failed, buffers are same");

	pos = memcmp(buf_orig, buf_copy + sizeof(struct ipv6_hdr) +
		     sizeof(struct icmp_hdr), orig_len);
	if (pos) {
		printk("Buffer copy failed at pos %d\n", pos);
		zassert_true(false, "Buf copy failed");
	}
}
static void tThread_entry_lock_timeout_pass(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1, TIMEOUT + 100) == 0,
		     NULL);
	TC_PRINT("access resource from spawn thread\n");
	k_mutex_unlock((struct k_mutex *)p1);
}
static void test_pkt_read_append(void)
{
	int remaining = strlen(sample_data);
	u8_t verify_rw_short[sizeof(test_rw_short)];
	u8_t verify_rw_long[sizeof(test_rw_long)];
	struct net_pkt *pkt;
	struct net_buf *frag;
	struct net_buf *tfrag;
	struct ipv6_hdr *ipv6;
	struct udp_hdr *udp;
	u8_t data[10];
	int pos = 0;
	int bytes;
	u16_t off;
	u16_t tpos;
	u16_t fail_pos;

	/* Example of multi fragment read, append and skip APIs */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	frag = net_pkt_get_reserve_rx_data(LL_RESERVE, K_FOREVER);

	/* Place the IP + UDP header in the first fragment */
	if (!net_buf_tailroom(frag)) {
		ipv6 = (struct ipv6_hdr *)(frag->data);
		udp = (struct udp_hdr *)((void *)ipv6 + sizeof(*ipv6));
		if (net_buf_tailroom(frag) < sizeof(ipv6)) {
			printk("Not enough space for IPv6 header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(ipv6), net_buf_tailroom(frag));
			zassert_true(false, "No space for IPv6 header");
		}
		net_buf_add(frag, sizeof(ipv6));

		if (net_buf_tailroom(frag) < sizeof(udp)) {
			printk("Not enough space for UDP header, "
			       "needed %zd bytes, has %zd bytes\n",
			       sizeof(udp), net_buf_tailroom(frag));
			zassert_true(false, "No space for UDP header");
		}

		net_pkt_set_appdata(pkt, (void *)udp + sizeof(*udp));
		net_pkt_set_appdatalen(pkt, 0);
	}

	net_pkt_frag_add(pkt, frag);

	/* Put some data to rest of the fragments */
	frag = net_pkt_get_reserve_rx_data(LL_RESERVE, K_FOREVER);
	if (net_buf_tailroom(frag) -
	    (CONFIG_NET_BUF_DATA_SIZE - LL_RESERVE)) {
		printk("Invalid number of bytes available in the buf, "
		       "should be 0 but was %zd - %d\n",
		       net_buf_tailroom(frag),
		       CONFIG_NET_BUF_DATA_SIZE - LL_RESERVE);
		zassert_true(false, "Invalid number of bytes avail");
	}

	if (((int)net_buf_tailroom(frag) - remaining) > 0) {
		printk("We should have been out of space now, "
		       "tailroom %zd user data len %zd\n",
		       net_buf_tailroom(frag), strlen(sample_data));
		zassert_true(false, "Not out of space");
	}

	while (remaining > 0) {
		int copy;

		bytes = net_buf_tailroom(frag);
		copy = remaining > bytes ? bytes : remaining;
		memcpy(net_buf_add(frag, copy), &sample_data[pos], copy);

		DBG("Remaining %d left %d copy %d\n", remaining, bytes, copy);

		pos += bytes;
		remaining -= bytes;

		if (net_buf_tailroom(frag) - (bytes - copy)) {
			printk("There should have not been any tailroom left, "
			       "tailroom %zd\n",
			       net_buf_tailroom(frag) - (bytes - copy));
			zassert_true(false, "Still tailroom left");
		}

		net_pkt_frag_add(pkt, frag);
		if (remaining > 0) {
			frag = net_pkt_get_reserve_rx_data(LL_RESERVE,
							   K_FOREVER);
		}
	}

	bytes = net_pkt_get_len(pkt);
	if (bytes != strlen(sample_data)) {
		printk("Invalid number of bytes in message, %zd vs %d\n",
		       strlen(sample_data), bytes);
		zassert_true(false, "Message size wrong");
	}

	/* Failure cases */

	/* Invalid buffer */
	tfrag = net_frag_skip(NULL, 10, &fail_pos, 10);
	zassert_true(!tfrag && fail_pos == 0xffff, "Invalid case NULL buffer");

	/* Invalid: skip more than a buffer length. */
	tfrag = net_buf_frag_last(pkt->frags);
	tfrag = net_frag_skip(tfrag, tfrag->len - 1, &fail_pos,
			      tfrag->len + 2);
	if (!(!tfrag && fail_pos == 0xffff)) {
		printk("Invalid case offset %d length to skip %d,"
		       "frag length %d\n",
		       tfrag->len - 1, tfrag->len + 2, tfrag->len);
		zassert_true(false, "Invalid offset");
	}

	/* Invalid offset */
	tfrag = net_buf_frag_last(pkt->frags);
	tfrag = net_frag_skip(tfrag, tfrag->len + 10, &fail_pos, 10);
	if (!(!tfrag && fail_pos == 0xffff)) {
		printk("Invalid case offset %d length to skip %d,"
		       "frag length %d\n",
		       tfrag->len + 10, 10, tfrag->len);
		zassert_true(false, "Invalid offset");
	}

	/* Valid cases */

	/* Offset is more than a single fragment length */
	/* Get the first data fragment */
	tfrag = pkt->frags;
	tfrag = tfrag->frags;
	off = tfrag->len;
	tfrag = net_frag_read(tfrag, off + 10, &tpos, 10, data);
	if (!tfrag ||
	    memcmp(sample_data + off + 10, data, 10)) {
		printk("Failed to read from offset %d, frag length %d "
		       "read length %d\n",
		       tfrag->len + 10, tfrag->len, 10);
		zassert_true(false, "Fail offset read");
	}

	/* Skip till end of all fragments */
	/* Get the first data fragment */
	tfrag = pkt->frags;
	tfrag = tfrag->frags;
	tfrag = net_frag_skip(tfrag, 0, &tpos, strlen(sample_data));
	zassert_true(!tfrag && tpos == 0,
		     "Invalid skip till end of all fragments");

	/* Short data test case */
	/* Test case scenario:
	 * 1) Cache the current fragment and offset
	 * 2) Append short data
	 * 3) Append short data again
	 * 4) Skip first short data from cached frag or offset
	 * 5) Read short data and compare
	 */
	tfrag = net_buf_frag_last(pkt->frags);
	off = tfrag->len;

	zassert_true(net_pkt_append_all(pkt, (u16_t)sizeof(test_rw_short),
					test_rw_short, K_FOREVER),
		     "net_pkt_append failed");

	zassert_true(net_pkt_append_all(pkt, (u16_t)sizeof(test_rw_short),
					test_rw_short, K_FOREVER),
		     "net_pkt_append failed");

	tfrag = net_frag_skip(tfrag, off, &tpos,
			      (u16_t)sizeof(test_rw_short));
	zassert_not_null(tfrag, "net_frag_skip failed");

	tfrag = net_frag_read(tfrag, tpos, &tpos,
			      (u16_t)sizeof(test_rw_short),
			      verify_rw_short);
	zassert_true(!tfrag && tpos == 0, "net_frag_read failed");
	zassert_false(memcmp(test_rw_short, verify_rw_short,
			     sizeof(test_rw_short)),
		      "net_frag_read failed with mismatch data");

	/* Long data test case */
	/* Test case scenario:
	 * 1) Cache the current fragment and offset
	 * 2) Append long data
	 * 3) Append long data again
	 * 4) Skip first long data from cached frag or offset
	 * 5) Read long data and compare
	 */
	tfrag = net_buf_frag_last(pkt->frags);
	off = tfrag->len;

	zassert_true(net_pkt_append_all(pkt, (u16_t)sizeof(test_rw_long),
					test_rw_long, K_FOREVER),
		     "net_pkt_append failed");

	zassert_true(net_pkt_append_all(pkt, (u16_t)sizeof(test_rw_long),
					test_rw_long, K_FOREVER),
		     "net_pkt_append failed");

	tfrag = net_frag_skip(tfrag, off, &tpos,
			      (u16_t)sizeof(test_rw_long));
	zassert_not_null(tfrag, "net_frag_skip failed");

	tfrag = net_frag_read(tfrag, tpos, &tpos,
			      (u16_t)sizeof(test_rw_long),
			      verify_rw_long);
	zassert_true(!tfrag && tpos == 0, "net_frag_read failed");
	zassert_false(memcmp(test_rw_long, verify_rw_long,
			     sizeof(test_rw_long)),
		      "net_frag_read failed with mismatch data");

	net_pkt_unref(pkt);

	DBG("test_pkt_read_append passed\n");
}
static void test_pkt_read_write_insert(void)
{
	struct net_buf *read_frag;
	struct net_buf *temp_frag;
	struct net_pkt *pkt;
	struct net_buf *frag;
	u8_t read_data[100];
	u16_t read_pos;
	u16_t len;
	u16_t pos;

	/* Example of multi fragment read, write and insert APIs */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	/* 1) Offset is within the input fragment.
	 * Write app data after the IPv6 and UDP headers. (If the offset is
	 * after the IPv6 + UDP header size, the API will create empty space
	 * up to the offset and write the data there.)
	 */
	frag = net_pkt_write(pkt, frag, NET_IPV6UDPH_LEN, &pos, 10,
			     (u8_t *)sample_data, K_FOREVER);
	zassert_false(!frag || pos != 58, "Usecase 1: Write failed");

	read_frag = net_frag_read(frag, NET_IPV6UDPH_LEN, &read_pos, 10,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 1: Read failed");

	zassert_false(memcmp(read_data, sample_data, 10),
		      "Usecase 1: Read data mismatch");

	/* 2) Write the IPv6 and UDP headers at offset 0. (The empty space was
	 * already created in Usecase 1, so this just fills in the header;
	 * at this point the length must not change.)
	 */
	frag = net_pkt_write(pkt, frag, 0, &pos, NET_IPV6UDPH_LEN,
			     (u8_t *)sample_data, K_FOREVER);
	zassert_false(!frag || pos != 48, "Usecase 2: Write failed");

	read_frag = net_frag_read(frag, 0, &read_pos, NET_IPV6UDPH_LEN,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 2: Read failed");

	zassert_false(memcmp(read_data, sample_data, NET_IPV6UDPH_LEN),
		      "Usecase 2: Read data mismatch");

	net_pkt_unref(pkt);

	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	/* 3) Offset is two fragments away.
	 * Write app data after 2 fragments. (If the offset is far away, the
	 * API will create empty fragments (space) up to the offset and write
	 * the data there.)
	 */
	frag = net_pkt_write(pkt, pkt->frags, 200, &pos, 10,
			     (u8_t *)sample_data + 10, K_FOREVER);
	zassert_not_null(frag, "Usecase 3: Write failed");

	read_frag = net_frag_read(frag, pos - 10, &read_pos, 10, read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 3: Read failed");

	zassert_false(memcmp(read_data, sample_data + 10, 10),
		      "Usecase 3: Read data mismatch");

	/* 4) Offset is two fragments away (overwrite).
	 * Write app data after 2 fragments. (The space is already available
	 * from Usecase 3, so this scenario doesn't create any space, it just
	 * overwrites the existing data.)
	 */
	frag = net_pkt_write(pkt, pkt->frags, 190, &pos, 10,
			     (u8_t *)sample_data, K_FOREVER);
	zassert_not_null(frag, "Usecase 4: Write failed");

	read_frag = net_frag_read(frag, pos - 10, &read_pos, 20, read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 4: Read failed");

	zassert_false(memcmp(read_data, sample_data, 20),
		      "Usecase 4: Read data mismatch");

	net_pkt_unref(pkt);

	/* 5) Write 20 bytes into a fragment which has only 10 bytes of space.
	 * The API should overwrite the first 10 bytes, create an extra
	 * 10 bytes and write there.
	 */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	/* Create 10 bytes space. */
	net_buf_add(frag, 10);

	frag = net_pkt_write(pkt, frag, 0, &pos, 20, (u8_t *)sample_data,
			     K_FOREVER);
	zassert_false(!frag && pos != 20, "Usecase 5: Write failed");

	read_frag = net_frag_read(frag, 0, &read_pos, 20, read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 5: Read failed");

	zassert_false(memcmp(read_data, sample_data, 20),
		      "Usecase 5: Read data mismatch");

	net_pkt_unref(pkt);

	/* 6) The first fragment is full, the second fragment has 10 bytes of
	 * tailroom, the third fragment has 5 bytes occupied.
	 * Write 30 bytes into the second fragment where the offset is
	 * 10 bytes before the tailroom.
	 * It should overwrite 10 bytes, create space for another 10 bytes and
	 * write the data. In the third fragment 5 bytes are overwritten and
	 * space for 5 bytes is created.
	 */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	/* First fragment: make it fully occupied. */
	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	len = net_buf_tailroom(frag);
	net_buf_add(frag, len);

	/* 2nd fragment: last 10 bytes of tailroom left, rest occupied */
	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	len = net_buf_tailroom(frag);
	net_buf_add(frag, len - 10);

	read_frag = temp_frag = frag;
	read_pos = frag->len - 10;

	/* 3rd fragment: only 5 bytes occupied */
	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);
	net_buf_add(frag, 5);

	temp_frag = net_pkt_write(pkt, temp_frag, temp_frag->len - 10, &pos,
				  30, (u8_t *)sample_data, K_FOREVER);
	zassert_not_null(temp_frag, "Usecase 6: Write failed");

	read_frag = net_frag_read(read_frag, read_pos, &read_pos, 30,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 6: Read failed");

	zassert_false(memcmp(read_data, sample_data, 30),
		      "Usecase 6: Read data mismatch");

	net_pkt_unref(pkt);

	/* 7) Offset is within the input fragment.
	 * Write app data after the IPv6 and UDP headers. (If the offset is
	 * after the IPv6 + UDP header size, the API will create empty space
	 * up to the offset and write the data there.) Then insert some app
	 * data after the IPv6 + UDP headers, before the first set of app
	 * data.
	 */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	/* First fragment: make it fully occupied. */
	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	frag = net_pkt_write(pkt, frag, NET_IPV6UDPH_LEN, &pos, 10,
			     (u8_t *)sample_data + 10, K_FOREVER);
	zassert_false(!frag || pos != 58, "Usecase 7: Write failed");

	read_frag = net_frag_read(frag, NET_IPV6UDPH_LEN, &read_pos, 10,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 7: Read failed");

	zassert_false(memcmp(read_data, sample_data + 10, 10),
		      "Usecase 7: Read data mismatch");

	zassert_true(net_pkt_insert(pkt, frag, NET_IPV6UDPH_LEN, 10,
				    (u8_t *)sample_data, K_FOREVER),
		     "Usecase 7: Insert failed");

	read_frag = net_frag_read(frag, NET_IPV6UDPH_LEN, &read_pos, 20,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 7: Read after failed");

	zassert_false(memcmp(read_data, sample_data, 20),
		      "Usecase 7: Read data mismatch after insertion");

	/* Insert data outside the input fragment length, error case. */
	zassert_false(net_pkt_insert(pkt, frag, 70, 10, (u8_t *)sample_data,
				     K_FOREVER),
		      "Usecase 7: False insert failed");

	net_pkt_unref(pkt);

	/* 8) Offset is within the input fragment.
	 * Write app data after the IPv6 and UDP headers. (If the offset is
	 * after the IPv6 + UDP header size, the API will create empty space
	 * up to the offset and write the data there.) Then insert some app
	 * data after the IPv6 + UDP headers, before the first set of app
	 * data. The inserted data is long and will take two fragments.
	 */
	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);
	net_pkt_set_ll_reserve(pkt, LL_RESERVE);

	/* First fragment: make it fully occupied. */
	frag = net_pkt_get_reserve_rx_data(net_pkt_ll_reserve(pkt),
					   K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	frag = net_pkt_write(pkt, frag, NET_IPV6UDPH_LEN, &pos, 10,
			     (u8_t *)sample_data + 60, K_FOREVER);
	zassert_false(!frag || pos != 58, "Usecase 8: Write failed");

	read_frag = net_frag_read(frag, NET_IPV6UDPH_LEN, &read_pos, 10,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 8: Read failed");

	zassert_false(memcmp(read_data, sample_data + 60, 10),
		      "Usecase 8: Read data mismatch");

	zassert_true(net_pkt_insert(pkt, frag, NET_IPV6UDPH_LEN, 60,
				    (u8_t *)sample_data, K_FOREVER),
		     "Usecase 8: Insert failed");

	read_frag = net_frag_read(frag, NET_IPV6UDPH_LEN, &read_pos, 70,
				  read_data);
	zassert_false(!read_frag && read_pos == 0xffff,
		      "Usecase 8: Read after failed");

	zassert_false(memcmp(read_data, sample_data, 70),
		      "Usecase 8: Read data mismatch after insertion");

	net_pkt_unref(pkt);

	DBG("test_pkt_read_write_insert passed\n");
}
static void test_fragment_compact(void)
{
	struct net_pkt *pkt;
	struct net_buf *frags[FRAG_COUNT], *frag;
	int i, bytes, total, count;

	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);

	frag = NULL;

	for (i = 0, total = 0; i < FRAG_COUNT; i++) {
		frags[i] = net_pkt_get_reserve_rx_data(12, K_FOREVER);

		if (frag) {
			net_buf_frag_add(frag, frags[i]);
		}

		frag = frags[i];

		/* Copy character test data in front of the fragment */
		memcpy(net_buf_add(frags[i], sizeof(test_data)),
		       test_data, sizeof(test_data));

		/* Followed by bytes of zeroes */
		memset(net_buf_add(frags[i], sizeof(test_data)), 0,
		       sizeof(test_data));

		total++;
	}

	if (total != FRAG_COUNT) {
		printk("There should be %d fragments but was %d\n",
		       FRAG_COUNT, total);
		zassert_true(false, "Invalid fragment count");
	}

	DBG("step 1\n");

	pkt->frags = net_buf_frag_add(pkt->frags, frags[0]);

	bytes = net_pkt_get_len(pkt);
	if (bytes != FRAG_COUNT * sizeof(test_data) * 2) {
		printk("Compact test failed, fragments had %d bytes but "
		       "should have had %zd\n", bytes,
		       FRAG_COUNT * sizeof(test_data) * 2);
		zassert_true(false, "Invalid fragment bytes");
	}

	zassert_false(net_pkt_is_compact(pkt),
		      "The pkt is definitely not compact");

	DBG("step 2\n");

	net_pkt_compact(pkt);

	zassert_true(net_pkt_is_compact(pkt),
		     "The pkt should be in compact form");

	DBG("step 3\n");

	/* Try compacting again, nothing should happen */
	net_pkt_compact(pkt);

	zassert_true(net_pkt_is_compact(pkt),
		     "The pkt should be compacted now");

	total = calc_fragments(pkt);

	/* Add an empty fragment at the end and compact, the last fragment
	 * should be removed.
	 */
	frag = net_pkt_get_reserve_rx_data(0, K_FOREVER);

	net_pkt_frag_add(pkt, frag);

	count = calc_fragments(pkt);

	DBG("step 4\n");

	net_pkt_compact(pkt);

	i = calc_fragments(pkt);

	if (count != (i + 1)) {
		printk("Last fragment removal failed, chain should have %d "
		       "fragments but had %d\n", i - 1, i);
		zassert_true(false, "Last frag rm fails");
	}

	if (i != total) {
		printk("Fragments missing, expecting %d but got %d\n",
		       total, i);
		zassert_true(false, "Frags missing");
	}

	/* Add two empty fragments at the end and compact, the last two
	 * fragments should be removed.
	 */
	frag = net_pkt_get_reserve_rx_data(0, K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	frag = net_pkt_get_reserve_rx_data(0, K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	count = calc_fragments(pkt);

	DBG("step 5\n");

	net_pkt_compact(pkt);

	i = calc_fragments(pkt);

	if (count != (i + 2)) {
		printk("Last two fragment removal failed, chain should have "
		       "%d fragments but had %d\n", i - 2, i);
		zassert_true(false, "Last two frag rm fails");
	}

	if (i != total) {
		printk("Fragments missing, expecting %d but got %d\n",
		       total, i);
		zassert_true(false, "Frags missing");
	}

	/* Add an empty fragment at the beginning and one at the end, then
	 * compact, the two fragments should be removed.
	 */
	frag = net_pkt_get_reserve_rx_data(0, K_FOREVER);
	net_pkt_frag_insert(pkt, frag);

	frag = net_pkt_get_reserve_rx_data(0, K_FOREVER);
	net_pkt_frag_add(pkt, frag);

	count = calc_fragments(pkt);

	DBG("step 6\n");

	net_pkt_compact(pkt);

	i = calc_fragments(pkt);

	if (count != (i + 2)) {
		printk("Two fragment removal failed, chain should have "
		       "%d fragments but had %d\n", i - 2, i);
		zassert_true(false, "Two frag rm fails");
	}

	if (i != total) {
		printk("Fragments missing, expecting %d but got %d\n",
		       total, i);
		zassert_true(false, "Frags missing");
	}

	DBG("test_fragment_compact passed\n");
}
void atomic_test(void)
{
	int i;

	atomic_t target, orig;
	atomic_val_t value;
	atomic_val_t oldvalue;

	target = 4;
	value = 5;
	oldvalue = 6;

	/* atomic_cas() */
	zassert_true((atomic_cas(&target, oldvalue, value) == 0),
		     "atomic_cas");
	target = 6;
	zassert_true((atomic_cas(&target, oldvalue, value) == 1),
		     "atomic_cas");
	zassert_true((target == value), "atomic_cas");

	/* atomic_add() */
	target = 1;
	value = 2;
	zassert_true((atomic_add(&target, value) == 1), "atomic_add");
	zassert_true((target == 3), "atomic_add");

	/* atomic_sub() */
	target = 10;
	value = 2;
	zassert_true((atomic_sub(&target, value) == 10), "atomic_sub");
	zassert_true((target == 8), "atomic_sub");

	/* atomic_inc() */
	target = 5;
	zassert_true((atomic_inc(&target) == 5), "atomic_inc");
	zassert_true((target == 6), "atomic_inc");

	/* atomic_dec() */
	target = 2;
	zassert_true((atomic_dec(&target) == 2), "atomic_dec");
	zassert_true((target == 1), "atomic_dec");

	/* atomic_get() */
	target = 50;
	zassert_true((atomic_get(&target) == 50), "atomic_get");

	/* atomic_set() */
	target = 42;
	value = 77;
	zassert_true((atomic_set(&target, value) == 42), "atomic_set");
	zassert_true((target == value), "atomic_set");

	/* atomic_clear() */
	target = 100;
	zassert_true((atomic_clear(&target) == 100), "atomic_clear");
	zassert_true((target == 0), "atomic_clear");

	/* atomic_or() */
	target = 0xFF00;
	value = 0x0F0F;
	zassert_true((atomic_or(&target, value) == 0xFF00), "atomic_or");
	zassert_true((target == 0xFF0F), "atomic_or");

	/* atomic_xor() */
	target = 0xFF00;
	value = 0x0F0F;
	zassert_true((atomic_xor(&target, value) == 0xFF00), "atomic_xor");
	zassert_true((target == 0xF00F), "atomic_xor");

	/* atomic_and() */
	target = 0xFF00;
	value = 0x0F0F;
	zassert_true((atomic_and(&target, value) == 0xFF00), "atomic_and");
	zassert_true((target == 0x0F00), "atomic_and");

	/* atomic_nand() */
	target = 0xFF00;
	value = 0x0F0F;
	zassert_true((atomic_nand(&target, value) == 0xFF00), "atomic_nand");
	zassert_true((target == 0xFFFFF0FF), "atomic_nand");

	/* atomic_test_bit() */
	for (i = 0; i < 32; i++) {
		target = 0x0F0F0F0F;
		zassert_true(!!(atomic_test_bit(&target, i) ==
				!!(target & (1 << i))),
			     "atomic_test_bit");
	}

	/* atomic_test_and_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_clear_bit(&target, i)) ==
			     !!(orig & (1 << i)),
			     "atomic_test_and_clear_bit");
		zassert_true(target == (orig & ~(1 << i)),
			     "atomic_test_and_clear_bit");
	}

	/* atomic_test_and_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_set_bit(&target, i)) ==
			     !!(orig & (1 << i)),
			     "atomic_test_and_set_bit");
		zassert_true(target == (orig | (1 << i)),
			     "atomic_test_and_set_bit");
	}

	/* atomic_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_clear_bit(&target, i);
		zassert_true(target == (orig & ~(1 << i)), "atomic_clear_bit");
	}

	/* atomic_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_set_bit(&target, i);
		zassert_true(target == (orig | (1 << i)), "atomic_set_bit");
	}
}
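/* Small usage sketch (not part of the original suite): atomic_cas() as a
 * one-shot "claim" flag, the return-value convention the test above
 * exercises (1 when the swap happened, 0 otherwise). The flag and helper
 * names are illustrative.
 */
static atomic_t claimed;

static int try_claim(void)
{
	/* succeeds for exactly one caller: flips 0 -> 1 atomically */
	return atomic_cas(&claimed, 0, 1) == 1;
}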
static void test_fragment_split(void)
{
#define TEST_FRAG_COUNT (FRAG_COUNT - 2)
#define FRAGA (FRAG_COUNT - 2)
#define FRAGB (FRAG_COUNT - 1)
	struct net_pkt *pkt;
	struct net_buf *frags[FRAG_COUNT], *frag, *frag_a, *frag_b;
	int i, total, split_a, split_b;
	int ret, frag_size;

	memset(frags, 0, FRAG_COUNT * sizeof(void *));

	pkt = net_pkt_get_reserve_rx(0, K_FOREVER);

	frag = NULL;

	for (i = 0, total = 0; i < TEST_FRAG_COUNT; i++) {
		frags[i] = net_pkt_get_reserve_rx_data(12, K_FOREVER);

		if (frag) {
			net_buf_frag_add(frag, frags[i]);
		}

		frag = frags[i];

		/* Copy some test data in front of the fragment */
		memcpy(net_buf_add(frags[i], sizeof(frag_data)),
		       frag_data, sizeof(frag_data));

		total++;
	}

	if (total != TEST_FRAG_COUNT) {
		printk("There should be %d fragments but was %d\n",
		       TEST_FRAG_COUNT, total);
		zassert_true(false, "Frags missing");
	}

	frag_size = frags[0]->size;
	zassert_true(frag_size > 0, "Invalid frag size");

	net_pkt_frag_add(pkt, frags[0]);

	frag_a = frags[FRAGA];
	frag_b = frags[FRAGB];

	zassert_is_null(frag_a, "frag_a is not NULL");
	zassert_is_null(frag_b, "frag_b is not NULL");

	split_a = frag_size * 2 / 3;
	split_b = frag_size - split_a;

	zassert_true(split_a > 0, "A size is 0");
	zassert_true(split_a > split_b, "A is smaller than B");

	/* Test some error cases first */
	ret = net_pkt_split(NULL, NULL, 1024, &frag_a, &frag_b, K_NO_WAIT);
	zassert_equal(ret, -EINVAL, "Invalid buf pointers");

	ret = net_pkt_split(pkt, pkt->frags, CONFIG_NET_BUF_DATA_SIZE + 1,
			    &frag_a, &frag_b, K_NO_WAIT);
	zassert_equal(ret, 0, "Split failed");

	ret = net_pkt_split(pkt, pkt->frags, split_a,
			    &frag_a, &frag_b, K_NO_WAIT);
	zassert_equal(ret, 0, "Cannot split frag");

	if (frag_a->len != split_a) {
		printk("Frag_a len %d not %d\n", frag_a->len, split_a);
		zassert_equal(frag_a->len, split_a, "Frag_a len wrong");
	}

	if (frag_b->len != split_b) {
		printk("Frag_b len %d not %d\n", frag_b->len, split_b);
		zassert_true(false, "Frag_b len wrong");
	}

	zassert_false(memcmp(pkt->frags->data, frag_a->data, split_a),
		      "Frag_a data mismatch");

	zassert_false(memcmp(pkt->frags->data + split_a, frag_b->data,
			     split_b),
		      "Frag_b data mismatch");
}