/* Echo server loop: block on rx_fifo forever; for each received buffer,
 * read the 16-bit big-endian length header and either echo the buffer
 * back unchanged (header == 0) or echo back 'len' bytes of generated
 * content so the receiver can verify the payload.
 *
 * @param dev UART device used by send_data() for the echo.
 */
void read_and_echo_data(struct device *dev)
{
	uart_buf_t *rx_buf;

	while (true) {
		rx_buf = k_fifo_get(&rx_fifo, K_FOREVER);
		if (rx_buf != NULL) {
			uint16_t len;

			/* Copy the 16-bit header out byte-wise: the old
			 * *(uint16_t *)rx_buf->data dereference is
			 * undefined behavior on strict-alignment targets
			 * and violates strict aliasing.
			 */
			memcpy(&len, rx_buf->data, sizeof(len));
			len = sys_be16_to_cpu(len);

			printf("FIFO get: %d bytes [hdr=%d]\n",
			       rx_buf->len, len);

			/* if len is 0, echo back the same data */
			/* if len is !0, echo back len bytes of data w/
			 * filled content so receiver can verify
			 */
			if (len != 0) {
				fill_data(rx_buf->data, len);
			} else {
				len = rx_buf->len;
			}

			send_data(dev, rx_buf->data, len);
		}
	}
}
/* RX worker: pulls packets off rx_queue forever and dispatches each
 * one by its leading specifier byte ('?' = request, '!' = config).
 * The packet is always unreffed here after handling.
 */
static void rx_thread(void)
{
	SYS_LOG_INF("RX thread started");

	for (;;) {
		struct net_pkt *pkt = k_fifo_get(&rx_queue, K_FOREVER);
		struct net_buf *frag = net_buf_frag_last(pkt->frags);

		SYS_LOG_DBG("Got pkt %p buf %p", pkt, frag);

		hexdump("SLIP >", frag->data, frag->len);

		/* TODO: process */
		u8_t type = net_buf_pull_u8(frag);

		if (type == '?') {
			process_request(frag);
		} else if (type == '!') {
			process_config(pkt);
		} else {
			SYS_LOG_ERR("Unknown message specifier %c", type);
		}

		net_pkt_unref(pkt);

		k_yield();
	}
}
/* UART pipe receive callback for the BT tester protocol.
 *
 * Bytes accumulate into 'buf' across calls until a complete BTP
 * command (struct btp_hdr + payload) is present; the full buffer is
 * then queued on cmds_queue and a fresh buffer from avail_queue is
 * returned for the pipe to keep filling.
 *
 * @param buf Current receive buffer (starts with a struct btp_hdr).
 * @param off In/out: number of valid bytes in buf; reset to 0 whenever
 *            the buffer is handed off or its contents discarded.
 * @return Buffer the UART pipe should continue filling into.
 */
static u8_t *recv_cb(u8_t *buf, size_t *off)
{
	struct btp_hdr *cmd = (void *) buf;
	u8_t *new_buf;
	u16_t len;

	/* Header not complete yet; keep accumulating. */
	if (*off < sizeof(*cmd)) {
		return buf;
	}

	len = sys_le16_to_cpu(cmd->len);

	/* Declared payload cannot fit in the buffer: discard it all. */
	if (len > BTP_MTU - sizeof(*cmd)) {
		SYS_LOG_ERR("BT tester: invalid packet length");
		*off = 0;
		return buf;
	}

	/* Payload not fully received yet; keep accumulating. */
	if (*off < sizeof(*cmd) + len) {
		return buf;
	}

	/* Get a replacement buffer BEFORE queueing the current one; if
	 * none is free the completed command is dropped (overflow).
	 */
	new_buf =  k_fifo_get(&avail_queue, K_NO_WAIT);
	if (!new_buf) {
		SYS_LOG_ERR("BT tester: RX overflow");
		*off = 0;
		return buf;
	}

	k_fifo_put(&cmds_queue, buf);
	*off = 0;

	return new_buf;
}
/* Demo entry point: sets up an IPM channel and the RTC alarm, then
 * alternates forever between a busy-wait "task" phase and an idle
 * phase.  The idle phase blocks on 'fifo'; presumably the alarm path
 * is what ends the wait (e.g. alarm_handler posting to the fifo or
 * the wakeup aborting the wait) -- TODO confirm against
 * alarm_handler, which is not visible here.
 */
void main(void)
{
	struct device *rtc_dev;
	struct rtc_config config;
	u32_t now;

	printk("LMT: Quark SE PM Multicore Demo\n");

	k_fifo_init(&fifo);

	build_suspend_device_list();

	/* IPM channel used to notify the other core about the alarm. */
	ipm = device_get_binding("alarm_notification");
	if (!ipm) {
		printk("Error: Failed to get IPM device\n");
		return;
	}

	rtc_dev = device_get_binding("RTC_0");
	if (!rtc_dev) {
		printk("Error: Failed to get RTC device\n");
		return;
	}

	rtc_enable(rtc_dev);

	/* In QMSI, in order to save the alarm callback we must set
	 * 'alarm_enable = 1' during configuration. However, this
	 * automatically triggers the alarm underneath. So, to avoid
	 * the alarm being fired any time soon, we set the 'init_val'
	 * to 1 and the 'alarm_val' to 0.
	 */
	config.init_val = 1;
	config.alarm_val = 0;
	config.alarm_enable = 1;
	config.cb_fn = alarm_handler;

	rtc_set_config(rtc_dev, &config);

	while (1) {
		/* Simulate some task handling by busy waiting. */
		printk("LMT: busy\n");
		k_busy_wait(TASK_TIME_IN_SEC * 1000 * 1000);

		/* Arm the alarm IDLE_TIME_IN_SEC from now, then park. */
		now = rtc_read(rtc_dev);
		rtc_set_alarm(rtc_dev,
			      now + (RTC_ALARM_SECOND * IDLE_TIME_IN_SEC));

		printk("LMT: idle\n");
		k_fifo_get(&fifo, K_FOREVER);
	}
}
void tester_init(void) { int i; for (i = 0; i < CMD_QUEUED; i++) { k_fifo_put(&avail_queue, &cmd_buf[i * BTP_MTU]); } k_thread_create(&cmd_thread, stack, STACKSIZE, cmd_handler, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); uart_pipe_register(k_fifo_get(&avail_queue, K_NO_WAIT), BTP_MTU, recv_cb); tester_send(BTP_SERVICE_ID_CORE, CORE_EV_IUT_READY, BTP_INDEX_NONE, NULL, 0); }
/* Tear down the HTTPS worker for 'ctx': drain queued RX data, wake
 * any reader blocked on the RX fifo, free all mbedtls state, drop the
 * TCP connection, and finally abort the HTTPS thread.  The order
 * matters: the fifo is drained and cancelled before the mbedtls
 * objects are freed so ssl_rx() cannot touch freed state.
 */
static void https_shutdown(struct http_client_ctx *ctx)
{
	if (!ctx->https.tid) {
		return;
	}

	/* Empty the fifo just in case there is any received packets
	 * still there.
	 */
	while (1) {
		struct rx_fifo_block *rx_data;

		rx_data = k_fifo_get(&ctx->https.mbedtls.ssl_ctx.rx_fifo,
				     K_NO_WAIT);
		if (!rx_data) {
			break;
		}

		net_pkt_unref(rx_data->pkt);

		k_mem_pool_free(&rx_data->block);
	}

	/* Unblocks a reader parked in k_fifo_get(..., K_FOREVER). */
	k_fifo_cancel_wait(&ctx->https.mbedtls.ssl_ctx.rx_fifo);

	/* Let the ssl_rx() run if there is anything there waiting */
	k_yield();

	mbedtls_ssl_close_notify(&ctx->https.mbedtls.ssl);

	mbedtls_ssl_free(&ctx->https.mbedtls.ssl);
	mbedtls_ssl_config_free(&ctx->https.mbedtls.conf);
	mbedtls_ctr_drbg_free(&ctx->https.mbedtls.ctr_drbg);
	mbedtls_entropy_free(&ctx->https.mbedtls.entropy);

#if defined(MBEDTLS_X509_CRT_PARSE_C)
	mbedtls_x509_crt_free(&ctx->https.mbedtls.ca_cert);
#endif

	tcp_disconnect(ctx);

	NET_DBG("HTTPS thread %p stopped for %p", ctx->https.tid, ctx);

	k_thread_abort(ctx->https.tid);
	ctx->https.tid = 0;
}
/* Shell thread body: print the prompt, block until a full command
 * line arrives on cmds_queue, execute it, then recycle the line
 * buffer back to avail_queue for the input side.  Never returns.
 */
static void shell(void *p1, void *p2, void *p3)
{
	struct console_input *line;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		printk("%s", get_prompt());

		line = k_fifo_get(&cmds_queue, K_FOREVER);

		shell_exec(line->line);

		/* Hand the buffer back to the producer. */
		k_fifo_put(&avail_queue, line);
	}
}
/* Network RX worker: synchronizes startup with the TX side, brings
 * the interface up, then loops forever consuming packets from
 * rx_queue and feeding them into the stack via processing_data().
 */
static void net_rx_thread(void)
{
	struct net_pkt *pkt;

	NET_DBG("Starting RX thread (stack %zu bytes)", sizeof(rx_stack));

	/* Starting TX side. The ordering is important here and the TX
	 * can only be started when RX side is ready to receive packets.
	 * We synchronize the startup of the device so that both RX and TX
	 * are only started fully when both are ready to receive or send
	 * data.
	 */
	net_if_init(&startup_sync);

	k_sem_take(&startup_sync, K_FOREVER);

	/* This will take the interface up and start everything. */
	net_if_post_init();

	while (1) {
		/* pkt_len only exists when stats or core debugging is
		 * compiled in; NOTE(review): this presumes NET_DBG and
		 * net_stats_update_bytes_recv() compile away under the
		 * same config guards -- confirm in the headers.
		 */
#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
		size_t pkt_len;
#endif

		pkt = k_fifo_get(&rx_queue, K_FOREVER);

		net_analyze_stack("RX thread", rx_stack, sizeof(rx_stack));

#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
		pkt_len = net_pkt_get_len(pkt);
#endif

		NET_DBG("Received pkt %p len %zu", pkt, pkt_len);

		net_stats_update_bytes_recv(pkt_len);

		processing_data(pkt, false);

		net_print_statistics();
		net_pkt_print();

		k_yield();
	}
}
/* Feed one received telnet packet into the console input queue.
 * The packet is silently dropped when it is out of size bounds, is a
 * telnet protocol command, the queues are not configured, or no free
 * input buffer is available.
 */
static inline void telnet_handle_input(struct net_pkt *pkt)
{
	struct console_input *input;
	size_t len;

	len = net_pkt_remaining_data(pkt);
	if (len > CONSOLE_MAX_LINE_LEN || len < TELNET_MIN_MSG) {
		return;
	}

	/* Telnet commands (IAC sequences etc.) are consumed here. */
	if (telnet_handle_command(pkt)) {
		return;
	}

	if (!avail_queue || !input_queue) {
		return;
	}

	input = k_fifo_get(avail_queue, K_NO_WAIT);
	if (!input) {
		return;
	}

	len = MIN(len, CONSOLE_MAX_LINE_LEN);
	/* NOTE(review): on read failure 'input' is not returned to
	 * avail_queue -- looks like a slow buffer leak; confirm whether
	 * this path can actually fail here.
	 */
	if (net_pkt_read_new(pkt, (u8_t *)input->line, len)) {
		return;
	}

	/* LF/CR will be removed if only the line is not NUL terminated */
	/* NOTE(review): the [len - 2] access assumes len >= 2; this
	 * presumably holds because TELNET_MIN_MSG bounds len above --
	 * confirm TELNET_MIN_MSG >= 2.
	 */
	if (input->line[len - 1] != NVT_NUL) {
		if (input->line[len - 1] == NVT_LF) {
			input->line[len - 1] = NVT_NUL;
		}

		if (input->line[len - 2] == NVT_CR) {
			input->line[len - 2] = NVT_NUL;
		}
	}

	k_fifo_put(input_queue, input);
}
/* BT tester command thread: pulls complete BTP commands from
 * cmds_queue forever, dispatches on the service ID, and recycles the
 * command buffer back to avail_queue when the handler returns.
 */
static void cmd_handler(void *p1, void *p2, void *p3)
{
	while (1) {
		struct btp_hdr *cmd;
		u16_t len;

		cmd = k_fifo_get(&cmds_queue, K_FOREVER);

		len = sys_le16_to_cpu(cmd->len);

		/* TODO
		 * verify if service is registered before calling handler
		 */
		switch (cmd->service) {
		case BTP_SERVICE_ID_CORE:
			handle_core(cmd->opcode, cmd->index, cmd->data, len);
			break;
		case BTP_SERVICE_ID_GAP:
			tester_handle_gap(cmd->opcode, cmd->index, cmd->data,
					  len);
			break;
		case BTP_SERVICE_ID_GATT:
			tester_handle_gatt(cmd->opcode, cmd->index, cmd->data,
					   len);
			break;
#if defined(CONFIG_BLUETOOTH_L2CAP_DYNAMIC_CHANNEL)
		case BTP_SERVICE_ID_L2CAP:
			tester_handle_l2cap(cmd->opcode, cmd->index, cmd->data,
					    len);
#endif /* CONFIG_BLUETOOTH_L2CAP_DYNAMIC_CHANNEL */
			/* The break is intentionally outside the #if so the
			 * switch stays well-formed when L2CAP support is
			 * compiled out (it is then merely unreachable).
			 */
			break;
		default:
			tester_rsp(cmd->service, cmd->opcode, cmd->index,
				   BTP_STATUS_FAILED);
			break;
		}

		/* Return the buffer to the UART RX pool. */
		k_fifo_put(&avail_queue, cmd);
	}
}
/**
 * TX - transmit to SLIP interface
 *
 * Waits for a send credit (tx_sem), takes one packet from tx_queue,
 * strips the trailing LQI and FCS bytes from the last fragment,
 * SLIP-encodes it and writes it to the UART, then releases the
 * packet.  Runs forever.
 */
static void tx_thread(void)
{
	SYS_LOG_DBG("TX thread started");

	/* Allow to send one TX */
	k_sem_give(&tx_sem);

	while (1) {
		struct net_pkt *pkt;
		struct net_buf *buf;
		size_t len;

		k_sem_take(&tx_sem, K_FOREVER);

		pkt = k_fifo_get(&tx_queue, K_FOREVER);
		buf = net_buf_frag_last(pkt->frags);
		len = net_pkt_get_len(pkt);

		/* NOTE(review): %d with a size_t arg -- presumably fine
		 * on this 32-bit target but %zu would be correct.
		 */
		SYS_LOG_DBG("Send pkt %p buf %p len %d", pkt, buf, len);

		hexdump("SLIP <", buf->data, buf->len);

		/* Remove LQI */
		/* TODO: Reuse get_lqi() */
		buf->len -= 1;

		/* remove FCS 2 bytes */
		buf->len -= 2;

		/* SLIP encode and send */
		len = slip_buffer(slip_buf, buf);
		uart_fifo_fill(uart_dev, slip_buf, len);

		net_pkt_unref(pkt);

#if 0
		k_yield();
#endif
	}
}
/* Feed one received telnet packet into the console input queue
 * (net_frag_read variant).  Drops the packet when it is out of size
 * bounds, is a telnet command, queues are unset, or no buffer is free.
 */
static inline void telnet_handle_input(struct net_pkt *pkt)
{
	struct console_input *input;
	u16_t len, offset, pos;

	len = net_pkt_appdatalen(pkt);
	if (len > CONSOLE_MAX_LINE_LEN || len < TELNET_MIN_MSG) {
		return;
	}

	/* Telnet commands (IAC sequences etc.) are consumed here. */
	if (telnet_handle_command(pkt)) {
		return;
	}

	if (!avail_queue || !input_queue) {
		return;
	}

	input = k_fifo_get(avail_queue, K_NO_WAIT);
	if (!input) {
		return;
	}

	/* App data is the packet tail: skip the protocol headers. */
	offset = net_pkt_get_len(pkt) - len;
	net_frag_read(pkt->frags, offset, &pos, len, input->line);

	/* LF/CR will be removed if only the line is not NUL terminated */
	/* NOTE(review): [len-2] assumes len >= 2; presumably guaranteed
	 * by the TELNET_MIN_MSG bound above -- confirm.
	 */
	if (input->line[len-1] != NVT_NUL) {
		if (input->line[len-1] == NVT_LF) {
			input->line[len-1] = NVT_NUL;
		}

		if (input->line[len-2] == NVT_CR) {
			input->line[len-2] = NVT_NUL;
		}
	}

	k_fifo_put(input_queue, input);
}
/* UART console RX interrupt handler.  Assembles incoming bytes into a
 * console_input line buffer (held in the STATIC local 'cmd' across
 * interrupts), handling ANSI escape sequences, DEL/ESC/TAB/CR control
 * characters, and optional mcumgr frame diversion.  A completed line
 * (CR) is pushed to lines_queue and a fresh buffer is fetched lazily
 * from avail_queue on the next byte.
 */
void uart_console_isr(struct device *unused)
{
	ARG_UNUSED(unused);

	while (uart_irq_update(uart_console_dev) &&
	       uart_irq_is_pending(uart_console_dev)) {
		/* static: the partially-filled line survives between
		 * interrupt invocations.
		 */
		static struct console_input *cmd;
		u8_t byte;
		int rx;

		if (!uart_irq_rx_ready(uart_console_dev)) {
			continue;
		}

		/* Character(s) have been received */

		rx = read_uart(uart_console_dev, &byte, 1);
		if (rx < 0) {
			return;
		}

#ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS
		if (debug_hook_in != NULL && debug_hook_in(byte) != 0) {
			/*
			 * The input hook indicates that no further processing
			 * should be done by this handler.
			 */
			return;
		}
#endif

		/* No line in progress: grab a buffer, or drop input. */
		if (!cmd) {
			cmd = k_fifo_get(avail_queue, K_NO_WAIT);
			if (!cmd) {
				return;
			}
		}

#ifdef CONFIG_UART_CONSOLE_MCUMGR
		/* Divert this byte from normal console handling if it is part
		 * of an mcumgr frame.
		 */
		if (handle_mcumgr(cmd, byte)) {
			continue;
		}
#endif /* CONFIG_UART_CONSOLE_MCUMGR */

		/* Handle ANSI escape mode */
		if (atomic_test_bit(&esc_state, ESC_ANSI)) {
			handle_ansi(byte, cmd->line);
			continue;
		}

		/* Handle escape mode */
		if (atomic_test_and_clear_bit(&esc_state, ESC_ESC)) {
			if (byte == ANSI_ESC) {
				atomic_set_bit(&esc_state, ESC_ANSI);
				atomic_set_bit(&esc_state, ESC_ANSI_FIRST);
			}

			continue;
		}

		/* Handle special control characters */
		if (!isprint(byte)) {
			switch (byte) {
			case DEL:
				if (cur > 0) {
					del_char(&cmd->line[--cur], end);
				}
				break;
			case ESC:
				atomic_set_bit(&esc_state, ESC_ESC);
				break;
			case '\r':
				/* Line complete: terminate, echo CRLF,
				 * reset cursor state and hand off.
				 */
				cmd->line[cur + end] = '\0';
				uart_poll_out(uart_console_dev, '\r');
				uart_poll_out(uart_console_dev, '\n');
				cur = 0;
				end = 0;
				k_fifo_put(lines_queue, cmd);
				cmd = NULL;
				break;
			case '\t':
				if (completion_cb && !end) {
					cur += completion_cb(cmd->line, cur);
				}
				break;
			default:
				break;
			}

			continue;
		}

		/* Ignore characters if there's no more buffer space */
		if (cur + end < sizeof(cmd->line) - 1) {
			insert_char(&cmd->line[cur++], byte, end);
		}
	}
}
/* mbedtls BIO receive callback.  Blocks on the RX fifo for the next
 * network packet (first call per packet), then copies packet fragment
 * data into mbedtls' 'buf', at most 'size' bytes per call, keeping
 * per-context cursor state (frag/remaining) so a large packet is
 * consumed across several calls.
 *
 * @return bytes copied, MBEDTLS_ERR_SSL_CONN_EOF on close,
 *         -EINVAL/-EIO on internal inconsistency.
 */
int ssl_rx(void *context, unsigned char *buf, size_t size)
{
	struct http_client_ctx *ctx = context;
	struct rx_fifo_block *rx_data;
	u16_t read_bytes;
	u8_t *ptr;
	int pos;
	int len;
	int ret = 0;

	/* No fragment cursor -> previous packet fully consumed; block
	 * until ssl_received() queues the next one.
	 */
	if (!ctx->https.mbedtls.ssl_ctx.frag) {
		rx_data = k_fifo_get(&ctx->https.mbedtls.ssl_ctx.rx_fifo,
				     K_FOREVER);
		/* A NULL pkt in the block is the close sentinel. */
		if (!rx_data || !rx_data->pkt) {
			NET_DBG("Closing %p connection", ctx);
			if (rx_data) {
				k_mem_pool_free(&rx_data->block);
			}

			return MBEDTLS_ERR_SSL_CONN_EOF;
		}

		ctx->https.mbedtls.ssl_ctx.rx_pkt = rx_data->pkt;

		k_mem_pool_free(&rx_data->block);

		read_bytes = net_pkt_appdatalen(
			ctx->https.mbedtls.ssl_ctx.rx_pkt);

		ctx->https.mbedtls.ssl_ctx.remaining = read_bytes;
		ctx->https.mbedtls.ssl_ctx.frag =
			ctx->https.mbedtls.ssl_ctx.rx_pkt->frags;

		/* Distance from fragment start to app data = header len. */
		ptr = net_pkt_appdata(ctx->https.mbedtls.ssl_ctx.rx_pkt);
		len = ptr - ctx->https.mbedtls.ssl_ctx.frag->data;

		if (len > ctx->https.mbedtls.ssl_ctx.frag->size) {
			NET_ERR("Buf overflow (%d > %u)", len,
				ctx->https.mbedtls.ssl_ctx.frag->size);
			return -EINVAL;
		}

		/* This will get rid of IP header */
		net_buf_pull(ctx->https.mbedtls.ssl_ctx.frag, len);
	} else {
		/* Resume mid-packet from the saved cursor. */
		read_bytes = ctx->https.mbedtls.ssl_ctx.remaining;
		ptr = ctx->https.mbedtls.ssl_ctx.frag->data;
	}

	len = ctx->https.mbedtls.ssl_ctx.frag->len;
	pos = 0;

	if (read_bytes > size) {
		/* Packet larger than mbedtls' buffer: copy exactly 'size'
		 * bytes and leave the cursor on the unread remainder.
		 * NOTE(review): 'pos'/'len' are int compared against the
		 * size_t 'size' -- presumably sizes here stay well below
		 * INT_MAX; confirm promotion behavior is intended.
		 */
		while (ctx->https.mbedtls.ssl_ctx.frag) {
			read_bytes = len < (size - pos) ?
				len : (size - pos);
#if RX_EXTRA_DEBUG == 1
			NET_DBG("Copying %d bytes", read_bytes);
#endif
			memcpy(buf + pos, ptr, read_bytes);
			pos += read_bytes;

			if (pos < size) {
				ctx->https.mbedtls.ssl_ctx.frag =
					ctx->https.mbedtls.ssl_ctx.frag->frags;
				ptr = ctx->https.mbedtls.ssl_ctx.frag->data;
				len = ctx->https.mbedtls.ssl_ctx.frag->len;
			} else {
				if (read_bytes == len) {
					ctx->https.mbedtls.ssl_ctx.frag =
						ctx->https.mbedtls.ssl_ctx.frag->frags;
				} else {
					net_buf_pull(
						ctx->https.mbedtls.ssl_ctx.frag,
						read_bytes);
				}

				ctx->https.mbedtls.ssl_ctx.remaining -= size;
				return size;
			}
		}
	} else {
		/* Whole remainder fits: copy every fragment, then release
		 * the packet and clear the cursor state.
		 */
		while (ctx->https.mbedtls.ssl_ctx.frag) {
#if RX_EXTRA_DEBUG == 1
			NET_DBG("Copying all %d bytes", len);
#endif
			memcpy(buf + pos, ptr, len);
			pos += len;

			ctx->https.mbedtls.ssl_ctx.frag =
				ctx->https.mbedtls.ssl_ctx.frag->frags;
			if (!ctx->https.mbedtls.ssl_ctx.frag) {
				break;
			}

			ptr = ctx->https.mbedtls.ssl_ctx.frag->data;
			len = ctx->https.mbedtls.ssl_ctx.frag->len;
		}

		net_pkt_unref(ctx->https.mbedtls.ssl_ctx.rx_pkt);
		ctx->https.mbedtls.ssl_ctx.rx_pkt = NULL;
		ctx->https.mbedtls.ssl_ctx.frag = NULL;
		ctx->https.mbedtls.ssl_ctx.remaining = 0;

		if (read_bytes != pos) {
			return -EIO;
		}

		ret = read_bytes;
	}

	return ret;
}
/* HTTPS client worker thread body.  After one-time mbedtls init it
 * loops forever: wait for a request on the TX fifo, (re)connect TCP,
 * run the TLS handshake, send the HTTP request, then read and parse
 * the response until the connection closes or the parser finishes,
 * and reset for the next request via goto.
 */
static void https_handler(struct http_client_ctx *ctx,
			  struct k_sem *startup_sync)
{
	struct tx_fifo_block *tx_data;
	struct http_client_request req;
	size_t len;
	int ret;

	/* First mbedtls specific initialization */
	ret = https_init(ctx);

	/* Creator is released even on init failure. */
	k_sem_give(startup_sync);

	if (ret < 0) {
		return;
	}

reset:
	http_parser_init(&ctx->parser, HTTP_RESPONSE);

	ctx->rsp.data_len = 0;

	/* Wait that the sender sends the data, and the peer to respond to. */
	tx_data = k_fifo_get(&ctx->https.mbedtls.ssl_ctx.tx_fifo,
			     K_FOREVER);
	if (tx_data) {
		/* Because the req pointer might disappear as it is controlled
		 * by application, copy the data here.
		 */
		memcpy(&req, tx_data->req, sizeof(req));
	} else {
		NET_ASSERT(tx_data);
		goto reset;
	}

	/* NOTE(review): uses ctx->req.method while the request itself was
	 * copied into the local 'req' -- presumably ctx->req mirrors the
	 * queued request; confirm they cannot diverge here.
	 */
	print_info(ctx, ctx->req.method);

	/* If the connection is not active, then re-connect */
	ret = tcp_connect(ctx);
	if (ret < 0 && ret != -EALREADY) {
		k_sem_give(&ctx->req.wait);
		goto reset;
	}

	mbedtls_ssl_session_reset(&ctx->https.mbedtls.ssl);
	mbedtls_ssl_set_bio(&ctx->https.mbedtls.ssl, ctx, ssl_tx,
			    ssl_rx, NULL);

	/* SSL handshake. The ssl_rx() function will be called next by
	 * mbedtls library. The ssl_rx() will block and wait that data is
	 * received by ssl_received() and passed to it via fifo. After
	 * receiving the data, this function will then proceed with secure
	 * connection establishment.
	 */
	/* Waiting SSL handshake */
	do {
		ret = mbedtls_ssl_handshake(&ctx->https.mbedtls.ssl);
		if (ret != MBEDTLS_ERR_SSL_WANT_READ &&
		    ret != MBEDTLS_ERR_SSL_WANT_WRITE) {
			if (ret == MBEDTLS_ERR_SSL_CONN_EOF) {
				goto close;
			}

			if (ret < 0) {
				print_error("mbedtls_ssl_handshake returned "
					    "-0x%x", ret);
				goto close;
			}
		}
	} while (ret != 0);

	ret = http_request(ctx, &req, BUF_ALLOC_TIMEOUT);

	/* The queued block is no longer needed once req was sent. */
	k_mem_pool_free(&tx_data->block);

	if (ret < 0) {
		NET_DBG("Send error (%d)", ret);
		goto close;
	}

	NET_DBG("Read HTTPS response");

	do {
		/* Keep one byte spare so the buffer stays terminated. */
		len = ctx->rsp.response_buf_len - 1;
		memset(ctx->rsp.response_buf, 0,
		       ctx->rsp.response_buf_len);

		ret = mbedtls_ssl_read(&ctx->https.mbedtls.ssl,
				       ctx->rsp.response_buf, len);
		if (ret == 0) {
			goto close;
		}

		if (ret == MBEDTLS_ERR_SSL_WANT_READ ||
		    ret == MBEDTLS_ERR_SSL_WANT_WRITE) {
			continue;
		}

		if (ret == MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY) {
			NET_DBG("Connection was closed gracefully");
			goto close;
		}

		if (ret == MBEDTLS_ERR_NET_CONN_RESET) {
			NET_DBG("Connection was reset by peer");
			goto close;
		}

		if (ret == -EIO) {
			NET_DBG("Response received, waiting another ctx %p",
				ctx);
			goto next;
		}

		if (ret < 0) {
			print_error("mbedtls_ssl_read returned -0x%x", ret);
			goto close;
		}

		/* The data_len will count how many bytes we have read,
		 * this value is passed to user supplied response callback
		 * by on_body() and on_message_complete() functions.
		 */
		ctx->rsp.data_len += ret;

		ret = http_parser_execute(&ctx->parser, &ctx->settings,
					  ctx->rsp.response_buf, ret);
		if (!ret) {
			goto close;
		}

		ctx->rsp.data_len = 0;

		if (ret > 0) {
			/* Get more data */
			ret = MBEDTLS_ERR_SSL_WANT_READ;
		}
	} while (ret < 0);

close:
	/* If there is any pending data that have not been processed yet,
	 * we need to free it here.
	 */
	if (ctx->https.mbedtls.ssl_ctx.rx_pkt) {
		net_pkt_unref(ctx->https.mbedtls.ssl_ctx.rx_pkt);
		ctx->https.mbedtls.ssl_ctx.rx_pkt = NULL;
		ctx->https.mbedtls.ssl_ctx.frag = NULL;
	}

	NET_DBG("Resetting HTTPS connection %p", ctx);

	tcp_disconnect(ctx);

next:
	mbedtls_ssl_close_notify(&ctx->https.mbedtls.ssl);
	goto reset;
}
/* Kernel timeout test: exercises k_busy_wait(), k_sleep(), and
 * delayed thread spawning with and without cancellation, verifying
 * via timeout_order_fifo that delayed threads fire in timeout order.
 *
 * @return TC_PASS on success, TC_FAIL on any ordering/timeout error.
 */
static int test_timeout(void)
{
	struct timeout_order *data;
	s32_t timeout;
	int rv;
	int i;

	/* test k_busy_wait() */
	TC_PRINT("Testing k_busy_wait()\n");
	timeout = 20;           /* in ms */

	k_thread_spawn(timeout_stacks[0], THREAD_STACKSIZE,
			test_busy_wait, (void *)(intptr_t) timeout, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0);

	/* 2x margin: the spawned thread must reply within the window. */
	rv = k_sem_take(&reply_timeout, timeout * 2);
	if (rv) {
		TC_ERROR(" *** task timed out waiting for "
			 "k_busy_wait()\n");
		return TC_FAIL;
	}

	/* test k_sleep() */

	TC_PRINT("Testing k_sleep()\n");
	timeout = 50;

	k_thread_spawn(timeout_stacks[0], THREAD_STACKSIZE,
			test_thread_sleep, (void *)(intptr_t) timeout, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0);

	rv = k_sem_take(&reply_timeout, timeout * 2);
	if (rv) {
		TC_ERROR(" *** task timed out waiting for thread on "
			 "k_sleep().\n");
		return TC_FAIL;
	}

	/* test k_thread_spawn() without cancellation */
	TC_PRINT("Testing k_thread_spawn() without cancellation\n");

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_thread_spawn(timeout_stacks[i], THREAD_STACKSIZE,
				delayed_thread,
				(void *)i, NULL, NULL,
				K_PRIO_COOP(5), 0, timeouts[i].timeout);
	}

	/* Threads must report in ascending timeout order. */
	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		data = k_fifo_get(&timeout_order_fifo, 750);
		if (!data) {
			TC_ERROR
			    (" *** timeout while waiting for delayed thread\n");
			return TC_FAIL;
		}

		if (data->timeout_order != i) {
			TC_ERROR(" *** wrong delayed thread ran (got %d, "
				 "expected %d)\n", data->timeout_order, i);
			return TC_FAIL;
		}

		TC_PRINT(" got thread (q order: %d, t/o: %d) as expected\n",
			 data->q_order, data->timeout);
	}

	/* ensure no more thread fire */
	data = k_fifo_get(&timeout_order_fifo, 750);
	if (data) {
		TC_ERROR(" *** got something unexpected in the fifo\n");
		return TC_FAIL;
	}

	/* test k_thread_spawn() with cancellation */
	TC_PRINT("Testing k_thread_spawn() with cancellations\n");

	int cancellations[] = { 0, 3, 4, 6 };
	int num_cancellations = ARRAY_SIZE(cancellations);
	int next_cancellation = 0;

	k_tid_t delayed_threads[NUM_TIMEOUT_THREADS];

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_tid_t id;

		id = k_thread_spawn(timeout_stacks[i], THREAD_STACKSIZE,
				    delayed_thread, (void *)i, NULL, NULL,
				    K_PRIO_COOP(5), 0, timeouts[i].timeout);

		delayed_threads[i] = id;
	}

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		int j;

		/* Cancel selected threads (by timeout order 'i') before
		 * they fire; 'j' maps timeout order back to spawn index.
		 */
		if (i == cancellations[next_cancellation]) {
			TC_PRINT(" cancelling "
				 "[q order: %d, t/o: %d, t/o order: %d]\n",
				 timeouts[i].q_order, timeouts[i].timeout, i);

			for (j = 0; j < NUM_TIMEOUT_THREADS; j++) {
				if (timeouts[j].timeout_order == i) {
					break;
				}
			}

			if (j < NUM_TIMEOUT_THREADS) {
				k_thread_cancel(delayed_threads[j]);
				++next_cancellation;
				continue;
			}
		}

		data = k_fifo_get(&timeout_order_fifo, 2750);
		if (!data) {
			TC_ERROR
			    (" *** timeout while waiting for delayed thread\n");
			return TC_FAIL;
		}

		if (data->timeout_order != i) {
			TC_ERROR(" *** wrong delayed thread ran (got %d, "
				 "expected %d)\n", data->timeout_order, i);
			return TC_FAIL;
		}

		TC_PRINT(" got (q order: %d, t/o: %d, t/o order %d) "
			 "as expected\n", data->q_order, data->timeout,
			 data->timeout_order);
	}

	if (num_cancellations != next_cancellation) {
		TC_ERROR(" *** wrong number of cancellations (expected %d, "
			 "got %d\n", num_cancellations, next_cancellation);
		return TC_FAIL;
	}

	/* ensure no more thread fire */
	data = k_fifo_get(&timeout_order_fifo, 750);
	if (data) {
		TC_ERROR(" *** got something unexpected in the fifo\n");
		return TC_FAIL;
	}

	return TC_PASS;
}